#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import downloader
import histogram
import json
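# NOTE: downloader and histogram are this project's own modules, not
# PyPI packages; downloader wraps the Wikimedia API and histogram
# presumably renders SVG charts (it is given .svg paths below).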
# Constants
RES_DIR = "../res/"
# Gather data
api = downloader.WikimediaAPI()
down = downloader.Downloader()
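# get_pages_around() builds a geosearch request (endpoint + parameters);
# download() then fetches the results, presumably following the API's
# "continue" tokens and returning one JSON fragment per response, up to
# `limit` results in total.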
endpoint, geosearch_params = api.get_pages_around()
geosearch_results = down.download(endpoint, "geosearch",
                                  geosearch_params, limit=50)
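# Collect the title of every page found around the target coordinates.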
pages_title = []
for result_fragment in geosearch_results:
    for page_properties in result_fragment["query"]["geosearch"]:
        pages_title.append(page_properties["title"])
with open(RES_DIR + "Pages", "w") as f:
    f.write('\n'.join(pages_title))
data_count = len(pages_title)
data = {}
for i, title in enumerate(pages_title, start=1):
    # Same-line progress indicator.
    print("\rGathering data, please wait: {:.0f}%".format(100 * i / data_count),
          end="")
    watchers_endpoint, watchers_params = api.get_watchers(title)
    revisions_endpoint, revisions_params = api.get_revisions(title)
    watchers_results = down.download(watchers_endpoint, "info",
                                     watchers_params, limit=500)
    revisions_results = down.download(revisions_endpoint, "revisions",
                                      revisions_params, limit=100000)
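    # The "info" result maps page IDs to page objects; each query here
    # asks about a single title, so this loop runs once per page.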
    for page_id, page in watchers_results[0]["query"]["pages"].items():
        page_title = page["title"]
        # The API omits the "watchers" field when it withholds the
        # count, so default to 0 in that case.
        page_watchers = page.get("watchers", 0)
    # Sum the revisions reported across every result fragment.
    page_revisions = 0
    for revisions_result in revisions_results:
        for page_id, page in revisions_result["query"]["pages"].items():
            page_revisions += len(page["revisions"])
    data[page_title] = {
        "watchers": page_watchers,
        "revisions": page_revisions
    }
with open(RES_DIR + "data-out", "w") as f:
f.write(json.dumps(data, indent=4, ensure_ascii=False))
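# data-out now maps each page title to its watcher and revision counts,
# ready to be plotted.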
# Make histograms
watcher_revisions = histogram.Histogram(RES_DIR + "warev.svg",
                                        title="Number of watchers versus "
                                              "number of revisions")
watcher_contributers = histogram.Histogram(RES_DIR + "waco.svg",
                                           title="Number of watchers versus "
                                                 "number of contributors")