def get_article_revisions(title):
    """Return a list of revision-metadata dicts for one Wikipedia article.

    Queries the English Wikipedia API for every revision of the article
    named *title*, following API continuation until all revisions have
    been collected. Each returned dict carries (at least) the keys:
    title, user, timestamp, size, anon, minor, revid.

    Revisions whose author is hidden ("userhidden") are skipped.
    """
    revisions = []

    # Base URL for the API; the query itself is passed as parameters.
    # The following requests call is equivalent to fetching:
    # "http://en.wikipedia.org/w/api.php/?action=query&titles={0}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json".format(title)
    wp_api_url = "http://en.wikipedia.org/w/api.php/"

    parameters = {'action': 'query',
                  'titles': title,
                  'prop': 'revisions',
                  'rvprop': 'flags|timestamp|user|size|ids',
                  'rvlimit': 500,
                  'format': 'json'}

    # Repeat until the API stops sending a 'continue' token (i.e., until
    # we hit the "break" below).
    while True:
        # requests handles URL encoding (including unicode titles) for us.
        call = requests.get(wp_api_url, params=parameters)
        api_answer = call.json()

        # Get the list of pages from the JSON object.
        pages = api_answer["query"]["pages"]

        # For every page (there should always be only one) get its revisions.
        for page in pages.keys():
            query_revisions = pages[page]["revisions"]

            # For every revision, first do some cleaning up.
            for rev in query_revisions:
                # Skip this revision if the user is hidden.
                if "userhidden" in rev:
                    continue

                # 1: add a title field because we'll mix articles together.
                rev["title"] = title

                # 2: recode "anon" so it's True/False instead of present/missing.
                rev["anon"] = "anon" in rev

                # 3: recode "minor" in the same way.
                rev["minor"] = "minor" in rev

                # Reformat the timestamp ("2001-01-01T00:00:00Z" ->
                # "2001-01-01 00:00:00") so it works better in spreadsheets.
                rev["timestamp"] = rev["timestamp"].replace("T", " ")
                rev["timestamp"] = rev["timestamp"].replace("Z", "")

                # Finally, save the revision we've seen.
                revisions.append(rev)

        # 'continue' tells us there are more revisions to fetch: merge the
        # continuation token into the parameters and loop again.
        if 'continue' in api_answer:
            parameters.update(api_answer['continue'])
        else:
            break

    # Return all the revisions for this page.
    return revisions
category = "Harry Potter"

# We'll use another API, catscan2, to grab a list of pages in a category
# and all of its subcategories. It works like the other APIs we've seen.
# The following requests call does the same thing as this string:
# "http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories={0}&doit=1&format=json".format(category)
url_catscan = "http://tools.wmflabs.org/catscan2/catscan2.php"

parameters = {'depth': 10,
              'categories': category,
              'doit': 1,
              'format': 'json'}

r = requests.get(url_catscan, params=parameters)
articles_json = r.json()
articles = articles_json["*"][0]["a"]["*"]

# Open the output TSV; the `with` block guarantees the file is closed
# even if a request below raises.
with open("hp_wiki.tsv", "w", encoding="utf-8") as output:
    # Header row matching the fields written per revision below.
    output.write("\t".join(["title", "user", "timestamp", "size", "anon", "minor", "revid"]) + "\n")

    for article in articles:
        # First grab the article's title.
        title = article["a"]["title"]

        # Get the list of revisions from our function and write each one
        # to the output file as a tab-separated row.
        revisions = get_article_revisions(title)
        for rev in revisions:
            output.write("\t".join(['"' + rev["title"] + '"', '"' + rev["user"] + '"',
                                    rev["timestamp"], str(rev["size"]), str(rev["anon"]),
                                    str(rev["minor"]), str(rev["revid"])]) + "\n")