# get_article_revisions is a function that takes an article title in
# wikipedia and returns a list of all the revisions and metadata for
# that article
def get_article_revisions(title):
    """Return a list of revision-metadata dicts for the Wikipedia article *title*.

    Each dict carries the fields requested from the API (user, timestamp,
    size, revid, flags) plus a "title" field and "anon"/"minor" recoded as
    True/False. Revisions whose user is hidden are skipped. Follows the
    API's query-continue tokens, so all revisions are returned, not just
    the first 500.
    """
    revisions = []

    # create a base url for the api and then a working url which is
    # initially just a copy of it
    wp_api_base = "http://en.wikipedia.org/w/api.php/?action=query&titles=%(article_title)s&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json"
    wp_api_base = wp_api_base % {'article_title': title}
    wp_api_url = wp_api_base

    # we'll repeat this forever (i.e., we'll only stop when we find the
    # "break" command below, once the API stops sending continue tokens)
    while True:
        # requests handles unicode urls for us
        call = requests.get(wp_api_url)
        api_answer = json.loads(call.content)

        # get the list of pages from the json object
        pages = api_answer["query"]["pages"]

        # for every page (there should always be only one) get the revisions
        for page in pages.keys():
            query_revisions = pages[page]["revisions"]

            # for every revision, do some cleaning up first
            for rev in query_revisions:
                # skip this revision entirely if the user is hidden
                if "userhidden" in rev:
                    continue

                # 1: add a title field for the article because revisions
                # from several articles get mixed together downstream
                rev["title"] = title

                # 2: recode "anon" so it's True/False instead of
                # present/missing
                rev["anon"] = "anon" in rev

                # 3: recode "minor" in the same way
                rev["minor"] = "minor" in rev

                # reformat the ISO timestamp ("...T...Z") so it imports a
                # little better into excel and similar tools
                rev["timestamp"] = rev["timestamp"].replace("T", " ")
                rev["timestamp"] = rev["timestamp"].replace("Z", "")

                # finally save the cleaned revision
                revisions.append(rev)

        # if there is a query-continue, it means there are more revisions:
        # grab the rvcontinue token, append it to the base url, and head
        # back to the start of the loop
        if 'query-continue' in api_answer:
            rvcontinue = api_answer["query-continue"]["revisions"]["rvcontinue"]
            wp_api_url = wp_api_base + "&rvcontinue=%(continue_from)s" % {'continue_from': rvcontinue}
        else:
            # no continue token means we're done
            break

    # return all the revisions for this page
    return revisions
category = "Harry_Potter"

# we'll use another api called catscan2 to grab a list of pages in
# categories and subcategories. it works like all the other apis we've
# seen: fill in the url template, fetch it, and parse the json
url_catscan = 'http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories=%(category)s&doit=1&format=json'
url_catscan = url_catscan % {'category': category}
call = requests.get(url_catscan)
articles = json.loads(call.content)
articles = articles["*"][0]["a"]["*"]

# open a file to write all the output, and write the csv header row first
output = codecs.open("hp_wiki.csv", "wb", "utf-8")
output.write(",".join(["title", "user", "timestamp", "size", "anon", "minor", "revid"]) + "\n")

for article in articles:
    # first grab the title
    title = article["a"]["title"]

    # get the list of revisions from our function and then iterate
    # through it, writing each revision as one csv row
    revisions = get_article_revisions(title)
    for rev in revisions:
        # NOTE(review): a title or user containing '"' or ',' would break
        # this hand-rolled quoting; the stdlib csv module would be safer
        output.write(",".join(['"' + rev["title"] + '"', '"' + rev["user"] + '"',
                               rev["timestamp"], str(rev["size"]), str(rev["anon"]),
                               str(rev["minor"]), str(rev["revid"])]) + "\n")

# close the file, we're done here!
output.close()