From: Benjamin Mako Hill
Date: Sat, 15 Feb 2020 16:55:55 +0000 (-0800)
Subject: update the harry potter for morning lecture
X-Git-Url: https://projects.mako.cc/source/harrypotter-wikipedia-cdsw/commitdiff_plain/c3aed8c1b52241fa1ab6355a60a8327686a7ecd3

update the harry potter for morning lecture

- gone through and removed .keys(), moved to print etc
- removed old python files
- added questions for the afternoon session in README.mdwn
---

diff --git a/README.mdwn b/README.mdwn
new file mode 100644
index 0000000..01f5dc5
--- /dev/null
+++ b/README.mdwn
@@ -0,0 +1,9 @@
+Challenge questions:
+
+1. What are the most edited articles on Harry Potter on Wikipedia?
+2. Who are the 5 most active editors on articles in Harry Potter? How many edits have they made?
+3. Create graphs in a spreadsheet of the trend lines (i.e., edits per day over time) for the three most active editors.
+4. Create graphs in a spreadsheet of the trend lines (i.e., edits per day over time) for the three most popular articles.
+5. Instead of "binning" your dataset by day, bin it by month for each of the two previous questions.
+6. Pick a different topic in Wikipedia and download a new dataset. Answer the questions above for this other dataset.
+
diff --git a/build_harry_potter_dataset.ipynb b/build_harry_potter_dataset.ipynb
index a9a62a5..e075e9a 100644
--- a/build_harry_potter_dataset.ipynb
+++ b/build_harry_potter_dataset.ipynb
@@ -21,7 +21,8 @@
     "    # create a base url for the api and then a normal url which is initially\n",
     "    # just a copy of it\n",
     "    # The following line is what the requests call is doing, basically.\n",
-    "    # \"http://en.wikipedia.org/w/api.php/?action=query&titles={0}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\".format(title)\n",
+    "    # f\"http://en.wikipedia.org/w/api.php/?action=query&titles={title}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\"\n",
+    "    # e.g.: http://en.wikipedia.org/w/api.php/?action=query&titles=Harry_Potter&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\n",
     "    wp_api_url = \"http://en.wikipedia.org/w/api.php/\"\n",
     "\n",
     "    parameters = {'action' : 'query',\n",
@@ -43,7 +44,7 @@
     "    pages = api_answer[\"query\"][\"pages\"]\n",
     "\n",
     "    # for every page, (there should always be only one) get its revisions:\n",
-    "    for page in pages.keys():\n",
+    "    for page in pages:\n",
     "        query_revisions = pages[page][\"revisions\"]\n",
     "\n",
     "        # for every revision, first we do some cleaning up\n",
@@ -95,20 +96,20 @@
    "source": [
     "category = \"Harry Potter\"\n",
     "\n",
-    "# we'll use another api called catscan2 to grab a list of pages in\n",
+    "# we'll use another api called petscan to grab a list of pages in\n",
     "# categories and subcategories. it works like all the other apis we've\n",
     "# studied!\n",
     "#\n",
     "# The following requests call basically does the same thing as this string:\n",
-    "# \"http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories={0}&doit=1&format=json\".format(category)\n",
-    "url_catscan = \"https://petscan.wmflabs.org/\"\n",
+    "# f\"https://petscan.wmflabs.org/?depth=10&categories={category}&format=json&doit=1\"\n",
+    "url_petscan = \"https://petscan.wmflabs.org/\"\n",
     "\n",
     "parameters = {'depth' : 10,\n",
     "              'categories' : category,\n",
     "              'format' : 'json',\n",
     "              'doit' : 1}\n",
     "\n",
-    "# r = requests.get(\"http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories=Harry Potter&doit=1&format=json\"\n"
+    "# r = requests.get(\"https://petscan.wmflabs.org/?depth=10&categories=Harry Potter&format=json&doit=1\")"
    ]
   },
   {
@@ -117,7 +118,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "r = requests.get(url_catscan, params=parameters)"
+    "r = requests.get(url_petscan, params=parameters)"
    ]
   },
   {
@@ -138,7 +139,7 @@
    "source": [
     "# open a file to print the header\n",
     "output_file = open(\"hp_wiki.tsv\", \"w\", encoding='utf-8')\n",
-    "print(\"\\t\".join([\"title\", \"user\", \"timestamp\", \"size\", \"anon\", \"minor\", \"revid\"]), file=output_file)\n"
+    "print(\"\\t\".join([\"title\", \"user\", \"timestamp\", \"size\", \"anon\", \"minor\", \"revid\"]), file=output_file)"
    ]
   },
   {
@@ -148,7 +149,7 @@
    "outputs": [],
    "source": [
     "# for every article\n",
-    "for article in articles[0:10]:\n",
+    "for article in articles:\n",
     "    # skip this until it's an article\n",
     "    if article[\"namespace\"] != 0:\n",
     "        continue\n",
diff --git a/build_hpwp_dataset.py b/build_hpwp_dataset.py
deleted file mode 100644
index b421072..0000000
--- a/build_hpwp_dataset.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-
-import encoding_fix
-import requests
-
-# get_article_revisions is a function that takes an article title in
-# wikipedia and return a list of all the revisions and metadata for
-# that article
-def get_article_revisions(title):
-    revisions = []
-
-    # create a base url for the api and then a normal url which is initially
-    # just a copy of it
-    # The following line is what the requests call is doing, basically.
- # "http://en.wikipedia.org/w/api.php/?action=query&titles={0}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=".format(title) - wp_api_url = "http://en.wikipedia.org/w/api.php/" - - parameters = {'action' : 'query', - 'titles' : title, - 'prop' : 'revisions', - 'rvprop' : 'flags|timestamp|user|size|ids', - 'rvlimit' : 500, - 'format' : 'json', - 'continue' : '' } - - # we'll repeat this forever (i.e., we'll only stop when we find - # the "break" command) - while True: - # the first line open the urls but also handles unicode urls - call = requests.get(wp_api_url, params=parameters) - api_answer = call.json() - - # get the list of pages from the json object - pages = api_answer["query"]["pages"] - - # for every page, (there should always be only one) get its revisions: - for page in pages.keys(): - query_revisions = pages[page]["revisions"] - - # for every revision, first we do some cleaning up - for rev in query_revisions: - #print(rev) - # let's continue/skip this revision if the user is hidden - if "userhidden" in rev: - continue - - # 1: add a title field for the article because we're going to mix them together - rev["title"] = title - - # 2: let's "recode" anon so it's true or false instead of present/missing - if "anon" in rev: - rev["anon"] = True - else: - rev["anon"] = False - - # 3: let's recode "minor" in the same way - if "minor" in rev: - rev["minor"] = True - else: - rev["minor"] = False - - # we're going to change the timestamp to make it work a little better in excel/spreadsheets - rev["timestamp"] = rev["timestamp"].replace("T", " ") - rev["timestamp"] = rev["timestamp"].replace("Z", "") - - # finally, save the revisions we've seen to a varaible - revisions.append(rev) - - # 'continue' tells us there's more revisions to add - if 'continue' in api_answer: - # replace the 'continue' parameter with the contents of the - # api_answer dictionary. - parameters.update(api_answer['continue']) - else: - break - - # return all the revisions for this page - return(revisions) - -category = "Harry Potter" - -# we'll use another api called catscan2 to grab a list of pages in -# categories and subcategories. it works like all the other apis we've -# studied! 
-# -# The following requests call basically does the same thing as this string: -# "http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories={0}&doit=1&format=json".format(category) -url_catscan = "https://petscan.wmflabs.org/" - -parameters = {'depth' : 10, - 'categories' : category, - 'format' : 'json', - 'doit' : 1} - -# r = requests.get("http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories=Harry Potter&doit=1&format=json" - -r = requests.get(url_catscan, params=parameters) -articles_json = r.json() -articles = articles_json["*"][0]["a"]["*"] - -# open a file to write all the output -output = open("hp_wiki.tsv", "w", encoding="utf-8") -output.write("\t".join(["title", "user", "timestamp", "size", "anon", "minor", "revid"]) + "\n") - -# for every article -for article in articles: - # skip this until it's an article - if article["namespace"] != 0: - continue - - # first grab the article's title - title = article["title"] - print(title) - - # get the list of revisions from our function and then iterate through it, - # printing it to our output file - revisions = get_article_revisions(title) - for rev in revisions: - output.write("\t".join(['"' + rev["title"] + '"', '"' + rev["user"] + '"', - rev["timestamp"], str(rev["size"]), str(rev["anon"]), - str(rev["minor"]), str(rev["revid"])]) + "\n") - -# close the file, we're done here! -output.close() - diff --git a/harrypotter_edit_trend.ipynb b/harrypotter_edit_trend.ipynb index 13d3d11..028ff86 100644 --- a/harrypotter_edit_trend.ipynb +++ b/harrypotter_edit_trend.ipynb @@ -64,7 +64,7 @@ "outputs": [], "source": [ "# iterate through every day and print out data into the file\n", - "for day_string in edits_by_day.keys():\n", + "for day_string in edits_by_day:\n", " print(\"\\t\".join([day_string, str(edits_by_day[day_string])]), file=output_file)\n", "\n", "output_file.close()" diff --git a/hpwp-minor.py b/hpwp-minor.py deleted file mode 100644 index 79135a1..0000000 --- a/hpwp-minor.py +++ /dev/null @@ -1,18 +0,0 @@ -import encoding_fix - -from csv import DictReader - -input_file = open("hp_wiki.tsv", 'r', encoding="utf-8") - -num_edits = 0 -num_anon = 0 -for row in DictReader(input_file, delimiter="\t"): - num_edits = num_edits + 1 - if row["anon"] == "True": - num_anon = num_anon + 1 - -prop_anon = num_anon / num_edits - -print("total edits: {0}".format(num_edits)) -print("anon edits: {0}".format(num_anon)) -print("proportion anon: {0}".format(prop_anon)) diff --git a/hpwp-trend.py b/hpwp-trend.py deleted file mode 100644 index b9d1c7b..0000000 --- a/hpwp-trend.py +++ /dev/null @@ -1,29 +0,0 @@ -import encoding_fix - -from csv import DictReader - -# read in the input file and count by day -input_file = open("hp_wiki.tsv", 'r', encoding="utf-8") - -edits_by_day = {} -for row in DictReader(input_file, delimiter="\t"): - day_string = row['timestamp'][0:10] - - if day_string in edits_by_day: - edits_by_day[day_string] = edits_by_day[day_string] + 1 - else: - edits_by_day[day_string] = 1 - -input_file.close() - -# output the counts by day -output_file = open("hp_edits_by_day.tsv", "w", encoding='utf-8') - -# write a header -output_file.write("date\tedits\n") - -# iterate through every day and print out data into the file -for day_string in edits_by_day.keys(): - output_file.write("\t".join([day_string, str(edits_by_day[day_string])]) + "\n") - -output_file.close()
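
The challenge questions added to README.mdwn can all be answered from the hp_wiki.tsv file that the notebook writes. What follows is a minimal sketch for questions 1 and 2 (most edited articles, most active editors), not something this commit adds: it assumes hp_wiki.tsv already exists with the header the notebook prints (title, user, timestamp, size, anon, minor, revid).

# sketch: count edits per article and per editor in hp_wiki.tsv
from csv import DictReader
from collections import Counter

edits_per_article = Counter()
edits_per_editor = Counter()

with open("hp_wiki.tsv", "r", encoding="utf-8") as input_file:
    for row in DictReader(input_file, delimiter="\t"):
        edits_per_article[row["title"]] += 1
        edits_per_editor[row["user"]] += 1

# question 1: the most edited articles
for title, num_edits in edits_per_article.most_common(5):
    print(f"{title}\t{num_edits}")

# question 2: the 5 most active editors and how many edits they have made
for user, num_edits in edits_per_editor.most_common(5):
    print(f"{user}\t{num_edits}")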
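
For questions 3 and 4, one possible approach is to write a spreadsheet-friendly file with one row per editor per day and graph it there. The sketch below does the editor version; keying on "title" instead of "user" gives the article version. The output file name hp_top_editor_trends.tsv is made up for illustration.

# sketch: per-day edit counts for the three most active editors in hp_wiki.tsv
from csv import DictReader
from collections import Counter, defaultdict

with open("hp_wiki.tsv", "r", encoding="utf-8") as input_file:
    rows = list(DictReader(input_file, delimiter="\t"))

# find the three most active editors
top_editors = [user for user, count in Counter(row["user"] for row in rows).most_common(3)]

# count edits per editor per day (timestamps look like "2020-02-15 16:55:55")
edits_by_editor_day = defaultdict(int)
for row in rows:
    if row["user"] in top_editors:
        day_string = row["timestamp"][0:10]
        edits_by_editor_day[(row["user"], day_string)] += 1

# write a file that can be opened in a spreadsheet and graphed
output_file = open("hp_top_editor_trends.tsv", "w", encoding="utf-8")
print("user\tdate\tedits", file=output_file)
for (user, day_string), num_edits in sorted(edits_by_editor_day.items()):
    print("\t".join([user, day_string, str(num_edits)]), file=output_file)
output_file.close()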
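
Question 5 asks for binning by month instead of by day. The counting loop in harrypotter_edit_trend.ipynb (and in the removed hpwp-trend.py) keys on row["timestamp"][0:10], the date part of the timestamp; a sketch of the monthly variant simply keys on the first seven characters instead. The hp_edits_by_month.tsv file name is illustrative.

# sketch: the edits-by-day loop rewritten to bin by month (YYYY-MM)
from csv import DictReader

edits_by_month = {}
with open("hp_wiki.tsv", "r", encoding="utf-8") as input_file:
    for row in DictReader(input_file, delimiter="\t"):
        month_string = row["timestamp"][0:7]   # e.g. "2020-02"
        if month_string in edits_by_month:
            edits_by_month[month_string] = edits_by_month[month_string] + 1
        else:
            edits_by_month[month_string] = 1

# output the counts by month
output_file = open("hp_edits_by_month.tsv", "w", encoding="utf-8")
print("month\tedits", file=output_file)
for month_string in sorted(edits_by_month):
    print("\t".join([month_string, str(edits_by_month[month_string])]), file=output_file)
output_file.close()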