projects.mako.cc - harrypotter-wikipedia-cdsw/commitdiff
update the Harry Potter materials for the morning lecture
author Benjamin Mako Hill <mako@atdot.cc>
Sat, 15 Feb 2020 16:55:55 +0000 (08:55 -0800)
committer Benjamin Mako Hill <mako@atdot.cc>
Sat, 15 Feb 2020 16:55:55 +0000 (08:55 -0800)
- went through and removed .keys() (see the short example below) and moved to print(), etc.
- removed old Python files
- added questions for the afternoon session in README.mdwn
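
For context on the .keys() change: in Python 3, iterating over a dict visits its keys directly, so dropping .keys() leaves the loops unchanged. A minimal sketch, using a made-up stand-in for the API's pages dict rather than real output:

# hypothetical stand-in for api_answer["query"]["pages"]
pages = {"2387806": {"title": "Harry Potter"}}

# iterating over a dict yields its keys, so these two loops are equivalent
for page in pages.keys():
    print(page, pages[page]["title"])

for page in pages:
    print(page, pages[page]["title"])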

README.mdwn [new file with mode: 0644]
build_harry_potter_dataset.ipynb
build_hpwp_dataset.py [deleted file]
harrypotter_edit_trend.ipynb
hpwp-minor.py [deleted file]
hpwp-trend.py [deleted file]

diff --git a/README.mdwn b/README.mdwn
new file mode 100644 (file)
index 0000000..01f5dc5
--- /dev/null
@@ -0,0 +1,9 @@
+Challenge questions:
+
+1. What are the most edited Harry Potter articles on Wikipedia?
+2. Who are the 5 most active editors on Harry Potter articles? How many edits have they made?
+3. Create graphs in a spreadsheet of the trend lines (i.e., edits per day over time) for the three most active editors.
+4. Create graphs in a spreadsheet of the trend lines (i.e., edits per day over time) for the three most popular articles.
+5. Instead of "binning" your dataset by day, bin it by month and redo the two previous questions.
+6. Pick a different topic in Wikipedia and download a new dataset. Answer the questions above for this other dataset.
+
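
As a starting point for question 5, here is a minimal sketch of binning edits by month instead of by day. It assumes hp_wiki.tsv has already been produced by the notebook below, with tab-separated columns and timestamps written as "2020-02-15 16:55:55"; only the slice of the timestamp changes compared to the daily version:

from csv import DictReader

# count edits per month rather than per day
input_file = open("hp_wiki.tsv", "r", encoding="utf-8")

edits_by_month = {}
for row in DictReader(input_file, delimiter="\t"):
    # keep "YYYY-MM" instead of the "YYYY-MM-DD" used for daily binning
    month_string = row["timestamp"][0:7]
    edits_by_month[month_string] = edits_by_month.get(month_string, 0) + 1

input_file.close()

# print one line per month; this could also be written out as a .tsv like the daily version
for month_string in edits_by_month:
    print("\t".join([month_string, str(edits_by_month[month_string])]))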
diff --git a/build_harry_potter_dataset.ipynb b/build_harry_potter_dataset.ipynb
index a9a62a562f25c837cbcb214ca77d10f83cdcf747..e075e9a8d4f640e8968a08fdd5a00b75f1a4520f 100644 (file)
@@ -21,7 +21,8 @@
     "    # create a base url for the api and then a normal url which is initially\n",
     "    # just a copy of it\n",
     "    # The following line is what the requests call is doing, basically.\n",
     "    # create a base url for the api and then a normal url which is initially\n",
     "    # just a copy of it\n",
     "    # The following line is what the requests call is doing, basically.\n",
-    "    # \"http://en.wikipedia.org/w/api.php/?action=query&titles={0}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\".format(title)\n",
+    "    # f\"http://en.wikipedia.org/w/api.php/?action=query&titles={title}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\"\n",
+    "    # e.g.: http://en.wikipedia.org/w/api.php/?action=query&titles=Harry_Potter&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=\n",
     "    wp_api_url = \"http://en.wikipedia.org/w/api.php/\"\n",
     "\n",
     "    parameters = {'action' : 'query',\n",
     "    wp_api_url = \"http://en.wikipedia.org/w/api.php/\"\n",
     "\n",
     "    parameters = {'action' : 'query',\n",
@@ -43,7 +44,7 @@
     "        pages = api_answer[\"query\"][\"pages\"]\n",
     "\n",
     "        # for every page, (there should always be only one) get its revisions:\n",
     "        pages = api_answer[\"query\"][\"pages\"]\n",
     "\n",
     "        # for every page, (there should always be only one) get its revisions:\n",
-    "        for page in pages.keys():\n",
+    "        for page in pages:\n",
     "            query_revisions = pages[page][\"revisions\"]\n",
     "\n",
     "            # for every revision, first we do some cleaning up\n",
     "            query_revisions = pages[page][\"revisions\"]\n",
     "\n",
     "            # for every revision, first we do some cleaning up\n",
    "source": [
     "category = \"Harry Potter\"\n",
     "\n",
    "source": [
     "category = \"Harry Potter\"\n",
     "\n",
-    "# we'll use another api called catscan2 to grab a list of pages in\n",
+    "# we'll use another api called petscan to grab a list of pages in\n",
     "# categories and subcategories. it works like all the other apis we've\n",
     "# studied!\n",
     "#\n",
     "# The following requests call basically does the same thing as this string:\n",
     "# categories and subcategories. it works like all the other apis we've\n",
     "# studied!\n",
     "#\n",
     "# The following requests call basically does the same thing as this string:\n",
-    "# \"http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories={0}&doit=1&format=json\".format(category)\n",
-    "url_catscan = \"https://petscan.wmflabs.org/\"\n",
+    "# f\"https://petscan.wmflabs.org/?depth=10&categories={category}&format=json&doit=1\"\n",
+    "url_petscan = \"https://petscan.wmflabs.org/\"\n",
     "\n",
     "parameters = {'depth' : 10,\n",
     "              'categories' : category,\n",
     "              'format' : 'json',\n",
     "              'doit' : 1}\n",
     "\n",
     "\n",
     "parameters = {'depth' : 10,\n",
     "              'categories' : category,\n",
     "              'format' : 'json',\n",
     "              'doit' : 1}\n",
     "\n",
-    "# r = requests.get(\"http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories=Harry Potter&doit=1&format=json\"\n"
+    "# r = requests.get(\"https://petscan.wmflabs.org/?depth=10&categories=Harry Potter&format=json&doit=1\")"
    ]
   },
   {
    "metadata": {},
    "outputs": [],
    "source": [
    "metadata": {},
    "outputs": [],
    "source": [
-    "r = requests.get(url_catscan, params=parameters)"
+    "r = requests.get(url_petscan, params=parameters)"
    ]
   },
   {
    "source": [
     "# open a file to print the header\n",
     "output_file = open(\"hp_wiki.tsv\", \"w\", encoding='utf-8')\n",
    "source": [
     "# open a file to print the header\n",
     "output_file = open(\"hp_wiki.tsv\", \"w\", encoding='utf-8')\n",
-    "print(\"\\t\".join([\"title\", \"user\", \"timestamp\", \"size\", \"anon\", \"minor\", \"revid\"]), file=output_file)\n"
+    "print(\"\\t\".join([\"title\", \"user\", \"timestamp\", \"size\", \"anon\", \"minor\", \"revid\"]), file=output_file)"
    ]
   },
   {
    "outputs": [],
    "source": [
     "# for every article\n",
    "outputs": [],
    "source": [
     "# for every article\n",
-    "for article in articles[0:10]:\n",
+    "for article in articles:\n",
     "    # skip this until it's an article\n",
     "    if article[\"namespace\"] != 0:\n",
     "        continue\n",
     "    # skip this until it's an article\n",
     "    if article[\"namespace\"] != 0:\n",
     "        continue\n",
diff --git a/build_hpwp_dataset.py b/build_hpwp_dataset.py
deleted file mode 100644 (file)
index b421072..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-
-import encoding_fix
-import requests
-
-# get_article_revisions is a function that takes an article title in
-# wikipedia and return a list of all the revisions and metadata for
-# that article
-def get_article_revisions(title):
-    revisions = []
-
-    # create a base url for the api and then a normal url which is initially
-    # just a copy of it
-    # The following line is what the requests call is doing, basically.
-    # "http://en.wikipedia.org/w/api.php/?action=query&titles={0}&prop=revisions&rvprop=flags|timestamp|user|size|ids&rvlimit=500&format=json&continue=".format(title)
-    wp_api_url = "http://en.wikipedia.org/w/api.php/"
-
-    parameters = {'action' : 'query',
-                  'titles' : title,
-                  'prop' : 'revisions',
-                  'rvprop' : 'flags|timestamp|user|size|ids',
-                  'rvlimit' : 500,
-                  'format' : 'json',
-                  'continue' : '' }
-
-    # we'll repeat this forever (i.e., we'll only stop when we find
-    # the "break" command)
-    while True:
-        # the first line open the urls but also handles unicode urls
-        call = requests.get(wp_api_url, params=parameters)
-        api_answer = call.json()
-
-        # get the list of pages from the json object
-        pages = api_answer["query"]["pages"]
-
-        # for every page, (there should always be only one) get its revisions:
-        for page in pages.keys():
-            query_revisions = pages[page]["revisions"]
-
-            # for every revision, first we do some cleaning up
-            for rev in query_revisions:
-                #print(rev)
-                # let's continue/skip this revision if the user is hidden
-                if "userhidden" in rev:
-                    continue
-                
-                # 1: add a title field for the article because we're going to mix them together
-                rev["title"] = title
-
-                # 2: let's "recode" anon so it's true or false instead of present/missing
-                if "anon" in rev:
-                    rev["anon"] = True
-                else:
-                    rev["anon"] = False
-
-                # 3: let's recode "minor" in the same way
-                if "minor" in rev:
-                    rev["minor"] = True
-                else:
-                    rev["minor"] = False
-
-                # we're going to change the timestamp to make it work a little better in excel/spreadsheets
-                rev["timestamp"] = rev["timestamp"].replace("T", " ")
-                rev["timestamp"] = rev["timestamp"].replace("Z", "")
-
-                # finally, save the revisions we've seen to a varaible
-                revisions.append(rev)
-
-        # 'continue' tells us there's more revisions to add
-        if 'continue' in api_answer:
-            # replace the 'continue' parameter with the contents of the
-            # api_answer dictionary.
-            parameters.update(api_answer['continue'])
-        else:
-            break
-
-    # return all the revisions for this page
-    return(revisions)
-
-category = "Harry Potter"
-
-# we'll use another api called catscan2 to grab a list of pages in
-# categories and subcategories. it works like all the other apis we've
-# studied!
-#
-# The following requests call basically does the same thing as this string:
-# "http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories={0}&doit=1&format=json".format(category)
-url_catscan = "https://petscan.wmflabs.org/"
-
-parameters = {'depth' : 10,
-              'categories' : category,
-              'format' : 'json',
-              'doit' : 1}
-
-# r = requests.get("http://tools.wmflabs.org/catscan2/catscan2.php?depth=10&categories=Harry Potter&doit=1&format=json"
-
-r = requests.get(url_catscan, params=parameters)
-articles_json = r.json()
-articles = articles_json["*"][0]["a"]["*"]
-
-# open a file to write all the output
-output = open("hp_wiki.tsv", "w", encoding="utf-8")
-output.write("\t".join(["title", "user", "timestamp", "size", "anon", "minor", "revid"]) + "\n")
-
-# for every article
-for article in articles:
-    # skip this until it's an article
-    if article["namespace"] != 0:
-        continue
-
-    # first grab the article's title
-    title = article["title"]
-    print(title)
-
-    # get the list of revisions from our function and then iterate through it,
-    # printing it to our output file
-    revisions = get_article_revisions(title)
-    for rev in revisions:
-        output.write("\t".join(['"' + rev["title"] + '"', '"' + rev["user"] + '"',
-                               rev["timestamp"], str(rev["size"]), str(rev["anon"]),
-                               str(rev["minor"]), str(rev["revid"])]) + "\n")
-
-# close the file, we're done here!
-output.close()
-
diff --git a/harrypotter_edit_trend.ipynb b/harrypotter_edit_trend.ipynb
index 13d3d1118e0d9c023374550ae81599aa97defe72..028ff863ccf5d95915a49dcc807ce01a44318504 100644 (file)
@@ -64,7 +64,7 @@
    "outputs": [],
    "source": [
     "# iterate through every day and print out data into the file\n",
    "outputs": [],
    "source": [
     "# iterate through every day and print out data into the file\n",
-    "for day_string in edits_by_day.keys():\n",
+    "for day_string in edits_by_day:\n",
     "    print(\"\\t\".join([day_string, str(edits_by_day[day_string])]), file=output_file)\n",
     "\n",
     "output_file.close()"
     "    print(\"\\t\".join([day_string, str(edits_by_day[day_string])]), file=output_file)\n",
     "\n",
     "output_file.close()"
diff --git a/hpwp-minor.py b/hpwp-minor.py
deleted file mode 100644 (file)
index 79135a1..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-import encoding_fix
-
-from csv import DictReader
-
-input_file = open("hp_wiki.tsv", 'r', encoding="utf-8")
-
-num_edits = 0
-num_anon = 0
-for row in DictReader(input_file, delimiter="\t"):
-    num_edits = num_edits + 1
-    if row["anon"] == "True":
-        num_anon = num_anon + 1
-
-prop_anon = num_anon / num_edits
-
-print("total edits: {0}".format(num_edits))
-print("anon edits: {0}".format(num_anon))
-print("proportion anon: {0}".format(prop_anon))
diff --git a/hpwp-trend.py b/hpwp-trend.py
deleted file mode 100644 (file)
index b9d1c7b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-import encoding_fix
-
-from csv import DictReader
-
-# read in the input file and count by day
-input_file = open("hp_wiki.tsv", 'r', encoding="utf-8")
-
-edits_by_day = {}
-for row in DictReader(input_file, delimiter="\t"):
-    day_string = row['timestamp'][0:10]
-
-    if day_string in edits_by_day:
-        edits_by_day[day_string] = edits_by_day[day_string] + 1
-    else:
-        edits_by_day[day_string] = 1
-
-input_file.close()
-
-# output the counts by day
-output_file = open("hp_edits_by_day.tsv", "w", encoding='utf-8')
-
-# write a header
-output_file.write("date\tedits\n")
-
-# iterate through every day and print out data into the file
-for day_string in edits_by_day.keys():
-    output_file.write("\t".join([day_string, str(edits_by_day[day_string])]) + "\n")
-
-output_file.close()
