--- /dev/null
+disorder.o
+md5.o
+tmp.foo
+wikiq
+wikiq.o
+
CXXFLAGS = -O3
CFLAGS = $(CXXFLAGS)
-OBJECTS = wikiq.o md5.o disorder.o
+OBJECTS = wikiq.o md5.o
all: wikiq
wikiq: $(OBJECTS)
$(CXX) $(CXXFLAGS) $(OBJECTS) -lpcrecpp -lpcre -lexpat -o wikiq
-disorder.o: disorder.h
md5.o: md5.h
clean:
-wikiq: a WikiMedia XML data dump to .tsv parser
-
-author: Erik Garrison <erik@hypervolu.me>
+wikiq: a simple and fast stream-based MediaWiki XML dump parser
+authors: Erik Garrison <erik@hypervolu.me>
+ Benjamin Mako Hill <mako@atdot.cc>
overview:
-wikiq is written in C using expat. It is designed to enable researchers to
-rapidly extract revision histories (minus text and comments) from impossibly
-large XML datasets.
-
+wikiq is written in C++ using expat. It is designed to enable
+researchers to rapidly extract revision histories (minus text and
+comments) from large XML datasets.
use:
features:
-In addition to parsing WikiMedia XML data dumps into a tab-separated tabular
-format, wikiq extracts article diffs and can execute arbitrary Perl-compatible
-regular expressions against the additions and deletions which differentiate any
-revision from the previous. Any number of regular expressions may be supplied
-on the command line, and may be tagged using the '-n' option.
-
-MD5 checksums are used at runtime for precise detection of reversions.
+In addition to parsing WikiMedia XML data dumps into a tab-separated
+tabular format, wikiq can match Perl-compatible regular expressions
+against revision content, can extract article diffs, and can match
+regexes against the additions and deletions between revisions. Any
+number of regular expressions may be supplied on the command line, and
+may be tagged using the '-n' and '-N' options.
+MD5 checksums of revisions are used at runtime to detect reversions.
output:
wikiq generates these fields for each revision:
title, articleid, revid, timestamp, anon, editor, editorid, minor,
-text_length, text_entropy, text_md5, reversion, additions_size, deletions_size
-.... and additional fields for each regex executed against add/delete diffs
+text_length, text_md5, reversion, additions_size, deletions_size
+.... and additional fields for each regex executed against content or
+added/deleted diffs
Boolean fields are TRUE/FALSE except in the case of reversion, which is blank
unless the article is a revert to a previous revision, in which case, it
contains the revision ID of the revision which was reverted to.
-
-author: Erik Garrison <erik@hypervolu.me>
+++ /dev/null
-/***************************************************************************
- * libdisorder: A Library for Measuring Byte Stream Entropy
- * Copyright (C) 2010 Michael E. Locasto
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.
- * 59 Temple Place, Suite 330
- * Boston, MA 02111-1307 USA
- *
- * $Id$
- **************************************************************************/
-
-#include <math.h> //for log2()
-#include <stdio.h> //for NULL
-#include "disorder.h"
-
-#if defined(__FreeBSD__)
-#define log2(x) (log((x)) * (1./M_LN2))
-#endif
-
-/** Frequecies for each byte */
-static int m_token_freqs[LIBDO_MAX_BYTES]; //frequency of each token in sample
-static float m_token_probs[LIBDO_MAX_BYTES]; //P(each token appearing)
-static int m_num_tokens = 0; //actual number of `seen' tokens, max 256
-static float m_maxent = 0.0;
-static float m_ratio = 0.0;
-static int LIBDISORDER_INITIALIZED = 0;
-
-static void
-initialize_lib()
-{
- int i = 0;
- if(1==LIBDISORDER_INITIALIZED)
- return;
-
- m_num_tokens = 0;
-
- for(i=0;i<LIBDO_MAX_BYTES;i++)
- {
- m_token_freqs[i]=0;
- m_token_probs[i]=0.0;
- }
-
- LIBDISORDER_INITIALIZED = 1;
-}
-
-/**
- * Set m_num_tokens by iterating over m_token_freq[] and maintaining
- * a counter of each position that does not hold the value of zero.
- */
-static void
-count_num_tokens()
-{
- int i = 0;
- int counter = 0;
- for(i=0;i<LIBDO_MAX_BYTES;i++)
- {
- if(0!=m_token_freqs[i])
- {
- counter++;
- }
- }
- m_num_tokens = counter;
- return;
-}
-
-/**
- * Sum frequencies for each token (i.e., byte values 0 through 255)
- * We assume the `length' parameter is correct.
- *
- * This function is available only to functions in this file.
- */
-static void
-get_token_frequencies(char* buf,
- long long length)
-{
- int i=0;
- char* itr=NULL;
- unsigned char c=0;
-
- itr = buf;
-
- //reset number of tokens
- m_num_tokens = 0;
-
- //make sure freqency and probability arrays are cleared
- for(i=0;i<LIBDO_MAX_BYTES;i++)
- {
- m_token_freqs[i] = 0;
- m_token_probs[i] = 0.0;
- }
-
- for(i=0;i<length;i++)
- {
- c = (unsigned char)*itr;
- //assert(0<=c<LIBDO_MAX_BYTES);
- m_token_freqs[c]++;
- itr++;
- }
-}
-
-/**
- * Return entropy (in bits) of this buffer of bytes. We assume that the
- * `length' parameter is correct. This implementation is a translation
- * of the PHP code found here:
- *
- * http://onlamp.com/pub/a/php/2005/01/06/entropy.html
- *
- * with a helpful hint on the `foreach' statement from here:
- *
- * http://php.net/manual/en/control-structures.foreach.php
- */
-float
-shannon_H(char* buf,
- long long length)
-{
- int i = 0;
- float bits = 0.0;
- char* itr=NULL; //values of itr should be zero to 255
- unsigned char token;
- int num_events = 0; //`length' parameter
- float freq = 0.0; //loop variable for holding freq from m_token_freq[]
- float entropy = 0.0; //running entropy sum
-
- if(NULL==buf || 0==length)
- return 0.0;
-
- if(0==LIBDISORDER_INITIALIZED)
- initialize_lib();
-
- itr = buf;
- m_maxent = 0.0;
- m_ratio = 0.0;
- num_events = length;
- get_token_frequencies(itr, num_events); //modifies m_token_freqs[]
- //set m_num_tokens by counting unique m_token_freqs entries
- count_num_tokens();
-
- if(m_num_tokens>LIBDO_MAX_BYTES)
- {
- //report error somehow?
- return 0.0;
- }
-
- //iterate through whole m_token_freq array, but only count
- //spots that have a registered token (i.e., freq>0)
- for(i=0;i<LIBDO_MAX_BYTES;i++)
- {
- if(0!=m_token_freqs[i])
- {
- token = i;
- freq = ((float)m_token_freqs[token]);
- m_token_probs[token] = (freq / ((float)num_events));
- entropy += m_token_probs[token] * log2(m_token_probs[token]);
- }
- }
-
- bits = -1.0 * entropy;
- m_maxent = log2(m_num_tokens);
- m_ratio = bits / m_maxent;
-
- return bits;
-}
-
-int
-get_num_tokens()
-{
- return m_num_tokens;
-}
-
-float
-get_max_entropy()
-{
- return m_maxent;
-}
-
-float
-get_entropy_ratio()
-{
- return m_ratio;
-}
+++ /dev/null
-/***************************************************************************
- * libdisorder: A Library for Measuring Byte Stream Entropy
- * Copyright (C) 2010 Michael E. Locasto
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.
- * 59 Temple Place, Suite 330
- * Boston, MA 02111-1307 USA
- *
- * $Id$
- **************************************************************************/
-
-#ifndef __DISORDER_H_
-#define __DISORDER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** Max number of bytes (i.e., tokens) */
-#define LIBDO_MAX_BYTES 256
-
-/** A convienance value for clients of this library. Feel free to change
- * if you plan to use a larger buffer. You can also safely ignore it, as
- * libdisorder does not use this value internally; it relies on the
- * client-supplied `length' parameter.
- *
- * NB: Might become deprecated because it is potentially misleading and
- * has zero relationship to any library internal state.
- */
-#define LIBDO_BUFFER_LEN 16384
-
-/**
- * Given a pointer to an array of bytes, return a float indicating the
- * level of entropy in bits (a number between zero and eight),
- * assuming a space of 256 possible byte values. The second argument
- * indicates the number of bytes in the sequence. If this sequence
- * runs into unallocated memory, this function should fail with a
- * SIGSEGV.
- */
-float shannon_H(char*, long long);
-
-/** Report the number of (unique) tokens seen. This is _not_ the
- number of individual events seen. For example, if the library sees
- the string `aaab', the number of events is 4 and the number of
- tokens is 2. */
-int get_num_tokens(void);
-
-/** Returns maximum entropy for byte distributions log2(256)=8 bits*/
-float get_max_entropy(void);
-
-/** Returns the ratio of entropy to maxentropy */
-float get_entropy_ratio(void);
-
-#ifdef __cplusplus
-};
-#endif
-
-#endif
#include <stdlib.h>
#include "expat.h"
#include <getopt.h>
-#include "disorder.h"
#include "md5.h"
#include "dtl/dtl.hpp"
#include <vector>
char *comment;
char *text;
vector<string> last_text_tokens;
- vector<pcrecpp::RE> regexes;
- vector<pcrecpp::RE> wp_namespace_res;
- vector<string> regex_names;
+
+ // title regexes
+ vector<pcrecpp::RE> title_regexes;
+
+ // regexes for checking with revisions
+ vector<string> content_regex_names;
+ vector<pcrecpp::RE> content_regexes;
+
+ // regexes for looking within diffs
+ vector<string> diff_regex_names;
+ vector<pcrecpp::RE> diff_regexes;
+
map<string, string> revision_md5; // used for detecting reversions
// track string size of the elements, to prevent O(N^2) processing in charhndl
++pos;
}
- // skip this if the wp_namespace is not in the proscribed list of
- // namespaces
- bool wp_namespace_found = false;
- if (!data->wp_namespace_res.empty()) {
- for (vector<pcrecpp::RE>::iterator r = data->wp_namespace_res.begin(); r != data->wp_namespace_res.end(); ++r) {
- pcrecpp::RE& wp_namespace_re = *r;
- if (wp_namespace_re.PartialMatch(data->title)) {
- wp_namespace_found = true;
+ // look to see if (a) we've passed in a list of /any/ title_regexes
+ // and (b) if all of the title_regex_matches match
+ // if (a) is true and (b) is not, we return
+ bool any_title_regex_match = false;
+ if (!data->title_regexes.empty()) {
+ for (vector<pcrecpp::RE>::iterator r = data->title_regexes.begin(); r != data->title_regexes.end(); ++r) {
+ pcrecpp::RE& title_regex = *r;
+ if (title_regex.PartialMatch(data->title)) {
+ any_title_regex_match = true;
break;
}
}
- if (!wp_namespace_found) {
+ if (!any_title_regex_match) {
return;
}
}
+ // search the content of the revision for a any of the regexes
+ vector<bool> content_regex_matches;
+ if (!data->content_regexes.empty()) {
+ for (vector<pcrecpp::RE>::iterator r = data->content_regexes.begin(); r != data->content_regexes.end(); ++r) {
+ pcrecpp::RE& content_regex = *r;
+ content_regex_matches.push_back(content_regex.PartialMatch(data->text));
+ }
+ }
+
//vector<string> additions;
//vector<string> deletions;
string additions;
string deletions;
- vector<bool> regex_matches_adds;
- vector<bool> regex_matches_dels;
+ vector<bool> diff_regex_matches_adds;
+ vector<bool> diff_regex_matches_dels;
if (data->last_text_tokens.empty()) {
additions = data->text;
if (!additions.empty()) {
//cout << "ADD: " << additions << endl;
- for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
- pcrecpp::RE& regex = *r;
- regex_matches_adds.push_back(regex.PartialMatch(additions));
+ for (vector<pcrecpp::RE>::iterator r = data->diff_regexes.begin(); r != data->diff_regexes.end(); ++r) {
+ pcrecpp::RE& diff_regex = *r;
+ diff_regex_matches_adds.push_back(diff_regex.PartialMatch(additions));
}
}
if (!deletions.empty()) {
//cout << "DEL: " << deletions << endl;
- for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
- pcrecpp::RE& regex = *r;
- regex_matches_dels.push_back(regex.PartialMatch(deletions));
+ for (vector<pcrecpp::RE>::iterator r = data->diff_regexes.begin(); r != data->diff_regexes.end(); ++r) {
+ pcrecpp::RE& diff_regex = *r;
+ diff_regex_matches_dels.push_back(diff_regex.PartialMatch(deletions));
}
}
<< data->editorid << "\t"
<< ((data->minor) ? "TRUE" : "FALSE") << "\t"
<< (unsigned int) data->text_size << "\t"
- << shannon_H(data->text, data->text_size) << "\t"
<< md5_hex_output << "\t"
<< reverted_to << "\t"
<< (int) additions.size() << "\t"
<< (int) deletions.size();
- for (int n = 0; n < data->regex_names.size(); ++n) {
- cout << "\t" << ((!regex_matches_adds.empty() && regex_matches_adds.at(n)) ? "TRUE" : "FALSE")
- << "\t" << ((!regex_matches_dels.empty() && regex_matches_dels.at(n)) ? "TRUE" : "FALSE");
+ for (int n = 0; n < data->content_regex_names.size(); ++n) {
+ cout << "\t" << ((!content_regex_matches.empty()
+ && content_regex_matches.at(n)) ? "TRUE" : "FALSE");
+ }
+
+ for (int n = 0; n < data->diff_regex_names.size(); ++n) {
+ cout << "\t" << ((!diff_regex_matches_adds.empty() && diff_regex_matches_adds.at(n)) ? "TRUE" : "FALSE")
+ << "\t" << ((!diff_regex_matches_dels.empty() && diff_regex_matches_dels.at(n)) ? "TRUE" : "FALSE");
}
cout << endl;
<< endl
<< "options:" << endl
<< " -v verbose mode prints text and comments after each line of tab separated data" << endl
- << " -n name of the following regex (e.g. -n name -r \"...\")" << endl
- << " -r regex to check against additions and deletions" << endl
- << " -t regex(es) to check title against as a way of limiting output to specific namespaces" << endl
+    << "  -n     name of the following regex for content (e.g. -n name -r \"...\")" << endl
+ << " -r regex to check against content of the revision" << endl
+ << " -N name of the following regex for diffs (e.g. -N name -R \"...\")" << endl
+ << " -R regex to check against diffs (i.e., additions and deletions)" << endl
+ << " -t parse revisions only from pages whose titles match regex(es)" << endl
<< endl
<< "Takes a wikimedia data dump XML stream on standard in, and produces" << endl
<< "a tab-separated stream of revisions on standard out:" << endl
<< endl
<< "title, articleid, revid, timestamp, anon, editor, editorid, minor," << endl
- << "text_length, text_entropy, text_md5, reversion, additions_size, deletions_size" << endl
+ << "text_length, text_md5, reversion, additions_size, deletions_size" << endl
<< ".... and additional fields for each regex executed against add/delete diffs" << endl
<< endl
<< "Boolean fields are TRUE/FALSE except in the case of reversion, which is blank" << endl
<< "unless the article is a revert to a previous revision, in which case, it" << endl
<< "contains the revision ID of the revision which was reverted to." << endl
<< endl
- << "author: Erik Garrison <erik@hypervolu.me>" << endl;
+        << "authors: Erik Garrison <erik@hypervolu.me>" << endl
+ << " Benjamin Mako Hill <mako@atdot.cc>" << endl;
}
// in "simple" output, we don't print text and comments
output_type = SIMPLE;
char c;
- string regex_name;
+ string diff_regex_name;
+ string content_regex_name;
// the user data struct which is passed to callback functions
revisionData data;
output_type = FULL;
break;
case 'n':
- regex_name = optarg;
+ content_regex_name = optarg;
break;
case 'r':
- data.regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
- data.regex_names.push_back(regex_name);
- if (!regex_name.empty()) {
- regex_name.clear();
+ data.content_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
+ data.content_regex_names.push_back(content_regex_name);
+ if (!content_regex_name.empty()) {
+ content_regex_name.clear();
+ }
+ break;
+ case 'N':
+ diff_regex_name = optarg;
+ break;
+ case 'R':
+ data.diff_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
+ data.diff_regex_names.push_back(diff_regex_name);
+ if (!diff_regex_name.empty()) {
+ diff_regex_name.clear();
}
break;
case 'h':
exit(0);
break;
case 't':
- data.wp_namespace_res.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
+ data.title_regexes.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
break;
}
cout << "title" << "\t"
<< "articleid" << "\t"
<< "revid" << "\t"
- << "date" << " "
+ << "date" << "_"
<< "time" << "\t"
<< "anon" << "\t"
<< "editor" << "\t"
<< "editor_id" << "\t"
<< "minor" << "\t"
<< "text_size" << "\t"
- << "text_entropy" << "\t"
<< "text_md5" << "\t"
<< "reversion" << "\t"
<< "additions_size" << "\t"
<< "deletions_size";
int n = 0;
- if (!data.regexes.empty()) {
- for (vector<pcrecpp::RE>::iterator r = data.regexes.begin(); r != data.regexes.end(); ++r, ++n) {
- if (data.regex_names.at(n).empty()) {
+ if (!data.content_regexes.empty()) {
+ for (vector<pcrecpp::RE>::iterator r = data.content_regexes.begin();
+ r != data.content_regexes.end(); ++r, ++n) {
+ if (data.content_regex_names.at(n).empty()) {
+ cout << "\t" << "regex" << n;
+ } else {
+ cout << "\t" << data.content_regex_names.at(n);
+ }
+ }
+ }
+
+ if (!data.diff_regexes.empty()) {
+ for (vector<pcrecpp::RE>::iterator r = data.diff_regexes.begin(); r != data.diff_regexes.end(); ++r, ++n) {
+ if (data.diff_regex_names.at(n).empty()) {
cout << "\t" << "regex_" << n << "_add"
<< "\t" << "regex_" << n << "_del";
} else {
- cout << "\t" << data.regex_names.at(n) << "_add"
- << "\t" << data.regex_names.at(n) << "_del";
+ cout << "\t" << data.diff_regex_names.at(n) << "_add"
+ << "\t" << data.diff_regex_names.at(n) << "_del";
}
}
}
+
cout << endl;
// shovel data into the parser