X-Git-Url: https://projects.mako.cc/source/wikiq/blobdiff_plain/e861323a25caf19a4dbda73b69f6400b5f183da3..8e1c1afcc765c0c7df502b5451e6035e7272c8c6:/wikiq.cpp

diff --git a/wikiq.cpp b/wikiq.cpp
index bd895d4..e8ca4d2 100644
--- a/wikiq.cpp
+++ b/wikiq.cpp
@@ -28,9 +28,9 @@ using namespace std;
 
 #define MEGABYTE 1048576
 #define FIELD_BUFFER_SIZE 1024
-// 2048 KB in bytes + 1
-//#define TEXT_BUFFER_SIZE 2097153
-//#define TEXT_BUFFER_SIZE 10485760
+
+// this can be changed at runtime if we encounter an article larger than 10mb
+size_t text_buffer_size = 10 * MEGABYTE;
 
 enum elements {
     TITLE, ARTICLEID, REVISION, REVID, TIMESTAMP, CONTRIBUTOR,
@@ -57,6 +57,7 @@ typedef struct {
     char *text;
     vector<string> last_text_tokens;
     vector<pcrecpp::RE> regexes;
+    vector<pcrecpp::RE> wp_namespace_res;
    vector<string> regex_names;
     map<string, string> revision_md5; // used for detecting reversions
 
@@ -162,7 +163,7 @@ void cleanup_article(revisionData *data) {
 static void
 init_data(revisionData *data, outtype output_type)
 {
-    data->text = (char*) malloc(4 * MEGABYTE); // 2MB is the article length limit, 4MB is 'safe'?
+    data->text = (char*) malloc(text_buffer_size);
     data->comment = (char*) malloc(FIELD_BUFFER_SIZE);
     data->title = (char*) malloc(FIELD_BUFFER_SIZE);
     data->articleid = (char*) malloc(FIELD_BUFFER_SIZE);
@@ -246,6 +247,22 @@ write_row(revisionData *data)
         ++pos;
     }
 
+    // skip this if the wp_namespace is not in the proscribed list of
+    // namespaces
+    bool wp_namespace_found = false;
+    if (!data->wp_namespace_res.empty()) {
+        for (vector<pcrecpp::RE>::iterator r = data->wp_namespace_res.begin(); r != data->wp_namespace_res.end(); ++r) {
+            pcrecpp::RE& wp_namespace_re = *r;
+            if (wp_namespace_re.PartialMatch(data->title)) {
+                wp_namespace_found = true;
+                break;
+            }
+        }
+        if (!wp_namespace_found) {
+            return;
+        }
+    }
+
     //vector<string> additions;
     //vector<string> deletions;
     string additions;
@@ -254,7 +271,9 @@ write_row(revisionData *data)
     vector<bool> regex_matches_adds;
     vector<bool> regex_matches_dels;
 
-    if (!data->last_text_tokens.empty()) {
+    if (data->last_text_tokens.empty()) {
+        additions = data->text;
+    } else {
         // do the diff
         dtl::Diff< string, vector<string> > d(data->last_text_tokens, text_tokens);
 
@@ -274,25 +293,22 @@ write_row(revisionData *data)
                 break;
             }
         }
-
-        if (!additions.empty()) {
-            //cout << "ADD: " << additions << endl;
-            for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
-                pcrecpp::RE& regex = *r;
-                regex_matches_adds.push_back(regex.PartialMatch(additions));
-            }
+    }
+
+    if (!additions.empty()) {
+        //cout << "ADD: " << additions << endl;
+        for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
+            pcrecpp::RE& regex = *r;
+            regex_matches_adds.push_back(regex.PartialMatch(additions));
         }
+    }
 
-        if (!deletions.empty()) {
-            //cout << "DEL: " << deletions << endl;
-            for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
-                pcrecpp::RE& regex = *r;
-                regex_matches_dels.push_back(regex.PartialMatch(deletions));
-            }
+    if (!deletions.empty()) {
+        //cout << "DEL: " << deletions << endl;
+        for (vector<pcrecpp::RE>::iterator r = data->regexes.begin(); r != data->regexes.end(); ++r) {
+            pcrecpp::RE& regex = *r;
+            regex_matches_dels.push_back(regex.PartialMatch(deletions));
         }
-
-        // apply regex to the diff
-
     }
 
     data->last_text_tokens = text_tokens;
@@ -343,7 +359,6 @@ split_timestamp(revisionData *data)
 char*
 strlcatn(char *dest, const char *src, size_t dest_len, size_t n)
 {
-    //size_t dest_len = strlen(dest);
     size_t i;
 
     for (i = 0 ; i < n && src[i] != '\0' ; i++)
@@ -357,15 +372,18 @@ static void
 charhndl(void* vdata, const XML_Char* s, int len)
 {
     revisionData* data = (revisionData*) vdata;
+    size_t bufsz;
     if (data->element != UNUSED && data->position != SKIP) {
-        //char t[len];
-        //strncpy(t,s,len);
-        //t[len] = '\0'; // makes t a well-formed string
         switch (data->element) {
             case TEXT:
-                // printf("buffer length = %i, text: %s\n", len, t);
+                // check if we'd overflow our buffer
+                bufsz = data->text_size + len;
+                if (bufsz + 1 > text_buffer_size) {
+                    data->text = (char*) realloc(data->text, bufsz + 1);
+                    text_buffer_size = bufsz + 1;
+                }
                 strlcatn(data->text, s, data->text_size, len);
-                data->text_size += len;
+                data->text_size = bufsz;
                 break;
             case COMMENT:
                 strlcatn(data->comment, s, data->comment_size, len);
@@ -487,9 +505,10 @@ void print_usage(char* argv[]) {
     cerr << "usage: | " << argv[0] << "[options]" << endl
          << endl
          << "options:" << endl
-         << " -t print text and comments after each line of tab separated data" << endl
-         << " -n name of the following regex (e.g. -N name -r \"...\")" << endl
+         << " -v verbose mode prints text and comments after each line of tab separated data" << endl
+         << " -n name of the following regex (e.g. -n name -r \"...\")" << endl
          << " -r regex to check against additions and deletions" << endl
+         << " -t regex(es) to check title against as a way of limiting output to specific namespaces" << endl
          << endl
          << "Takes a wikimedia data dump XML stream on standard in, and produces" << endl
         << "a tab-separated stream of revisions on standard out:" << endl
@@ -520,13 +539,13 @@ main(int argc, char *argv[])
     // the user data struct which is passed to callback functions
     revisionData data;
 
-    while ((c = getopt(argc, argv, "htn:r:")) != -1)
+    while ((c = getopt(argc, argv, "hvn:r:t:")) != -1)
         switch (c)
         {
            case 'd':
                 dry_run = 1;
                 break;
-            case 't':
+            case 'v':
                 output_type = FULL;
                 break;
             case 'n':
@@ -543,6 +562,9 @@ main(int argc, char *argv[])
                 print_usage(argv);
                 exit(0);
                 break;
+            case 't':
+                data.wp_namespace_res.push_back(pcrecpp::RE(optarg, pcrecpp::UTF8()));
+                break;
         }
 
     if (dry_run) { // lets us print initialization options
@@ -613,7 +635,7 @@ main(int argc, char *argv[])
         // passes the buffer of data to the parser and checks for error
         // (this is where the callbacks are invoked)
         if (XML_Parse(parser, buf, len, done) == XML_STATUS_ERROR) {
-            cerr << XML_ErrorString(XML_GetErrorCode(parser)) << " at line "
+            cerr << "XML ERROR: " << XML_ErrorString(XML_GetErrorCode(parser)) << " at line "
                 << (int) XML_GetCurrentLineNumber(parser) << endl;
             return 1;
         }
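
The new -t option collects one or more title regexes, and the patched write_row() drops any revision whose title matches none of them. The standalone sketch below (not part of the patch) mirrors that check so it can be tried in isolation; main(), the sample patterns, and the sample titles are invented for illustration, and only libpcrecpp is assumed.

    // Illustrative sketch of the title/namespace filter added to write_row().
    // Not code from the patch: the patterns and titles are invented examples.
    #include <iostream>
    #include <vector>
    #include <pcrecpp.h>

    using namespace std;

    int main() {
        // regexes as they would be accumulated from repeated -t options
        vector<pcrecpp::RE> wp_namespace_res;
        wp_namespace_res.push_back(pcrecpp::RE("^Talk:", pcrecpp::UTF8()));
        wp_namespace_res.push_back(pcrecpp::RE("^User:", pcrecpp::UTF8()));

        const char* titles[] = { "Talk:Anarchism", "Anarchism", "User:Example" };

        for (size_t i = 0; i < sizeof(titles) / sizeof(titles[0]); ++i) {
            // same test the patch applies to data->title: keep the revision
            // if any -t regex partially matches the title
            bool wp_namespace_found = false;
            for (vector<pcrecpp::RE>::iterator r = wp_namespace_res.begin();
                 r != wp_namespace_res.end(); ++r) {
                if (r->PartialMatch(titles[i])) {
                    wp_namespace_found = true;
                    break;
                }
            }
            cout << titles[i] << (wp_namespace_found ? ": kept" : ": skipped") << endl;
        }
        return 0;
    }

With the patched wikiq itself, the same effect comes from passing -t once per pattern on the command line, since each optarg is pushed onto wp_namespace_res.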
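
The other substantive change is the text buffer: instead of a fixed 4 MB malloc, the TEXT case in charhndl() now grows the buffer with realloc whenever the accumulated revision text would overflow it. A minimal sketch of that grow-on-demand pattern follows; append_text() is an invented helper standing in for the charhndl()/strlcatn() pair and is not in wikiq.

    // Minimal sketch of the grow-on-demand buffer used in the patched charhndl().
    // append_text() is an invented helper for illustration only.
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    #define MEGABYTE 1048576

    // start with a 10 MB buffer, as the patch does, and grow it at runtime
    static size_t text_buffer_size = 10 * MEGABYTE;

    static void append_text(char **text, size_t *text_size, const char *s, size_t len) {
        size_t bufsz = *text_size + len;
        if (bufsz + 1 > text_buffer_size) {              // +1 leaves room for the NUL
            *text = (char*) realloc(*text, bufsz + 1);   // grow instead of truncating
            text_buffer_size = bufsz + 1;
        }
        memcpy(*text + *text_size, s, len);
        (*text)[bufsz] = '\0';
        *text_size = bufsz;
    }

    int main() {
        char *text = (char*) malloc(text_buffer_size);
        size_t text_size = 0;
        text[0] = '\0';

        // expat delivers character data in chunks; append them as they arrive
        append_text(&text, &text_size, "chunk one, ", 11);
        append_text(&text, &text_size, "chunk two", 9);
        printf("%s (%zu bytes)\n", text, text_size);

        free(text);
        return 0;
    }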