-Run this awesome whatnot with:
- PYTHONPATH=$PWD/src bin/mw
+ _____________________________
+< Patches are always welcome! >
+ -----------------------------
+ \ ,__,
+ \ (oo)____
+ (__) )\
+ ||--|| *
+
+The preferred method of submitting patches is by forking the repository,
+committing changes, and then making the repository accessible. This is most
+easily done on GitHub, but you can put it anywhere I can get it.
Changing how something already works in the .mw metadir requires a damn
-good reason and we don't want to introduce incompatibilities at all in
-the tree. On the other hand, patches are greatly welcomed!
+good reason since we don't want to introduce incompatibilities at all in
+the tree.
+
+Code submitted should follow PEP 8. If it doesn't, I'll modify your changes (in
+later commits) until they are in line with that style.
Copyright (C) 2010 Ian Weller <ian@ianweller.org>
== Basic workflow ==
-See HACKING on how to run this; a nice setup.py isn't done yet.
+We don't have a nice installation process yet, so set the following alias:
+ alias mw="PYTHONPATH=PATH_TO/mw/src PATH_TO/mw/bin/mw"
+where PATH_TO is the path to your local mw repository.
mw init http://example.com/w/api.php
mw login # if you need/want to
mw pull 'Main Page'
$EDITOR Main_Page.wiki
-mw fetch # check for newer revisions
-mw update # apply newer revisions
+mw fetch # check for newer revisions; this command doesn't exist yet
+mw update # apply newer revisions; this command doesn't exist yet
mw commit
== License ==
+++ /dev/null
-###
-# mw - VCS-like nonsense for MediaWiki websites
-# Copyright (C) 2010 Ian Weller <ian@ianweller.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-###
-
-import cookielib
-import gzip
-import json
-import mw
-import mw.metadir
-import os
-from StringIO import StringIO
-import urllib
-import urllib2
-
-
-class API(object):
-
- def __init__(self, api_url, metadir):
- self.api_url = api_url
- self.metadir = metadir
- self.cookiejar = cookielib.MozillaCookieJar(os.path.join(
- self.metadir.location, 'cookies'))
- try:
- self.cookiejar.load()
- except IOError:
- self.cookiejar.save()
- self.cookiejar.load()
- self.opener = urllib2.build_opener(
- urllib2.HTTPCookieProcessor(self.cookiejar))
- self._high_limits = None
-
- def call(self, data):
- data['format'] = 'json'
- user_agent = 'mw/%s +http://github.com/ianweller/mw' % mw.version
- request = urllib2.Request(self.api_url, urllib.urlencode(data),
- {'User-Agent': user_agent})
- request.add_header('Accept-encoding', 'gzip')
- response = self.opener.open(request)
- self.cookiejar.save()
- if response.headers.get('Content-Encoding') == 'gzip':
- compressed = StringIO(response.read())
- gzipper = gzip.GzipFile(fileobj=compressed)
- data = gzipper.read()
- else:
- data = response.read()
- the_data = json.loads(data)
- if 'error' in the_data.keys():
- raise APIError(the_data['error']['info'])
- return the_data
-
- def limits(self, low, high):
- if self._high_limits == None:
- result = self.call({'action': 'query',
- 'meta': 'userinfo',
- 'uiprop': 'rights'})
- self._high_limits = 'apihighlimits' in \
- result['query']['userinfo']['rights']
- if self._high_limits:
- return high
- else:
- return low
-
-
-class APIError(Exception):
-
- def __init__(self, info):
- self.info = info
-
- def __str__(self):
- return self.info
-
-
-def pagename_to_filename(name):
- name = name.replace(' ', '_')
- name = name.replace('/', '!')
- return name
-
-
-def filename_to_pagename(name):
- name = name.replace('!', '/')
- name = name.replace('_', ' ')
- return name
###
import codecs
+import cookielib
import getpass
import hashlib
-import mw.api
import mw.metadir
from optparse import OptionParser, OptionGroup
import os
+import simplemediawiki
import sys
import time
sys.exit(1)
def _api_setup(self):
+ cookie_file = os.path.join(self.metadir.location, 'cookies')
self.api_url = self.metadir.config.get('remote', 'api_url')
- self.api = mw.api.API(self.api_url, self.metadir)
+ self.api = simplemediawiki.MediaWiki(self.api_url,
+ cookie_file=cookie_file)
class InitCommand(CommandBase):
self.metadir.pagedict_add(pagename, pageid, revids[-1])
self.metadir.pages_add_rv(int(pageid),
response[pageid]['revisions'][0])
- filename = mw.api.pagename_to_filename(pagename)
+ filename = mw.metadir.pagename_to_filename(pagename)
with file(os.path.join(self.metadir.root, filename + '.wiki'),
'w') as fd:
data = response[pageid]['revisions'][0]['*']
for file in status:
if status[file] == 'U':
print self.metadir.diff_rv_to_working(
- mw.api.filename_to_pagename(file[:-5])),
+ mw.metadir.filename_to_pagename(file[:-5])),
class CommitCommand(CommandBase):
'action': 'query',
'prop': 'info|revisions',
'intoken': 'edit',
- 'titles': mw.api.filename_to_pagename(file[:-5]),
+ 'titles': mw.metadir.filename_to_pagename(file[:-5]),
}
response = self.api.call(data)
pageid = response['query']['pages'].keys()[0]
textmd5 = md5.hexdigest()
data = {
'action': 'edit',
- 'title': mw.api.filename_to_pagename(file[:-5]),
+ 'title': mw.metadir.filename_to_pagename(file[:-5]),
'token': edittoken,
'text': text,
'md5': textmd5,
import codecs
import ConfigParser
import json
-import mw.api
import os
from StringIO import StringIO
import sys
# create cache/pages/
os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
-
-
def clean_page(self, pagename):
- filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+ filename = pagename_to_filename(pagename) + '.wiki'
cur_content = codecs.open(filename, 'r', 'utf-8').read()
- if ( (len(cur_content) != 0) and (cur_content[-1] == '\n') ):
- cur_content = cur_content[:-1]
-
+ if len(cur_content) != 0 and cur_content[-1] == '\n':
+ cur_content = cur_content[:-1]
fd = file(filename, 'w')
- fd.write(cur_content.encode('utf-8'))
+ fd.write(cur_content.encode('utf-8'))
fd.close()
def pagedict_add(self, pagename, pageid, currentrv):
for full in check:
name = os.path.split(full)[1]
if name[-5:] == '.wiki':
- pagename = mw.api.filename_to_pagename(name[:-5])
+ pagename = filename_to_pagename(name[:-5])
pageid = self.get_pageid_from_pagename(pagename)
if not pageid:
status[os.path.relpath(full, self.root)] = '?'
def diff_rv_to_working(self, pagename, oldrvid=0, newrvid=0):
# oldrvid=0 means latest fetched revision
# newrvid=0 means working copy
- filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+ filename = pagename_to_filename(pagename) + '.wiki'
filename = filename.decode('utf-8')
pageid = self.get_pageid_from_pagename(pagename)
if not pageid:
oldrvid = self.pages_get_rv_list(pageid)[-1]
oldrv = self.pages_get_rv(pageid, oldrvid)
oldname = 'a/%s (revision %i)' % (filename, oldrvid)
- old = [i + '\n' for i in oldrv['content'].encode('utf-8').split('\n')]
+ old = [i + '\n' for i in \
+ oldrv['content'].encode('utf-8').split('\n')]
if newrvid == 0:
- cur_content = codecs.open(filename, 'r', 'utf-8').read().encode('utf-8')
+ cur_content = codecs.open(filename, 'r', 'utf-8').read()
+ cur_content = cur_content.encode('utf-8')
if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
cur_content = cur_content[:-1]
newname = 'b/%s (working copy)' % filename
if diff[-1] == '\n':
diff = diff[:-1]
return diff
+
+
+def pagename_to_filename(name):
+ name = name.replace(' ', '_')
+ name = name.replace('/', '!')
+ return name
+
+
+def filename_to_pagename(name):
+ name = name.replace('!', '/')
+ name = name.replace('_', ' ')
+ return name