+++ /dev/null
-###
-# mw - VCS-like nonsense for MediaWiki websites
-# Copyright (C) 2010 Ian Weller <ian@ianweller.org>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program. If not, see <http://www.gnu.org/licenses/>.
-###
-
-import cookielib
-import gzip
-import json
-import mw
-import mw.metadir
-import os
-from StringIO import StringIO
-import urllib
-import urllib2
-
-
-class API(object):
-
- def __init__(self, api_url, metadir):
- self.api_url = api_url
- self.metadir = metadir
- self.cookiejar = cookielib.MozillaCookieJar(os.path.join(
- self.metadir.location, 'cookies'))
- try:
- self.cookiejar.load()
- except IOError:
- self.cookiejar.save()
- self.cookiejar.load()
- self.opener = urllib2.build_opener(
- urllib2.HTTPCookieProcessor(self.cookiejar))
- self._high_limits = None
-
- def call(self, data):
- data['format'] = 'json'
- user_agent = 'mw/%s +http://github.com/ianweller/mw' % mw.version
- request = urllib2.Request(self.api_url, urllib.urlencode(data),
- {'User-Agent': user_agent})
- request.add_header('Accept-encoding', 'gzip')
- response = self.opener.open(request)
- self.cookiejar.save()
- if response.headers.get('Content-Encoding') == 'gzip':
- compressed = StringIO(response.read())
- gzipper = gzip.GzipFile(fileobj=compressed)
- data = gzipper.read()
- else:
- data = response.read()
- the_data = json.loads(data)
- if 'error' in the_data.keys():
- raise APIError(the_data['error']['info'])
- return the_data
-
- def limits(self, low, high):
- if self._high_limits == None:
- result = self.call({'action': 'query',
- 'meta': 'userinfo',
- 'uiprop': 'rights'})
- self._high_limits = 'apihighlimits' in \
- result['query']['userinfo']['rights']
- if self._high_limits:
- return high
- else:
- return low
-
-
-class APIError(Exception):
-
- def __init__(self, info):
- self.info = info
-
- def __str__(self):
- return self.info
-
-
-def pagename_to_filename(name):
- name = name.replace(' ', '_')
- name = name.replace('/', '!')
- return name
-
-
-def filename_to_pagename(name):
- name = name.replace('!', '/')
- name = name.replace('_', ' ')
- return name
###
import codecs
+import cookielib
import getpass
import hashlib
-import mw.api
import mw.metadir
from optparse import OptionParser, OptionGroup
import os
+import simplemediawiki
import sys
sys.exit(1)
def _api_setup(self):
+ cookie_file = os.path.join(self.metadir.location, 'cookies')
self.api_url = self.metadir.config.get('remote', 'api_url')
- self.api = mw.api.API(self.api_url, self.metadir)
+ self.api = simplemediawiki.MediaWiki(self.api_url,
+ cookie_file=cookie_file)
class InitCommand(CommandBase):
self.metadir.pagedict_add(pagename, pageid, revids[-1])
self.metadir.pages_add_rv(int(pageid),
response[pageid]['revisions'][0])
- filename = mw.api.pagename_to_filename(pagename)
+ filename = mw.metadir.pagename_to_filename(pagename)
with file(os.path.join(self.metadir.root, filename + '.wiki'),
'w') as fd:
data = response[pageid]['revisions'][0]['*']
for file in status:
if status[file] == 'U':
print self.metadir.diff_rv_to_working(
- mw.api.filename_to_pagename(file[:-5])),
+ mw.metadir.filename_to_pagename(file[:-5])),
class CommitCommand(CommandBase):
'action': 'query',
'prop': 'info|revisions',
'intoken': 'edit',
- 'titles': mw.api.filename_to_pagename(file[:-5]),
+ 'titles': mw.metadir.filename_to_pagename(file[:-5]),
}
response = self.api.call(data)
pageid = response['query']['pages'].keys()[0]
textmd5 = md5.hexdigest()
data = {
'action': 'edit',
- 'title': mw.api.filename_to_pagename(file[:-5]),
+ 'title': mw.metadir.filename_to_pagename(file[:-5]),
'token': edittoken,
'text': text,
'md5': textmd5,
import codecs
import ConfigParser
import json
-import mw.api
import os
from StringIO import StringIO
import sys
os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
def clean_page(self, pagename):
- filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+ filename = pagename_to_filename(pagename) + '.wiki'
cur_content = codecs.open(filename, 'r', 'utf-8').read()
if len(cur_content) != 0 and cur_content[-1] == '\n':
cur_content = cur_content[:-1]
for full in check:
name = os.path.split(full)[1]
if name[-5:] == '.wiki':
- pagename = mw.api.filename_to_pagename(name[:-5])
+ pagename = filename_to_pagename(name[:-5])
pageid = self.get_pageid_from_pagename(pagename)
if not pageid:
status[os.path.relpath(full, self.root)] = '?'
def diff_rv_to_working(self, pagename, oldrvid=0, newrvid=0):
# oldrvid=0 means latest fetched revision
# newrvid=0 means working copy
- filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+ filename = pagename_to_filename(pagename) + '.wiki'
filename = filename.decode('utf-8')
pageid = self.get_pageid_from_pagename(pagename)
if not pageid:
if diff[-1] == '\n':
diff = diff[:-1]
return diff
+
+
+def pagename_to_filename(name):
+ name = name.replace(' ', '_')
+ name = name.replace('/', '!')
+ return name
+
+
+def filename_to_pagename(name):
+ name = name.replace('!', '/')
+ name = name.replace('_', ' ')
+ return name