X-Git-Url: https://projects.mako.cc/source/mw/blobdiff_plain/c5075c4a66b4f0f461489f1cee62068b67e01ff0..9b8e57d467b0a3c0ae3b38fd781f668d409ddac6:/src/mw/metadir.py

diff --git a/src/mw/metadir.py b/src/mw/metadir.py
index 49a91cf..7126872 100644
--- a/src/mw/metadir.py
+++ b/src/mw/metadir.py
@@ -1,6 +1,6 @@
 ###
 # mw - VCS-like nonsense for MediaWiki websites
-# Copyright (C) 2009 Ian Weller <ian@ianweller.org>
+# Copyright (C) 2010 Ian Weller <ian@ianweller.org>
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -16,13 +16,16 @@
 # with this program.  If not, see <http://www.gnu.org/licenses/>.
 ###
 
+import codecs
 import ConfigParser
 import json
+import mw.api
 import os
 import sys
-import time
+
 
 class Metadir(object):
+
     def __init__(self):
         self.me = os.path.basename(sys.argv[0])
         root = os.getcwd()
@@ -30,7 +33,7 @@ class Metadir(object):
             if '.mw' in os.listdir(root):
                 self.root = root
                 break
-            (head, tail) = os.path.split(root)
+            head = os.path.split(root)[0]
             if head == root:
                 self.root = os.getcwd()
                 break
@@ -53,7 +56,7 @@ class Metadir(object):
         os.mkdir(self.location, 0755)
         # metadir versioning
         fd = file(os.path.join(self.location, 'version'), 'w')
-        fd.write('1')
+        fd.write('1')  # XXX THIS API VERSION NOT LOCKED IN YET
         fd.close()
         # create config
         self.config = ConfigParser.RawConfigParser()
@@ -70,27 +73,78 @@ class Metadir(object):
         # create cache/pages/
         os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
 
-    def pagedict_add(self, pagename, pageid):
+    def pagedict_add(self, pagename, pageid, currentrv):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
         pagedict = json.loads(fd.read())
-        pagedict[pagename] = int(pageid)
+        pagedict[pagename] = {'id': int(pageid), 'currentrv': int(currentrv)}
         fd.seek(0)
         fd.write(json.dumps(pagedict))
         fd.truncate()
         fd.close()
 
-    def pages_add_rev(self, pageid, rv):
+    def get_pageid_from_pagename(self, pagename):
+        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
+        pagedict = json.loads(fd.read())
+        if pagename in pagedict.keys():
+            return pagedict[pagename]
+        else:
+            return None
+
+    def pages_add_rv(self, pageid, rv):
         pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
         fd = file(pagefile, 'w+')
-        pagedata = json.loads(fd.read())
+        pagedata_raw = fd.read()
+        if pagedata_raw == '':
+            pagedata = {}
+        else:
+            pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
-        if pageid not in pagedata.keys():
-            pagedata[pageid] = {}
-        pagedata[pageid][rvid] = {
-                'user': rv['user'], 'timestamp': rv['timestamp'],
-                'content': rv['*'],
+        pagedata[rvid] = {
+                'user': rv['user'], 'timestamp': rv['timestamp']
         }
+        if '*' in rv.keys():
+            pagedata[rvid]['content'] = rv['*']
         fd.seek(0)
         fd.write(json.dumps(pagedata))
         fd.truncate()
         fd.close()
+
+    def pages_get_rv_list(self, pageid):
+        pagefile = os.path.join(self.location, 'cache', 'pages',
+                                str(pageid['id']))
+        fd = file(pagefile, 'r')
+        pagedata = json.loads(fd.read())
+        rvs = [int(x) for x in pagedata.keys()]
+        rvs.sort()
+        return rvs
+
+    def pages_get_rv(self, pageid, rvid):
+        pagefile = os.path.join(self.location, 'cache', 'pages',
+                                str(pageid['id']))
+        fd = file(pagefile, 'r')
+        pagedata = json.loads(fd.read())
+        return pagedata[str(rvid)]
+
+    def working_dir_status(self):
+        status = {}
+        check = []
+        for root, dirs, files in os.walk(self.root):
+            if root == self.root:
+                dirs.remove('.mw')
+            for name in files:
+                check.append(os.path.join(root, name))
+        check.sort()
+        for full in check:
+            name = os.path.split(full)[1]
+            if name[-5:] == '.wiki':
+                pagename = mw.api.filename_to_pagename(name[:-5])
+                pageid = self.get_pageid_from_pagename(pagename)
+                if not pageid:
+                    status[os.path.relpath(full, self.root)] = '?'
+                else:
+                    rvid = self.pages_get_rv_list(pageid)[-1]
+                    rv = self.pages_get_rv(pageid, rvid)
+                    cur_content = codecs.open(full, 'r', 'utf-8').read()
+                    if cur_content != rv['content']:
+                        status[os.path.relpath(full, self.root)] = 'U'
+        return status
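
Taken together, the hunks above change the pagedict cache from a pagename-to-id map into a pagename-to-{'id', 'currentrv'} map, split revision storage into per-page files keyed by revision id, and add a working-directory status walk. A minimal usage sketch of the new interface follows, assuming an already-initialized metadir (a '.mw' directory with its caches created); the page title, ids, and revision dict values are illustrative assumptions, not part of the commit:

    # Illustrative only: assumes Metadir() finds an initialized .mw
    # directory, and that the revision dict is shaped like the MediaWiki
    # API output the code above consumes ('revid', 'user', 'timestamp',
    # and '*' for the revision content).
    from mw.metadir import Metadir

    m = Metadir()
    m.pagedict_add('Main Page', 1, 42)  # cache page id 1, current rev 42
    pageid = m.get_pageid_from_pagename('Main Page')
    # pageid is now {'id': 1, 'currentrv': 42}

    m.pages_add_rv(pageid['id'], {      # note: takes the bare id
        'revid': 42,
        'user': 'ExampleUser',          # assumed values
        'timestamp': '2010-01-01T00:00:00Z',
        '*': u'Example page text',
    })

    print m.pages_get_rv_list(pageid)   # [42] -- takes the pagedict entry
    print m.pages_get_rv(pageid, 42)['user']
    print m.working_dir_status()        # e.g. {'Main_Page.wiki': 'U'}

Note the asymmetry the sketch relies on: pages_add_rv() expects the bare numeric id, while pages_get_rv_list() and pages_get_rv() expect the whole pagedict entry and index into pageid['id'] themselves.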