More restyling crap (PEP 8 compliant)
diff --git a/src/mw/metadir.py b/src/mw/metadir.py
index 67e0b0950a58b95c20cf8b11542c0a4346e1234c..05dcdb85abda5f4f45fb0021b841550ae7b652eb 100644
--- a/src/mw/metadir.py
+++ b/src/mw/metadir.py
@@ -1,6 +1,6 @@
 ###
 # mw - VCS-like nonsense for MediaWiki websites
-# Copyright (C) 2009  Ian Weller <ian@ianweller.org>
+# Copyright (C) 2010  Ian Weller <ian@ianweller.org>
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # with this program.  If not, see <http://www.gnu.org/licenses/>.
 ###
 
+import bzrlib.diff
+import codecs
 import ConfigParser
 import json
+import mw.api
 import os
+from StringIO import StringIO
 import sys
-import time
+
 
 class Metadir(object):
+
     def __init__(self):
         self.me = os.path.basename(sys.argv[0])
         root = os.getcwd()
@@ -30,7 +35,7 @@ class Metadir(object):
             if '.mw' in os.listdir(root):
                 self.root = root
                 break
-            (head, tail) = os.path.split(root)
+            head = os.path.split(root)[0]
             if head == root:
                 self.root = os.getcwd()
                 break
@@ -44,6 +49,10 @@ class Metadir(object):
         else:
             self.config = None
 
+    def save_config(self):
+        with open(self.config_loc, 'wb') as config_file:
+            self.config.write(config_file)
+
     def create(self, api_url):
         # create the directory
         if os.path.isdir(self.location):
@@ -59,8 +68,7 @@ class Metadir(object):
         self.config = ConfigParser.RawConfigParser()
         self.config.add_section('remote')
         self.config.set('remote', 'api_url', api_url)
-        with open(self.config_loc, 'wb') as config_file:
-            self.config.write(config_file)
+        self.save_config()
         # create cache/
         os.mkdir(os.path.join(self.location, 'cache'))
         # create cache/pagedict
@@ -70,6 +78,15 @@ class Metadir(object):
         # create cache/pages/
         os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
 
+    def clean_page(self, pagename):
+        filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+        cur_content = codecs.open(filename, 'r', 'utf-8').read()
+        if len(cur_content) != 0 and cur_content[-1] == '\n':
+            cur_content = cur_content[:-1]
+        fd = file(filename, 'w')
+        fd.write(cur_content.encode('utf-8'))
+        fd.close()
+
     def pagedict_add(self, pagename, pageid, currentrv):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
         pagedict = json.loads(fd.read())
@@ -82,6 +99,7 @@ class Metadir(object):
     def get_pageid_from_pagename(self, pagename):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
         pagedict = json.loads(fd.read())
+        pagename = pagename.decode('utf-8')
         if pagename in pagedict.keys():
             return pagedict[pagename]
         else:
@@ -97,16 +115,19 @@ class Metadir(object):
             pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
         pagedata[rvid] = {
-                'user': rv['user'], 'timestamp': rv['timestamp'],
-                'content': rv['*'],
+                'user': rv['user'],
+                'timestamp': rv['timestamp'],
         }
+        if '*' in rv.keys():
+            pagedata[rvid]['content'] = rv['*']
         fd.seek(0)
         fd.write(json.dumps(pagedata))
         fd.truncate()
         fd.close()
 
     def pages_get_rv_list(self, pageid):
-        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
+        pagefile = os.path.join(self.location, 'cache', 'pages',
+                                str(pageid['id']))
         fd = file(pagefile, 'r')
         pagedata = json.loads(fd.read())
         rvs = [int(x) for x in pagedata.keys()]
@@ -114,7 +135,71 @@ class Metadir(object):
         return rvs
 
     def pages_get_rv(self, pageid, rvid):
-        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
+        pagefile = os.path.join(self.location, 'cache', 'pages',
+                                str(pageid['id']))
         fd = file(pagefile, 'r')
         pagedata = json.loads(fd.read())
         return pagedata[str(rvid)]
+
+    def working_dir_status(self, files=None):
+        status = {}
+        check = []
+        if files == None or files == []:
+            for root, dirs, files in os.walk(self.root):
+                if root == self.root:
+                    dirs.remove('.mw')
+                for name in files:
+                    check.append(os.path.join(root, name))
+        else:
+            for file in files:
+                check.append(os.path.join(os.getcwd(), file))
+        check.sort()
+        for full in check:
+            name = os.path.split(full)[1]
+            if name[-5:] == '.wiki':
+                pagename = mw.api.filename_to_pagename(name[:-5])
+                pageid = self.get_pageid_from_pagename(pagename)
+                if not pageid:
+                    status[os.path.relpath(full, self.root)] = '?'
+                else:
+                    rvid = self.pages_get_rv_list(pageid)[-1]
+                    rv = self.pages_get_rv(pageid, rvid)
+                    cur_content = codecs.open(full, 'r', 'utf-8').read()
+                    if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
+                        cur_content = cur_content[:-1]
+                    if cur_content != rv['content']:
+                        status[os.path.relpath(full, self.root)] = 'U'
+        return status
+
+    def diff_rv_to_working(self, pagename, oldrvid=0, newrvid=0):
+        # oldrvid=0 means latest fetched revision
+        # newrvid=0 means working copy
+        filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+        filename = filename.decode('utf-8')
+        pageid = self.get_pageid_from_pagename(pagename)
+        if not pageid:
+            raise ValueError('page named %s has not been fetched' % pagename)
+        else:
+            if oldrvid == 0:
+                oldrvid = self.pages_get_rv_list(pageid)[-1]
+            oldrv = self.pages_get_rv(pageid, oldrvid)
+            oldname = 'a/%s (revision %i)' % (filename, oldrvid)
+            old = [i + '\n' for i in \
+                   oldrv['content'].encode('utf-8').split('\n')]
+            if newrvid == 0:
+                cur_content = codecs.open(filename, 'r', 'utf-8').read()
+                cur_content = cur_content.encode('utf-8')
+                if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
+                    cur_content = cur_content[:-1]
+                newname = 'b/%s (working copy)' % filename
+                new = [i + '\n' for i in cur_content.split('\n')]
+            else:
+                newrv = self.pages_get_rv(pageid, newrvid)
+                newname = 'b/%s (revision %i)' % (filename, newrvid)
+                new = [i + '\n' for i in newrv['content'].split('\n')]
+            diff_fd = StringIO()
+            bzrlib.diff.internal_diff(oldname, old, newname, new, diff_fd)
+            diff = diff_fd.getvalue()
+            if diff[-1] == '\n':
+                diff = diff[:-1]
+            return diff
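
Usage sketch (not part of the commit): a minimal, hypothetical driver for the
new working_dir_status() and diff_rv_to_working() methods added above. It
assumes it is run from inside a checkout whose .mw metadir has already been
created and whose pages have been fetched; the pagename 'Main Page' is purely
illustrative.

    import mw.metadir

    metadir = mw.metadir.Metadir()

    # 'U' marks .wiki files whose working copy differs from the last fetched
    # revision; '?' marks .wiki files with no entry in the pagedict cache.
    for filename, state in sorted(metadir.working_dir_status().items()):
        print '%s %s' % (state, filename)

    # Unified diff between the latest fetched revision (oldrvid=0) and the
    # working copy (newrvid=0), rendered via bzrlib's internal_diff.
    print metadir.diff_rv_to_working('Main Page')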

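Cache-lookup sketch (also not part of the commit): how the reworked
pages_get_rv_list()/pages_get_rv() appear to be called after this change,
assuming, as the str(pageid['id']) lookups above imply, that the pagedict now
maps pagenames to dicts carrying at least an 'id' key rather than to bare
page ids.

    import mw.metadir

    metadir = mw.metadir.Metadir()
    pageid = metadir.get_pageid_from_pagename('Main Page')
    if pageid:
        # Take the last revision id in the cache, as working_dir_status() does.
        rvid = metadir.pages_get_rv_list(pageid)[-1]
        rv = metadir.pages_get_rv(pageid, rvid)
        print rv['user'], rv['timestamp']
        # 'content' is only stored when the fetched revision included the text.
        if 'content' in rv:
            print rv['content'].encode('utf-8')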