fix command that was preventing 'not a mw repo' check

diff --git a/src/mw/metadir.py b/src/mw/metadir.py
index 7126872005c3bcfb95cdd5ad5450eb12f66626f2..0549c83b4232850e28754dd77db5308e0f4f36c6 100644
--- a/src/mw/metadir.py
+++ b/src/mw/metadir.py
@@ -1,6 +1,6 @@
 ###
 # mw - VCS-like nonsense for MediaWiki websites
-# Copyright (C) 2010  Ian Weller <ian@ianweller.org>
+# Copyright (C) 2011  Ian Weller <ian@ianweller.org> and others
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # with this program.  If not, see <http://www.gnu.org/licenses/>.
 ###
 
+import bzrlib.diff
 import codecs
 import ConfigParser
 import json
-import mw.api
 import os
+from StringIO import StringIO
 import sys
 
 
@@ -47,6 +48,10 @@ class Metadir(object):
         else:
             self.config = None
 
+    def save_config(self):
+        with open(self.config_loc, 'wb') as config_file:
+            self.config.write(config_file)
+
     def create(self, api_url):
         # create the directory
         if os.path.isdir(self.location):
@@ -56,14 +61,13 @@ class Metadir(object):
             os.mkdir(self.location, 0755)
         # metadir versioning
         fd = file(os.path.join(self.location, 'version'), 'w')
-        fd.write('1') # XXX THIS API VERSION NOT LOCKED IN YET
+        fd.write('1')  # XXX THIS API VERSION NOT LOCKED IN YET
         fd.close()
         # create config
         self.config = ConfigParser.RawConfigParser()
         self.config.add_section('remote')
         self.config.set('remote', 'api_url', api_url)
-        with open(self.config_loc, 'wb') as config_file:
-            self.config.write(config_file)
+        self.save_config()
         # create cache/
         os.mkdir(os.path.join(self.location, 'cache'))
         # create cache/pagedict
@@ -73,6 +77,15 @@ class Metadir(object):
         # create cache/pages/
         os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
 
+    def clean_page(self, pagename):
+        filename = pagename_to_filename(pagename) + '.wiki'
+        cur_content = codecs.open(filename, 'r', 'utf-8').read()
+        if len(cur_content) != 0 and cur_content[-1] == '\n':
+            cur_content = cur_content[:-1]
+        fd = file(filename, 'w')
+        fd.write(cur_content.encode('utf-8'))
+        fd.close()
+
     def pagedict_add(self, pagename, pageid, currentrv):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
         pagedict = json.loads(fd.read())
@@ -85,6 +98,7 @@ class Metadir(object):
     def get_pageid_from_pagename(self, pagename):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
         pagedict = json.loads(fd.read())
+        pagename = pagename.decode('utf-8')
         if pagename in pagedict.keys():
             return pagedict[pagename]
         else:
@@ -100,7 +114,8 @@ class Metadir(object):
             pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
         pagedata[rvid] = {
-                'user': rv['user'], 'timestamp': rv['timestamp']
+                'user': rv['user'],
+                'timestamp': rv['timestamp'],
         }
         if '*' in rv.keys():
             pagedata[rvid]['content'] = rv['*']
@@ -112,32 +127,42 @@ class Metadir(object):
     def pages_get_rv_list(self, pageid):
         pagefile = os.path.join(self.location, 'cache', 'pages',
                                 str(pageid['id']))
-        fd = file(pagefile, 'r')
-        pagedata = json.loads(fd.read())
-        rvs = [int(x) for x in pagedata.keys()]
-        rvs.sort()
-        return rvs
+        if os.path.exists(pagefile):
+            fd = file(pagefile, 'r')
+            pagedata = json.loads(fd.read())
+            rvs = [int(x) for x in pagedata.keys()]
+            rvs.sort()
+            return rvs
+        else:
+            return [None,]
 
     def pages_get_rv(self, pageid, rvid):
         pagefile = os.path.join(self.location, 'cache', 'pages',
                                 str(pageid['id']))
-        fd = file(pagefile, 'r')
-        pagedata = json.loads(fd.read())
-        return pagedata[str(rvid)]
-
-    def working_dir_status(self):
+        if os.path.exists(pagefile):
+            fd = file(pagefile, 'r')
+            pagedata = json.loads(fd.read())
+            return pagedata[str(rvid)]
+        else:
+            return None
+            
+    def working_dir_status(self, files=None):
         status = {}
         check = []
-        for root, dirs, files in os.walk(self.root):
-            if root == self.root:
-                dirs.remove('.mw')
-            for name in files:
-                check.append(os.path.join(root, name))
+        if files == None or files == []:
+            for root, dirs, files in os.walk(self.root):
+                if root == self.root:
+                    dirs.remove('.mw')
+                for name in files:
+                    check.append(os.path.join(root, name))
+        else:
+            for file in files:
+                check.append(os.path.join(os.getcwd(), file))
         check.sort()
         for full in check:
             name = os.path.split(full)[1]
             if name[-5:] == '.wiki':
-                pagename = mw.api.filename_to_pagename(name[:-5])
+                pagename = filename_to_pagename(name[:-5])
                 pageid = self.get_pageid_from_pagename(pagename)
                 if not pageid:
                     status[os.path.relpath(full, self.root)] = '?'
@@ -145,6 +170,55 @@ class Metadir(object):
                     rvid = self.pages_get_rv_list(pageid)[-1]
                     rv = self.pages_get_rv(pageid, rvid)
                     cur_content = codecs.open(full, 'r', 'utf-8').read()
+                    if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
+                        cur_content = cur_content[:-1]
                     if cur_content != rv['content']:
-                        status[os.path.relpath(full, self.root)] = 'U'
+                        status[os.path.relpath(full, self.root)] = 'M' # modified
+                    else:
+                        status[os.path.relpath(full, self.root)] = 'C' # clean
         return status
+
+    def diff_rv_to_working(self, pagename, oldrvid=0, newrvid=0):
+        # oldrvid=0 means latest fetched revision
+        # newrvid=0 means working copy
+        filename = pagename_to_filename(pagename) + '.wiki'
+        filename = filename.decode('utf-8')
+        pageid = self.get_pageid_from_pagename(pagename)
+        if not pageid:
+            raise ValueError('page named %s has not been fetched' % pagename)
+        else:
+            if oldrvid == 0:
+                oldrvid = self.pages_get_rv_list(pageid)[-1]
+            oldrv = self.pages_get_rv(pageid, oldrvid)
+            oldname = 'a/%s (revision %i)' % (filename, oldrvid)
+            old = [i + '\n' for i in \
+                   oldrv['content'].encode('utf-8').split('\n')]
+            if newrvid == 0:
+                cur_content = codecs.open(filename, 'r', 'utf-8').read()
+                cur_content = cur_content.encode('utf-8')
+                if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
+                    cur_content = cur_content[:-1]
+                newname = 'b/%s (working copy)' % filename
+                new = [i + '\n' for i in cur_content.split('\n')]
+            else:
+                newrv = self.pages_get_rv(pageid, newrvid)
+                newname = 'b/%s (revision %i)' % (filename, newrvid)
+                new = [i + '\n' for i in newrv['content'].split('\n')]
+            diff_fd = StringIO()
+            bzrlib.diff.internal_diff(oldname, old, newname, new, diff_fd)
+            diff = diff_fd.getvalue()
+            if diff[-1] == '\n':
+                diff = diff[:-1]
+            return diff
+
+
+def pagename_to_filename(name):
+    name = name.replace(' ', '_')
+    name = name.replace('/', '!')
+    return name
+
+
+def filename_to_pagename(name):
+    name = name.replace('!', '/')
+    name = name.replace('_', ' ')
+    return name
