Implement edit conflict and collision detection
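In metadir.py this adds a clean_page() helper that rewrites a page file without
its trailing newline, lets working_dir_status() be limited to a list of files,
and guards the trailing-newline checks against empty content. The collision
test itself is not visible in this file; as a rough sketch of the idea only
(the function and argument names below are assumptions, not code from this
commit), detecting a collision amounts to:

    # Sketch only: a collision means the wiki gained a new revision while the
    # working copy also carries local modifications.
    def is_collision(local_base_revid, remote_current_revid, working_copy_modified):
        return working_copy_modified and (remote_current_revid != local_base_revid)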
diff --git a/src/mw/metadir.py b/src/mw/metadir.py
index b637b67a6a7024da56e1c30f67e5ce6a20b20c34..3f8d96782551c01bb2f4cf66c34b6b72bebb67ea 100644
--- a/src/mw/metadir.py
+++ b/src/mw/metadir.py
@@ -78,6 +78,18 @@ class Metadir(object):
         # create cache/pages/
         os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
 
+
+
+    def clean_page(self, pagename):
+        filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+        cur_content = codecs.open(filename, 'r', 'utf-8').read()
+        if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
+            cur_content = cur_content[:-1]
+
+        fd = file(filename, 'w')
+        fd.write(cur_content.encode('utf-8'))
+        fd.close()
+
     def pagedict_add(self, pagename, pageid, currentrv):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
         pagedict = json.loads(fd.read())
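The status and diff code below strips one trailing newline from the working
copy before comparing it to the cached revision text, so clean_page() rewrites
a pulled page in that same normalized form. A hedged usage sketch (the
zero-argument Metadir() construction and the page name are assumptions, not
shown in this patch):

    # Sketch only: normalize a page file so it compares clean against the
    # cached revision.
    from mw.metadir import Metadir

    m = Metadir()                # assumed: run from inside an mw checkout
    m.clean_page('Main Page')    # rewrites the corresponding .wiki file in place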
@@ -105,7 +117,8 @@ class Metadir(object):
             pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
         pagedata[rvid] = {
-                'user': rv['user'], 'timestamp': rv['timestamp']
+                'user': rv['user'],
+                'timestamp': rv['timestamp'],
         }
         if '*' in rv.keys():
             pagedata[rvid]['content'] = rv['*']
@@ -130,14 +143,18 @@ class Metadir(object):
         pagedata = json.loads(fd.read())
         return pagedata[str(rvid)]
 
-    def working_dir_status(self):
+    def working_dir_status(self, files=None):
         status = {}
         check = []
-        for root, dirs, files in os.walk(self.root):
-            if root == self.root:
-                dirs.remove('.mw')
-            for name in files:
-                check.append(os.path.join(root, name))
+        if files is None or files == []:
+            for root, dirs, files in os.walk(self.root):
+                if root == self.root:
+                    dirs.remove('.mw')
+                for name in files:
+                    check.append(os.path.join(root, name))
+        else:
+            for name in files:
+                check.append(os.path.join(os.getcwd(), name))
         check.sort()
         for full in check:
             name = os.path.split(full)[1]
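With the optional files argument, status can now be computed for specific
pages (paths taken relative to the current working directory) instead of
walking the whole checkout. A hedged usage sketch (Metadir() construction and
the example filename are assumptions, as above):

    # Sketch only: full status versus status for a single tracked file.
    from mw.metadir import Metadir

    m = Metadir()
    everything = m.working_dir_status()
    just_one = m.working_dir_status(files=['Main_Page.wiki'])
    for name, flag in just_one.items():
        print('%s %s' % (flag, name))   # 'U' marks a locally modified page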
@@ -150,7 +167,7 @@ class Metadir(object):
                     rvid = self.pages_get_rv_list(pageid)[-1]
                     rv = self.pages_get_rv(pageid, rvid)
                     cur_content = codecs.open(full, 'r', 'utf-8').read()
-                    if cur_content[-1] == '\n':
+                    if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                         cur_content = cur_content[:-1]
                     if cur_content != rv['content']:
                         status[os.path.relpath(full, self.root)] = 'U'
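The added length check matters because indexing into an empty string raises
IndexError, so a freshly created empty .wiki file would otherwise crash the
status walk. A minimal illustration (not project code):

    # Sketch only: the guard short-circuits before indexing empty content.
    text = ''
    if (len(text) != 0) and (text[-1] == '\n'):
        text = text[:-1]
    # Without the length check, text[-1] raises IndexError when text is ''.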
@@ -168,17 +185,17 @@ class Metadir(object):
                 oldrvid = self.pages_get_rv_list(pageid)[-1]
             oldrv = self.pages_get_rv(pageid, oldrvid)
             oldname = 'a/%s (revision %i)' % (filename, oldrvid)
-            old = [i+'\n' for i in oldrv['content'].split('\n')]
+            old = [i + '\n' for i in oldrv['content'].split('\n')]
             if newrvid == 0:
                 cur_content = codecs.open(filename, 'r', 'utf-8').read()
-                if cur_content[-1] == '\n':
+                if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                     cur_content = cur_content[:-1]
                 newname = 'b/%s (working copy)' % filename
-                new = [i+'\n' for i in cur_content.split('\n')]
+                new = [i + '\n' for i in cur_content.split('\n')]
             else:
                 newrv = self.pages_get_rv(pageid, newrvid)
                 newname = 'b/%s (revision %i)' % (filename, newrvid)
-                new = [i+'\n' for i in newrv['content'].split('\n')]
+                new = [i + '\n' for i in newrv['content'].split('\n')]
             diff_fd = StringIO()
             bzrlib.diff.internal_diff(oldname, old, newname, new, diff_fd)
             diff_fd.seek(0)
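The diff itself is still produced with bzrlib.diff.internal_diff; for
comparison only (a sketch, not what this project uses), roughly equivalent
output is available from the standard library's difflib:

    # Sketch only: unified diff between a cached revision and the working copy.
    import difflib

    def unified(old_text, new_text, oldname, newname):
        old = [line + '\n' for line in old_text.split('\n')]
        new = [line + '\n' for line in new_text.split('\n')]
        return ''.join(difflib.unified_diff(old, new, fromfile=oldname, tofile=newname))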
