X-Git-Url: https://projects.mako.cc/source/mw/blobdiff_plain/2c712c501ee2e538d09ad94cf25c5749471c7bfd..bc0f39341bcb3b29f9a78083891979e67db4e40c:/src/mw/metadir.py

diff --git a/src/mw/metadir.py b/src/mw/metadir.py
index 6c6bfeb..a08ddff 100644
--- a/src/mw/metadir.py
+++ b/src/mw/metadir.py
@@ -49,7 +49,11 @@ class Metadir(object):
         else:
             self.config = None
 
-    def create(self, api_url, username=None):
+    def save_config(self):
+        with open(self.config_loc, 'wb') as config_file:
+            self.config.write(config_file)
+
+    def create(self, api_url):
         # create the directory
         if os.path.isdir(self.location):
             print '%s: you are already in a mw repo' % self.me
@@ -64,10 +68,7 @@ class Metadir(object):
         self.config = ConfigParser.RawConfigParser()
         self.config.add_section('remote')
         self.config.set('remote', 'api_url', api_url)
-        if username != None:
-            self.config.set('remote', 'username', username)
-        with open(self.config_loc, 'wb') as config_file:
-            self.config.write(config_file)
+        self.save_config()
         # create cache/
         os.mkdir(os.path.join(self.location, 'cache'))
         # create cache/pagedict
@@ -77,6 +78,15 @@ class Metadir(object):
         # create cache/pages/
         os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
 
+    def clean_page(self, pagename):
+        filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+        cur_content = codecs.open(filename, 'r', 'utf-8').read()
+        if len(cur_content) != 0 and cur_content[-1] == '\n':
+            cur_content = cur_content[:-1]
+        fd = file(filename, 'w')
+        fd.write(cur_content.encode('utf-8'))
+        fd.close()
+
     def pagedict_add(self, pagename, pageid, currentrv):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
         pagedict = json.loads(fd.read())
@@ -89,6 +99,7 @@ class Metadir(object):
     def get_pageid_from_pagename(self, pagename):
         fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
         pagedict = json.loads(fd.read())
+        pagename = pagename.decode('utf-8')
         if pagename in pagedict.keys():
             return pagedict[pagename]
         else:
@@ -104,7 +115,8 @@ class Metadir(object):
             pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
         pagedata[rvid] = {
-            'user': rv['user'], 'timestamp': rv['timestamp']
+            'user': rv['user'],
+            'timestamp': rv['timestamp'],
         }
         if '*' in rv.keys():
             pagedata[rvid]['content'] = rv['*']
@@ -129,14 +141,18 @@ class Metadir(object):
         pagedata = json.loads(fd.read())
         return pagedata[str(rvid)]
 
-    def working_dir_status(self):
+    def working_dir_status(self, files=None):
         status = {}
         check = []
-        for root, dirs, files in os.walk(self.root):
-            if root == self.root:
-                dirs.remove('.mw')
-            for name in files:
-                check.append(os.path.join(root, name))
+        if files == None or files == []:
+            for root, dirs, files in os.walk(self.root):
+                if root == self.root:
+                    dirs.remove('.mw')
+                for name in files:
+                    check.append(os.path.join(root, name))
+        else:
+            for file in files:
+                check.append(os.path.join(os.getcwd(), file))
         check.sort()
         for full in check:
             name = os.path.split(full)[1]
@@ -149,7 +165,7 @@ class Metadir(object):
                 rvid = self.pages_get_rv_list(pageid)[-1]
                 rv = self.pages_get_rv(pageid, rvid)
                 cur_content = codecs.open(full, 'r', 'utf-8').read()
-                if cur_content[-1] == '\n':
+                if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                     cur_content = cur_content[:-1]
                 if cur_content != rv['content']:
                     status[os.path.relpath(full, self.root)] = 'U'
@@ -159,6 +175,7 @@ class Metadir(object):
         # oldrvid=0 means latest fetched revision
         # newrvid=0 means working copy
         filename = mw.api.pagename_to_filename(pagename) + '.wiki'
+        filename = filename.decode('utf-8')
         pageid = self.get_pageid_from_pagename(pagename)
         if not pageid:
             raise ValueError('page named %s has not been fetched' % pagename)
@@ -167,21 +184,20 @@ class Metadir(object):
             oldrvid = self.pages_get_rv_list(pageid)[-1]
         oldrv = self.pages_get_rv(pageid, oldrvid)
         oldname = 'a/%s (revision %i)' % (filename, oldrvid)
-        old = [i+'\n' for i in oldrv['content'].split('\n')]
+        old = [i + '\n' for i in oldrv['content'].encode('utf-8').split('\n')]
         if newrvid == 0:
-            cur_content = codecs.open(filename, 'r', 'utf-8').read()
-            if cur_content[-1] == '\n':
+            cur_content = codecs.open(filename, 'r', 'utf-8').read().encode('utf-8')
+            if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                 cur_content = cur_content[:-1]
             newname = 'b/%s (working copy)' % filename
-            new = [i+'\n' for i in cur_content.split('\n')]
+            new = [i + '\n' for i in cur_content.split('\n')]
         else:
             newrv = self.pages_get_rv(pageid, newrvid)
             newname = 'b/%s (revision %i)' % (filename, newrvid)
-            new = [i+'\n' for i in newrv['content'].split('\n')]
+            new = [i + '\n' for i in newrv['content'].split('\n')]
         diff_fd = StringIO()
         bzrlib.diff.internal_diff(oldname, old, newname, new, diff_fd)
-        diff_fd.seek(0)
-        diff = diff_fd.read()
+        diff = diff_fd.getvalue()
         if diff[-1] == '\n':
             diff = diff[:-1]
         return diff
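
Usage note (not part of the diff above): a minimal sketch of how the reworked create() / save_config() pair might be driven after this change, assuming Metadir() locates the repository from the current working directory as elsewhere in the mw codebase; the API URL and username values are purely illustrative.

    # Python 2 sketch; Metadir() taking no arguments and the 'remote' config
    # section are assumptions based on the code shown in the diff above.
    import mw.metadir

    m = mw.metadir.Metadir()
    m.create('http://example.org/w/api.php')            # create() no longer accepts username
    m.config.set('remote', 'username', 'ExampleUser')   # hypothetical follow-up step by the caller
    m.save_config()                                      # persist the config via the new helper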