# mw - VCS-like nonsense for MediaWiki websites
# Copyright (C) 2011 Ian Weller <ian@ianweller.org> and others
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program.  If not, see <http://www.gnu.org/licenses/>.
import codecs
import ConfigParser
import json
import os
import sys
from StringIO import StringIO

import bzrlib.diff

class Metadir(object):

    def __init__(self):
        self.me = os.path.basename(sys.argv[0])
        root = os.getcwd()
        while True:
            if '.mw' in os.listdir(root):
                self.root = root
                break
            head = os.path.split(root)[0]
            if head == root:  # reached the filesystem root; fall back to cwd
                self.root = os.getcwd()
                break
            root = head
        self.location = os.path.join(self.root, '.mw')
        self.config_loc = os.path.join(self.location, 'config')
        if os.path.isdir(self.location) and \
           os.path.isfile(self.config_loc):
            self.config = ConfigParser.RawConfigParser()
            self.config.read(self.config_loc)
        else:
            self.config = None

    def save_config(self):
        with open(self.config_loc, 'wb') as config_file:
            self.config.write(config_file)

    def create(self, api_url):
        # create the metadir
        if os.path.isdir(self.location):
            print '%s: you are already in a mw repo' % self.me
            sys.exit(1)
        os.mkdir(self.location, 0755)
        # metadir versioning
        fd = file(os.path.join(self.location, 'version'), 'w')
        fd.write('1')  # XXX THIS API VERSION NOT LOCKED IN YET
        fd.close()
        # create config and record the remote API URL
        self.config = ConfigParser.RawConfigParser()
        self.config.add_section('remote')
        self.config.set('remote', 'api_url', api_url)
        self.save_config()
        # create cache/ and an empty cache/pagedict
        os.mkdir(os.path.join(self.location, 'cache'))
        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'w')
        fd.write(json.dumps({}))
        fd.close()
        # create cache/pages/
        os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
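
    # For reference: the config written by create() is plain INI, as
    # produced by RawConfigParser. An illustrative (not literal) example
    # of .mw/config, with a hypothetical URL:
    #
    #     [remote]
    #     api_url = http://example.com/w/api.php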

    def clean_page(self, pagename):
        # strip the trailing newline most editors append, so the working
        # copy matches the revision text as MediaWiki stores it
        filename = pagename_to_filename(pagename) + '.wiki'
        cur_content = codecs.open(filename, 'r', 'utf-8').read()
        if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
            cur_content = cur_content[:-1]
        fd = file(filename, 'w')
        fd.write(cur_content.encode('utf-8'))
        fd.close()

    def pagedict_add(self, pagename, pageid, currentrv):
        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
        pagedict = json.loads(fd.read())
        pagedict[pagename] = {'id': int(pageid), 'currentrv': int(currentrv)}
        fd.seek(0)  # rewind before rewriting the whole file
        fd.write(json.dumps(pagedict))
        fd.truncate()
        fd.close()
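
    # The pagedict cache is one JSON object keyed by pagename. A
    # hypothetical entry after fetching a single page:
    #
    #     {"Main Page": {"id": 1, "currentrv": 12345}}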

    def get_pageid_from_pagename(self, pagename):
        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
        pagedict = json.loads(fd.read())
        fd.close()
        pagename = pagename.decode('utf-8')
        if pagename in pagedict:
            # the pagedict entry: {'id': ..., 'currentrv': ...}
            return pagedict[pagename]
        return None

    def pages_add_rv(self, pageid, rv):
        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
        # merge into any previously cached revisions for this page
        pagedata = {}
        if os.path.exists(pagefile):
            pagedata_raw = file(pagefile, 'r').read()
            if pagedata_raw != '':
                pagedata = json.loads(pagedata_raw)
        rvid = int(rv['revid'])
        pagedata[rvid] = {
                'user': rv['user'],
                'timestamp': rv['timestamp'],
        }
        if '*' in rv:  # the API carries revision text under the '*' key
            pagedata[rvid]['content'] = rv['*']
        fd = file(pagefile, 'w')
        fd.write(json.dumps(pagedata))
        fd.close()
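
    # Each cache/pages/<pageid> file is a JSON object keyed by revision
    # id. A hypothetical entry ('content' is present only when the API
    # response included the revision text):
    #
    #     {"12345": {"user": "Example", "timestamp":
    #                "2011-01-01T00:00:00Z", "content": "page text"}}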

    def pages_get_rv_list(self, pageid):
        # pageid is a pagedict entry as returned by get_pageid_from_pagename
        pagefile = os.path.join(self.location, 'cache', 'pages',
                                str(pageid['id']))
        if os.path.exists(pagefile):
            pagedata = json.loads(file(pagefile, 'r').read())
            rvs = sorted(int(x) for x in pagedata.keys())
            return rvs
        return [None]

    def pages_get_rv(self, pageid, rvid):
        pagefile = os.path.join(self.location, 'cache', 'pages',
                                str(pageid['id']))
        if os.path.exists(pagefile):
            pagedata = json.loads(file(pagefile, 'r').read())
            return pagedata[str(rvid)]
        return None

    def working_dir_status(self, files=None):
        status = {}
        check = []
        if files == None or files == []:
            # no explicit file list given: walk the whole working directory
            for root, dirs, files in os.walk(self.root):
                if root == self.root:
                    dirs.remove('.mw')  # never descend into the metadir
                for name in files:
                    check.append(os.path.join(root, name))
        else:
            for file_ in files:
                check.append(os.path.join(os.getcwd(), file_))
        check.sort()
        for full in check:
            name = os.path.split(full)[1]
            if name[-5:] == '.wiki':
                pagename = filename_to_pagename(name[:-5])
                pageid = self.get_pageid_from_pagename(pagename)
                if not pageid:
                    # never fetched: unknown to the pagedict
                    status[os.path.relpath(full, self.root)] = '?'
                else:
                    rvid = self.pages_get_rv_list(pageid)[-1]
                    rv = self.pages_get_rv(pageid, rvid)
                    cur_content = codecs.open(full, 'r', 'utf-8').read()
                    if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                        cur_content = cur_content[:-1]
                    if cur_content != rv['content']:
                        status[os.path.relpath(full, self.root)] = 'M'  # modified
                    else:
                        status[os.path.relpath(full, self.root)] = 'C'  # clean
        return status
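
    # A hypothetical status map as returned above ('?': not fetched,
    # 'M': modified locally, 'C': clean):
    #
    #     >>> Metadir().working_dir_status()
    #     {'Main_Page.wiki': 'C', 'Sandbox.wiki': 'M', 'Drafts.wiki': '?'}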

    def diff_rv_to_working(self, pagename, oldrvid=0, newrvid=0):
        # oldrvid=0 means the latest fetched revision;
        # newrvid=0 means the working copy
        filename = pagename_to_filename(pagename) + '.wiki'
        filename = filename.decode('utf-8')
        pageid = self.get_pageid_from_pagename(pagename)
        if not pageid:
            raise ValueError('page named %s has not been fetched' % pagename)
        if oldrvid == 0:
            oldrvid = self.pages_get_rv_list(pageid)[-1]
        oldrv = self.pages_get_rv(pageid, oldrvid)
        oldname = 'a/%s (revision %i)' % (filename, oldrvid)
        old = [i + '\n' for i in
               oldrv['content'].encode('utf-8').split('\n')]
        if newrvid == 0:
            cur_content = codecs.open(filename, 'r', 'utf-8').read()
            cur_content = cur_content.encode('utf-8')
            if (len(cur_content) != 0) and (cur_content[-1] == '\n'):
                cur_content = cur_content[:-1]
            newname = 'b/%s (working copy)' % filename
            new = [i + '\n' for i in cur_content.split('\n')]
        else:
            newrv = self.pages_get_rv(pageid, newrvid)
            newname = 'b/%s (revision %i)' % (filename, newrvid)
            new = [i + '\n' for i in
                   newrv['content'].encode('utf-8').split('\n')]
        diff_fd = StringIO()
        bzrlib.diff.internal_diff(oldname, old, newname, new, diff_fd)
        diff = diff_fd.getvalue()
        if diff != '' and diff[-1] == '\n':
            diff = diff[:-1]
        return diff
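
    # bzrlib.diff.internal_diff writes a unified diff of the two labeled
    # line lists into diff_fd. A hypothetical session (revision number
    # and output are illustrative only):
    #
    #     >>> print Metadir().diff_rv_to_working('Main Page')
    #     --- a/Main Page.wiki (revision 12345)
    #     +++ b/Main Page.wiki (working copy)
    #     ...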


def pagename_to_filename(name):
    # map filesystem-awkward characters; must mirror filename_to_pagename
    name = name.replace(' ', '_')
    name = name.replace('/', '!')
    return name


def filename_to_pagename(name):
    name = name.replace('!', '/')
    name = name.replace('_', ' ')
    return name
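
# A quick round-trip sketch of the mapping (hypothetical pagename). Note
# the scheme is lossy for pagenames that themselves contain '_' or '!':
#
#     >>> pagename_to_filename('Talk:Main Page/Archive 1')
#     'Talk:Main_Page!Archive_1'
#     >>> filename_to_pagename('Talk:Main_Page!Archive_1')
#     'Talk:Main Page/Archive 1'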