API is definitely not locked in yet for version 1
[mw] / src / mw / metadir.py
index 89654b8b3a272b153b762c09755582c711fa6b32..4549a29ffc5b5d4ebba82abd03ab5bea1f2fa49e 100644 (file)
@@ -13,8 +13,7 @@
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# with this program.  If not, see <http://www.gnu.org/licenses/>.
 ###
 
 import ConfigParser
@@ -47,45 +46,75 @@ class Metadir(object):
 
     def create(self, api_url):
+        """Create and populate the metadir for a fresh checkout.
+
+        Lays down the version stamp, the remote config, and the page
+        caches under self.location; exits if the directory already exists.
+        """
         # create the directory
-        try:
-            os.mkdir(self.location, 0755)
-        except OSError, e:
+        # NOTE(review): isdir-then-mkdir is a check-then-act race; the
+        # replaced try/except form was immune to it -- confirm intent.
+        if os.path.isdir(self.location):
             print '%s: you are already in a mw repo' % self.me
             sys.exit(1)
+        else:
+            os.mkdir(self.location, 0755)
+        # metadir versioning
+        fd = file(os.path.join(self.location, 'version'), 'w')
+        fd.write('1') # XXX THIS API VERSION NOT LOCKED IN YET
+        fd.close()
         # create config
         self.config = ConfigParser.RawConfigParser()
         self.config.add_section('remote')
         self.config.set('remote', 'api_url', api_url)
         with open(self.config_loc, 'wb') as config_file:
             self.config.write(config_file)
-        # create cache
+        # create cache/
         os.mkdir(os.path.join(self.location, 'cache'))
-        # create cache/page
-        fd = file(os.path.join(self.location, 'cache', 'page'), 'w')
-        fd.write(json.dumps({}))
-        # create cache/rv
-        fd = file(os.path.join(self.location, 'cache', 'rv'), 'w')
+        # create cache/pagedict -- maps pagename -> pageid, starts empty
+        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'w')
         fd.write(json.dumps({}))
+        fd.close()
+        # create cache/pages/ -- one JSON file of revisions per pageid
+        os.mkdir(os.path.join(self.location, 'cache', 'pages'), 0755)
+
+    def pagedict_add(self, pagename, pageid):
+        """Map pagename -> int(pageid) in the cache/pagedict JSON file."""
+        # Open read/write so the existing mapping can be loaded first.
+        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r+')
+        pagedict = json.loads(fd.read())
+        pagedict[pagename] = int(pageid)
+        # Rewrite from the start and drop any leftover tail bytes.
+        fd.seek(0)
+        fd.write(json.dumps(pagedict))
+        fd.truncate()
+        fd.close()
 
-    def add_page_info(self, pageid, pagename, rvids):
-        lulz = file(os.path.join(self.location, 'cache', 'page'), 'r')
-        conf = json.loads(lulz.read())
-        conf[pageid] = {'name': pagename, 'rv': rvids}
-        fd = file(os.path.join(self.location, 'cache', 'page'), 'w')
-        fd.write(json.dumps(conf))
+    def get_pageid_from_pagename(self, pagename):
+        fd = file(os.path.join(self.location, 'cache', 'pagedict'), 'r')
+        pagedict = json.loads(fd.read())
+        if pagename in pagedict.keys():
+            return pagedict[pagename]
+        else:
+            return None
 
-    def add_rv_info(self, rv):
-        lulz = file(os.path.join(self.location, 'cache', 'rv'), 'r')
-        conf = json.loads(lulz.read())
+    def pages_add_rv(self, pageid, rv):
+        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
+        fd = file(pagefile, 'w+')
+        pagedata_raw = fd.read()
+        if pagedata_raw == '':
+            pagedata = {}
+        else:
+            pagedata = json.loads(pagedata_raw)
         rvid = int(rv['revid'])
-        conf[rvid] = {
+        pagedata[rvid] = {
                 'user': rv['user'], 'timestamp': rv['timestamp'],
-                'content': rv['*']
+                'content': rv['*'],
         }
-        conf[rvid]['minor'] = 'minor' in rv
-        if 'comment' in rv:
-            conf[rvid]['comment'] = rv['comment']
-        else:
-            conf[rvid]['comment'] = None
-        fd = file(os.path.join(self.location, 'cache', 'rv'), 'w')
-        fd.write(json.dumps(conf))
+        fd.seek(0)
+        fd.write(json.dumps(pagedata))
+        fd.truncate()
+        fd.close()
+
+    def pages_get_rv_list(self, pageid):
+        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
+        fd = file(pagefile, 'r')
+        pagedata = json.loads(fd.read())
+        rvs = [int(x) for x in pagedata.keys()]
+        rvs.sort()
+        return rvs
+
+    def pages_get_rv(self, pageid, rvid):
+        pagefile = os.path.join(self.location, 'cache', 'pages', str(pageid))
+        fd = file(pagefile, 'r')
+        pagedata = json.loads(fd.read())
+        return pagedata[str(rvid)]

Benjamin Mako Hill || Want to submit a patch?