#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# TODO author/copyright
"""generate reference database and index for tests"""

from ZODB.FileStorage import FileStorage
from ZODB import DB
from persistent import Persistent
import transaction
import struct
import time
import random
import logging

# convert numeric oid to/from str
# e.g. p64(5) == '\x00\x00\x00\x00\x00\x00\x00\x05' and unpack64(p64(5)) == 5
def p64(num):
    return struct.pack('>Q', num)

def unpack64(packed):
    return struct.unpack('>Q', packed)[0]

def hex64(packed):
    return '0x%016x' % unpack64(packed)

# make time.time() predictable so that the generated transactions are reproducible
_xtime = time.mktime(time.strptime("04 Jan 1979", "%d %b %Y"))
def xtime():
    global _xtime
    _xtime += 1.1
    return _xtime
time.time = xtime

# commit current transaction with raw user/description/extension metadata
def commit(user, description, extension):
    txn = transaction.get()
    txn.user = user
    txn.description = description
    txn.extension = extension
    txn.commit()

class Object(Persistent):
    # .value
    def __init__(self, value):
        self.value = value

    # persist via .value directly, not as a {'value': ...} dict
    def __getstate__(self):
        return self.value

    def __setstate__(self, state):
        self.value = state

def main():
    logging.basicConfig()
    outfs = "testdata/1.fs"
    stor = FileStorage(outfs, create=True)
    db = DB(stor)
    conn = db.open()
    root = conn.root()
    assert root._p_oid == p64(0), `root._p_oid`

    # generate random changes to objects hooked to top-level root by a/b/c/... key
    random.seed(0)
    namev = [_ for _ in "abcdefg"]
    for i in range(2):
        for j in range(25):
            name = random.choice(namev)
            if name in root:
                obj = root[name]
            else:
                root[name] = obj = Object(None)
            obj.value = "%s%i.%i" % (name, i, j)

            commit(u"user%i.%i" % (i,j), u"step %i.%i" % (i, j),
                   {"x-generator": "zodb/py2 (%s)" % name})

        # undo a transaction one step before the latest one, a couple of times
        for j in range(2):
            ul = db.undoLog(1, 2)[0]
            db.undo(ul["id"])
            commit(u"root%i.%i\nYour\nMagesty " % (i, j),
                   u"undo %i.%i\nmore detailed description\n\nzzz ..."
                   % (i, j) + "\t"*(i+j),
                   {"x-generator": "zodb/py2 (undo %s)" % ul["id"]})

    conn.close()
    db.close()
    stor.close()

    # dump, as Go source, what the tests should expect to find in the generated data
    with open("testdata_expect_test.go", "w") as f:
        def emit(v):
            print >>f, v

        emit("// DO NOT EDIT - AUTOGENERATED (by py/gen-testdata)")
        emit("package fs1\n")

        # index
        emit("const _1fs_indexTopPos = %i" % stor._pos)
        emit("var _1fs_indexEntryv = [...]indexEntry{")
        for k, v in stor._index.iteritems():
            emit("\t{%8i, %8i}," % (unpack64(k), v))
        emit("}")

        # database records
        stor = FileStorage(outfs, read_only=True)
        oidPrevPos = {}  # oid -> pos of last drec(oid)
        emit("\nvar _1fs_dbEntryv = [...]dbEntry{")
        for txn in stor.iterator():  # txn is TransactionRecord
            # txn.extension is already a depickled dict - we want to put raw data from the file.
            # Also we need to access the txn record length, which is not provided by the
            # higher-level iterator, so do a deep-dive into FileStorage internals.
            h = stor._read_txn_header(txn._tpos)
            assert h.tid == txn.tid

            # -> TxnHeader
            emit("\tTxnHeader{")
            emit("\t\tTid:\t%s," % hex64(txn.tid))
            emit("\t\tRecLenm8:\t%i," % h.tlen)
            emit("\t\tStatus:\t'%s'," % txn.status)
            emit("\t\tUser:\t\t[]byte(\"%s\")," % txn.user.encode('string_escape'))
            emit("\t\tDescription:\t[]byte(\"%s\")," % txn.description.encode('string_escape'))
            emit("\t\tExtension:\t[]byte(\"%s\")," % h.ext.encode('string_escape'))

            # txn: ._pos ._tend ._tpos

            # -> DataHeader + payload
            for drec in txn:  # drec is itemof(TransactionRecordIterator) = Record
                emit("\n\t\tDataHeader{")
                emit("\t\t\tOid:\t%i," % unpack64(drec.oid))
                emit("\t\t\tTid:\t%s," % hex64(drec.tid))
                # drec: .data .prev (=prev_txn) .pos
                emit("\t\t\tPrevDataRecPos:\t%i," % oidPrevPos.get(drec.oid, 0))
                emit("\t\t\tTxnPos:\t%i," % txn._tpos)
                assert drec.version == ''
                # DataLen
                # .data .data_txn
                emit("\t\t},")

                oidPrevPos[drec.oid] = drec.pos

            emit("\t},")

        emit("}")

if __name__ == '__main__':
    main()
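
# ---------------------------------------------------------------------------
# Usage sketch (not part of the generator itself; the invocation below is an
# assumption inferred from the "py/gen-testdata" marker and the relative paths
# hard-coded above): run under Python 2 from the fs1 package directory, e.g.
#
#   python2 py/gen-testdata
#
# so that testdata/1.fs (plus the index file FileStorage saves next to it on
# close) and testdata_expect_test.go are (re)generated in place for the Go
# tests to consume.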