Commit 683f7faf authored by Jim Fulton, committed by GitHub

Merge pull request #95 from zopefoundation/fix-84

Changed DB root-object-initialization to use an open connection and more
parents a3f7d3e4 af20d2f6
@@ -190,7 +190,6 @@ class Connection(ExportImport, object):
         self._reader = ObjectReader(self, self._cache, self._db.classFactory)

     def add(self, obj):
         """Add a new object 'obj' to the database and assign it an oid."""
         if self.opened is None:
@@ -202,19 +201,22 @@ class Connection(ExportImport, object):
             raise TypeError("Only first-class persistent objects may be"
                             " added to a Connection.", obj)
         elif obj._p_jar is None:
-            assert obj._p_oid is None
-            oid = obj._p_oid = self.new_oid()
-            obj._p_jar = self
-            if self._added_during_commit is not None:
-                self._added_during_commit.append(obj)
-            self._register(obj)
-            # Add to _added after calling register(), so that _added
-            # can be used as a test for whether the object has been
-            # registered with the transaction.
-            self._added[oid] = obj
+            self._add(obj, self.new_oid())
         elif obj._p_jar is not self:
             raise InvalidObjectReference(obj, obj._p_jar)

+    def _add(self, obj, oid):
+        assert obj._p_oid is None
+        oid = obj._p_oid = oid
+        obj._p_jar = self
+        if self._added_during_commit is not None:
+            self._added_during_commit.append(obj)
+        self._register(obj)
+        # Add to _added after calling register(), so that _added
+        # can be used as a test for whether the object has been
+        # registered with the transaction.
+        self._added[oid] = obj
+
     def get(self, oid):
         """Return the persistent object with oid 'oid'."""
         if self.opened is None:
...
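The refactoring above only moves code: ``add()`` still assigns an oid and
registers the object, now via the shared ``_add()`` helper. A minimal sketch
of the unchanged public behavior (the MappingStorage and the printed checks
are illustrative, not part of the diff)::

    import transaction
    import ZODB
    import ZODB.MappingStorage
    from persistent.mapping import PersistentMapping

    db = ZODB.DB(ZODB.MappingStorage.MappingStorage())
    conn = db.open()

    obj = PersistentMapping()
    conn.add(obj)                  # internally: self._add(obj, self.new_oid())
    print(obj._p_jar is conn)      # True: the object is bound to this connection
    print(obj._p_oid is not None)  # True: an oid is assigned immediately

    transaction.abort()            # add() alone does not commit anything
    conn.close()
    db.close()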
@@ -39,6 +39,7 @@ import transaction
 from persistent.TimeStamp import TimeStamp
 import six

+from . import POSException

 logger = logging.getLogger('ZODB.DB')
@@ -211,7 +212,8 @@ class ConnectionPool(AbstractConnectionPool):
         """Perform garbage collection on available connections.

         If a connection is no longer viable because it has timed out, it is
-        garbage collected."""
+        garbage collected.
+        """
         threshhold = time.time() - self.timeout
         to_remove = ()
@@ -442,30 +444,6 @@ class DB(object):
                 DeprecationWarning, 2)
             storage.tpc_vote = lambda *args: None

-        temp_storage = self._mvcc_storage.new_instance()
-        try:
-            try:
-                temp_storage.poll_invalidations()
-                temp_storage.load(z64)
-            except KeyError:
-                # Create the database's root in the storage if it doesn't exist
-                from persistent.mapping import PersistentMapping
-                root = PersistentMapping()
-                # Manually create a pickle for the root to put in the storage.
-                # The pickle must be in the special ZODB format.
-                file = BytesIO()
-                p = Pickler(file, _protocol)
-                p.dump((root.__class__, None))
-                p.dump(root.__getstate__())
-                t = transaction.Transaction()
-                t.description = 'initial database creation'
-                temp_storage.tpc_begin(t)
-                temp_storage.store(z64, None, file.getvalue(), '', t)
-                temp_storage.tpc_vote(t)
-                temp_storage.tpc_finish(t)
-        finally:
-            temp_storage.release()
-
         # Multi-database setup.
         if databases is None:
             databases = {}
@@ -479,6 +457,15 @@ class DB(object):
         self.large_record_size = large_record_size

+        # Make sure we have a root:
+        with self.transaction('initial database creation') as conn:
+            try:
+                conn.get(z64)
+            except KeyError:
+                from persistent.mapping import PersistentMapping
+                root = PersistentMapping()
+                conn._add(root, z64)
+
     @property
     def _storage(self):      # Backward compatibility
         return self.storage
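With this change, root creation goes through ordinary connection machinery:
``conn.get(z64)`` raises ``POSKeyError`` (a ``KeyError`` subclass, hence the
``except KeyError``) when oid 0 is absent, and ``_add()`` registers the new
``PersistentMapping`` under oid 0 so the surrounding ``transaction()`` context
manager commits it. A small sketch of the observable result, assuming
MappingStorage::

    import ZODB
    import ZODB.MappingStorage
    from ZODB.utils import z64

    # Constructing the DB runs the root-creation block above, so oid 0
    # already holds a PersistentMapping by the time open() returns.
    db = ZODB.DB(ZODB.MappingStorage.MappingStorage())
    conn = db.open()
    root = conn.get(z64)       # would raise POSKeyError if no root existed
    print(root._p_oid == z64)  # True
    conn.close()
    db.close()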
@@ -906,8 +893,8 @@ class DB(object):
         """
         self.undoMultiple([id], txn)

-    def transaction(self):
-        return ContextManager(self)
+    def transaction(self, note=None):
+        return ContextManager(self, note)

     def new_oid(self):
         return self.storage.new_oid()
@@ -926,12 +913,16 @@ class ContextManager:
     """PEP 343 context manager
     """

-    def __init__(self, db):
+    def __init__(self, db, note=None):
         self.db = db
+        self.note = note

     def __enter__(self):
-        self.tm = transaction.TransactionManager()
+        self.tm = tm = transaction.TransactionManager()
         self.conn = self.db.open(self.tm)
+        t = tm.begin()
+        if self.note:
+            t.note(self.note)
         return self.conn

     def __exit__(self, t, v, tb):
...
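The new ``note`` argument is recorded on the transaction begun in
``__enter__``, which is how the root-creation commit in ``DB.__init__`` above
gets its 'initial database creation' description. A usage sketch (the storage
and key name are illustrative)::

    import ZODB
    import ZODB.MappingStorage

    db = ZODB.DB(ZODB.MappingStorage.MappingStorage())

    # The context manager opens a connection under a private transaction
    # manager; the note is attached to the transaction it begins.
    with db.transaction('add a counter') as conn:
        conn.root()['counter'] = 0  # committed on normal exit, aborted on error

    db.close()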
@@ -63,28 +63,32 @@ seems best and set the next record to that:

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[1])
-    Scan forward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xd06\x9c\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan forward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[1]
     True

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[30])
-    Scan forward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xdc\x96.\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan forward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[30]
     True

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[70])
-    Scan backward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xed\xa7>\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan backward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[70]
     True

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[-2])
-    Scan backward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xfa\x06\xd0\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan backward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[-2]
     True
@@ -118,14 +122,16 @@ starting point, or just pick up where another iterator left off:

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[50], pos=poss[50])
-    Scan backward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xe5\x1e\xb6\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan backward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[50]
     True

     >>> it.close()
     >>> it = ZODB.FileStorage.FileIterator('data.fs', tids[49], pos=poss[50])
-    Scan backward data.fs:<OFFSET> looking for '\x03z\xbd\xd8\xe4\xb1|\xcc'
+    ... # doctest: +ELLIPSIS
+    Scan backward data.fs:<OFFSET> looking for ...
     >>> it.next().tid == tids[49]
     True
...
@@ -126,6 +126,7 @@ returned are distinct:

     >>> st = Storage()
     >>> db = DB(st)
     >>> c1 = db.open()
+    >>> c1.cacheMinimize()
     >>> c2 = db.open()
     >>> c3 = db.open()
     >>> c1 is c2 or c1 is c3 or c2 is c3
@@ -260,6 +261,7 @@ closed one out of the available connection stack.

     >>> st = Storage()
     >>> db = DB(st, pool_size=3)
     >>> conns = [db.open() for dummy in range(6)]
+    >>> conns[0].cacheMinimize()
     >>> len(handler.records)  # 3 warnings for the "excess" connections
     3
     >>> pool = db.pool
@@ -328,6 +330,7 @@ resources (like RDB connections), for the duration.

     >>> st = Storage()
     >>> db = DB(st, pool_size=2)
     >>> conn0 = db.open()
+    >>> conn0.cacheMinimize(); import gc; _ = gc.collect()  # See fix84.rst
     >>> len(conn0._cache)  # empty now
     0
     >>> import transaction
...
A change in the way databases were initialized affected tests
=============================================================

Originally, databases added root objects by interacting directly with
storages, rather than using connections.  As storage transaction
interaction became more complex, interacting directly with storages
led to duplicated (and buggy) code.

See: https://github.com/zopefoundation/ZODB/issues/84

Fixing this had some impacts that affected tests:

- New databases now have a connection with a single object in its
  cache.  This is a slight improvement, but it broke some test
  expectations.

- Tests that manipulated time had their clocks thrown off by the new
  time calls.

This led to some test fixes, in many cases adding a mysterious
``cacheMinimize()`` call.
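For example, the pattern used by the fixed tests looks roughly like this
(MappingStorage stands in for whatever storage a given test uses)::

    import gc
    import ZODB
    import ZODB.MappingStorage

    db = ZODB.DB(ZODB.MappingStorage.MappingStorage())
    conn = db.open()

    # db.open() can hand back the pooled connection that created the
    # root, with that one object still in its pickle cache; evict it
    # (and collect the freed ghost) so cache assertions start at zero.
    conn.cacheMinimize()
    _ = gc.collect()
    print(len(conn._cache))  # 0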
@@ -21,6 +21,7 @@ Make a change locally:

     >>> st = SimpleStorage()
     >>> db = ZODB.DB(st)
+    >>> st.sync_called = False
     >>> cn = db.open()
     >>> rt = cn.root()
     >>> rt['a'] = 1
...
@@ -237,8 +237,8 @@ class LRUCacheTests(CacheTestBase):
     # not bother to check this

     def testSize(self):
+        self.db.cacheMinimize()
         self.assertEqual(self.db.cacheSize(), 0)
-        self.assertEqual(self.db.cacheDetailSize(), [])

         CACHE_SIZE = 10
         self.db.setCacheSize(CACHE_SIZE)
@@ -444,6 +444,7 @@ def test_basic_cache_size_estimation():

     >>> import ZODB.MappingStorage
     >>> db = ZODB.MappingStorage.DB()
     >>> conn = db.open()
+    >>> conn.cacheMinimize(); _ = gc.collect()  # See fix84.rst

     >>> def check_cache_size(cache, expected):
     ...     actual = cache.total_estimated_size
...
@@ -230,6 +230,7 @@ class UserMethodTests(unittest.TestCase):

     >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
     >>> cn = db.open()
+    >>> cn.cacheMinimize()  # See fix84.rst
     >>> obj = cn.get(p64(0))
     >>> obj._p_oid
     '\x00\x00\x00\x00\x00\x00\x00\x00'
...
@@ -125,7 +125,7 @@ def connectionDebugInfo():
     r"""DB.connectionDebugInfo provides information about connections.

     >>> import time
-    >>> now = 1228423244.5
+    >>> now = 1228423244.1
     >>> def faux_time():
     ...     global now
     ...     now += .1
@@ -154,8 +154,8 @@ def connectionDebugInfo():
     >>> before = [x['before'] for x in info]
     >>> opened = [x['opened'] for x in info]
     >>> infos = [x['info'] for x in info]

-    >>> before
-    [None, '\x03zY\xd8\xc0m9\xdd', None]
+    >>> before == [None, c1.root()._p_serial, None]
+    True

     >>> opened
     ['2008-12-04T20:40:44Z (1.30s)', '2008-12-04T20:40:46Z (0.10s)', None]

     >>> infos
@@ -351,6 +351,7 @@ def minimally_test_connection_timeout():

     >>> db = ZODB.DB(None, pool_timeout=.01)
     >>> c1 = db.open()
+    >>> c1.cacheMinimize()  # See fix84.rst
     >>> c2 = db.open()
     >>> c1.close()
     >>> c2.close()
...