Commit 8a7f79e9 authored by Jim Fulton

Fixed a bug in object-cache size accounting. New objects weren't counted properly.
parent d628565b
@@ -8,6 +8,10 @@
 Bugs Fixed
 ----------
 
+- Sizes of new objects weren't added to the object cache size
+  estimation, causing the object-cache size limiting feature to let
+  the cache grow too large when many objects were added.
+
 - Deleted records weren't removed when packing file storages.
 
 - Fixed intermittent failures in the MVCCMappingStorage tests.
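The scenario this entry describes, sketched in Python for illustration only. This is not part of the commit; it assumes the byte-based cache limit (the cache_size_bytes option of ZODB.DB that accompanies the size-estimation feature) and the total_estimated_size counter exercised by the new test further below; the numbers are arbitrary.

# Illustration only, not part of the commit.  Assumes the cache_size_bytes
# option of ZODB.DB; the exact figures are not meaningful.
import transaction
import ZODB
import ZODB.MappingStorage
from persistent.mapping import PersistentMapping

db = ZODB.DB(ZODB.MappingStorage.MappingStorage(), cache_size_bytes=10000)
conn = db.open()
root = conn.root()
for i in range(1000):
    root[i] = PersistentMapping()   # many brand-new persistent objects
transaction.commit()

# Before the fix, the sizes of these newly stored objects were never added
# to conn._cache.total_estimated_size, so the byte-based limit had nothing
# to compare against and the cache could grow well past cache_size_bytes.
size = conn._cache.total_estimated_size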
@@ -656,10 +656,7 @@ class Connection(ExportImport, object):
                 obj._p_invalidate()
             else:
                 s = self._storage.store(oid, serial, p, '', transaction)
-                self._cache.update_object_size_estimation(oid,
-                                                          len(p)
-                                                          )
-                obj._p_estimated_size = len(p)
+
             self._store_count += 1
             # Put the object in the cache before handling the
             # response, just in case the response contains the
@@ -674,6 +671,9 @@ class Connection(ExportImport, object):
                 else:
                     raise
 
+            self._cache.update_object_size_estimation(oid, len(p))
+            obj._p_estimated_size = len(p)
+
             self._handle_serial(s, oid)
 
     def _handle_serial(self, store_return, oid=None, change=1):
@@ -901,9 +901,7 @@ class Connection(ExportImport, object):
         self._reader.setGhostState(obj, p)
         obj._p_serial = serial
-        self._cache.update_object_size_estimation(obj._p_oid,
-                                                   len(p)
-                                                   )
+        self._cache.update_object_size_estimation(obj._p_oid, len(p))
         obj._p_estimated_size = len(p)
 
         # Blob support
@@ -1162,9 +1160,7 @@ class Connection(ExportImport, object):
             data, serial = src.load(oid, src)
             obj = self._cache.get(oid, None)
             if obj is not None:
-                self._cache.update_object_size_estimation(obj._p_oid,
-                                                           len(data)
-                                                           )
+                self._cache.update_object_size_estimation(obj._p_oid, len(data))
                 obj._p_estimated_size = len(data)
             if isinstance(self._reader.getGhost(data), Blob):
                 blobfilename = src.loadBlob(oid, serial)
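The fix itself is an ordering change: the size-estimation update used to run right after self._storage.store(), before the new object had been added to self._cache, and it now runs once the object is in the cache, so brand-new objects get counted as well (presumably the cache's accounting only tracks objects it actually holds). A simplified sketch of the corrected ordering, not the actual Connection code:

# Simplified sketch; the helper name is made up, only the order of
# operations mirrors the change above.
def store_and_account(storage, cache, oid, serial, p, transaction, obj):
    s = storage.store(oid, serial, p, '', transaction)  # write the pickle

    cache[oid] = obj  # the object has to be in the cache first ...

    # ... so that this update shows up in cache.total_estimated_size,
    # including for objects created in the current transaction.
    cache.update_object_size_estimation(oid, len(p))
    obj._p_estimated_size = len(p)
    return s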
@@ -18,21 +18,20 @@ purposes. It acts like a memo for unpickling. It also keeps recent
objects in memory under the assumption that they may be used again.
"""
import gc
import time
import unittest
import threading
from persistent.cPickleCache import PickleCache
from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.tests.MinPO import MinPO
from ZODB.utils import p64
from zope.testing import doctest
import gc
import threading
import time
import transaction
import unittest
import ZODB
import ZODB.MappingStorage
from ZODB.tests.MinPO import MinPO
import ZODB.tests.util
from ZODB.utils import p64
from persistent import Persistent
class CacheTestBase(ZODB.tests.util.TestCase):
@@ -418,8 +417,64 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("two objects with the same oid should have failed")
 
+
+def check_basic_cache_size_estimation():
+    """Make sure the basic accounting is correct:
+
+    >>> import ZODB.MappingStorage
+    >>> db = ZODB.MappingStorage.DB()
+    >>> conn = db.open()
+
+    The cache is empty initially:
+
+    >>> conn._cache.total_estimated_size
+    0
+
+    We force the root to be loaded and the cache grows:
+
+    >>> getattr(conn.root, 'z', None)
+    >>> conn._cache.total_estimated_size
+    128
+
+    We add some data and the cache grows:
+
+    >>> conn.root.z = ZODB.tests.util.P('x'*100)
+    >>> import transaction
+    >>> transaction.commit()
+    >>> conn._cache.total_estimated_size
+    320
+
+    Loading the objects in another connection gets the same sizes:
+
+    >>> conn2 = db.open()
+    >>> conn2._cache.total_estimated_size
+    0
+    >>> getattr(conn2.root, 'x', None)
+    >>> conn2._cache.total_estimated_size
+    128
+    >>> _ = conn2.root.z.name
+    >>> conn2._cache.total_estimated_size
+    320
+
+    If we deactivate, the size goes down:
+
+    >>> conn2.root.z._p_deactivate()
+    >>> conn2._cache.total_estimated_size
+    128
+
+    Loading data directly, rather than through traversal updates the cache
+    size correctly:
+
+    >>> conn3 = db.open()
+    >>> _ = conn3.get(conn2.root.z._p_oid).name
+    >>> conn3._cache.total_estimated_size
+    192
+    """
+
+
 def test_suite():
     s = unittest.makeSuite(DBMethods, 'check')
     s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
     s.addTest(unittest.makeSuite(CacheErrors, 'check'))
+    s.addTest(doctest.DocTestSuite())
     return s
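For reference, ZODB.tests.util.P, which the doctest above stores as conn.root.z, is essentially a small named persistent class. The following is only a sketch of the shape the test relies on (a constructor argument kept as .name); see ZODB.tests.util for the real definition.

# Rough sketch of what the doctest assumes about ZODB.tests.util.P:
# a Persistent subclass whose constructor argument is kept as .name,
# so that conn2.root.z.name forces the object to load into the cache.
import persistent

class P(persistent.Persistent):

    def __init__(self, name):
        self.name = name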