Commit 8a7f79e9 authored by Jim Fulton

Fixed a bug in object-cache size accounting. New objects weren't counted properly.
parent d628565b
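As a hedged illustration of the accounting this commit is about (the helper name below is invented; the two calls are the ones that move in the diff that follows): whenever an object's pickle is written or loaded, the connection's pickle cache is told how many bytes that oid now occupies, and the same figure is mirrored on the object itself, presumably so a later write can replace the old estimate rather than double-count it.

    # Sketch only, not ZODB's actual code: the size-accounting pair that this
    # commit relocates so that it also runs for newly added objects.
    def _account_for_pickle(cache, obj, pickle_data):
        # Tell the pickle cache how many bytes this oid occupies now, so the
        # byte-based cache limit can see it ...
        cache.update_object_size_estimation(obj._p_oid, len(pickle_data))
        # ... and remember the figure on the object, so a future store can
        # update the estimate instead of adding to it blindly.
        obj._p_estimated_size = len(pickle_data)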
@@ -8,6 +8,10 @@
Bugs Fixed
----------
- Sizes of new objects weren't added to the object cache size
estimation, causing the object-cache size limiting feature to let
the cache grow too large when many objects were added.
- Deleted records weren't removed when packing file storages.
- Fixed intermittent failures in the MVCCMappingStorage tests.
...
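The changelog entry above refers to ZODB's byte-based cache limit. As a hedged usage sketch (assuming the ZODB 3.9-era cache_size_bytes argument to DB(); not part of this diff), the limit only behaves correctly once newly committed objects are counted:

    import transaction
    import ZODB
    import ZODB.MappingStorage
    from persistent.mapping import PersistentMapping

    # Cap each connection's object cache at roughly 10 MB of pickled data.
    db = ZODB.DB(ZODB.MappingStorage.MappingStorage(),
                 cache_size_bytes=10 * 1024 * 1024)
    conn = db.open()
    conn.root()['child'] = PersistentMapping()   # a brand-new object
    transaction.commit()
    # With this fix, the new object's pickle size is reflected here, so the
    # cache can start evicting once the configured byte limit is exceeded.
    print(conn._cache.total_estimated_size)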
@@ -656,10 +656,7 @@ class Connection(ExportImport, object):
                obj._p_invalidate()
            else:
                s = self._storage.store(oid, serial, p, '', transaction)
                self._cache.update_object_size_estimation(oid, len(p))
                obj._p_estimated_size = len(p)
            self._store_count += 1
            # Put the object in the cache before handling the
            # response, just in case the response contains the
@@ -674,6 +671,9 @@ class Connection(ExportImport, object):
                else:
                    raise

            self._cache.update_object_size_estimation(oid, len(p))
            obj._p_estimated_size = len(p)

            self._handle_serial(s, oid)

    def _handle_serial(self, store_return, oid=None, change=1):
@@ -901,9 +901,7 @@ class Connection(ExportImport, object):
        self._reader.setGhostState(obj, p)
        obj._p_serial = serial
        self._cache.update_object_size_estimation(obj._p_oid, len(p))
        obj._p_estimated_size = len(p)

        # Blob support
@@ -1162,9 +1160,7 @@ class Connection(ExportImport, object):
            data, serial = src.load(oid, src)
            obj = self._cache.get(oid, None)
            if obj is not None:
                self._cache.update_object_size_estimation(obj._p_oid, len(data))
                obj._p_estimated_size = len(data)
            if isinstance(self._reader.getGhost(data), Blob):
                blobfilename = src.loadBlob(oid, serial)
...
@@ -18,21 +18,20 @@ purposes. It acts like a memo for unpickling. It also keeps recent
objects in memory under the assumption that they may be used again.
"""
import gc
import time
import unittest
import threading
from persistent.cPickleCache import PickleCache
from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.tests.MinPO import MinPO
from ZODB.utils import p64
from zope.testing import doctest
import gc
import threading
import time
import transaction
import unittest
import ZODB
import ZODB.MappingStorage
from ZODB.tests.MinPO import MinPO
import ZODB.tests.util
from ZODB.utils import p64
from persistent import Persistent
class CacheTestBase(ZODB.tests.util.TestCase):
@@ -418,8 +417,64 @@ class CacheErrors(unittest.TestCase):
        else:
            self.fail("two objects with the same oid should have failed")
def check_basic_cache_size_estimation():
    """Make sure the basic accounting is correct:

    >>> import ZODB.MappingStorage
    >>> db = ZODB.MappingStorage.DB()
    >>> conn = db.open()

    The cache is empty initially:

    >>> conn._cache.total_estimated_size
    0

    We force the root to be loaded and the cache grows:

    >>> getattr(conn.root, 'z', None)
    >>> conn._cache.total_estimated_size
    128

    We add some data and the cache grows:

    >>> conn.root.z = ZODB.tests.util.P('x'*100)
    >>> import transaction
    >>> transaction.commit()
    >>> conn._cache.total_estimated_size
    320

    Loading the objects in another connection gets the same sizes:

    >>> conn2 = db.open()
    >>> conn2._cache.total_estimated_size
    0
    >>> getattr(conn2.root, 'x', None)
    >>> conn2._cache.total_estimated_size
    128
    >>> _ = conn2.root.z.name
    >>> conn2._cache.total_estimated_size
    320

    If we deactivate, the size goes down:

    >>> conn2.root.z._p_deactivate()
    >>> conn2._cache.total_estimated_size
    128

    Loading data directly, rather than through traversal, updates the cache
    size correctly:

    >>> conn3 = db.open()
    >>> _ = conn3.get(conn2.root.z._p_oid).name
    >>> conn3._cache.total_estimated_size
    192
    """
def test_suite():
    s = unittest.makeSuite(DBMethods, 'check')
    s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
    s.addTest(unittest.makeSuite(CacheErrors, 'check'))
    s.addTest(doctest.DocTestSuite())
    return s
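For completeness, a small hedged sketch of how the extended suite (including the new doctest registered via DocTestSuite above) could be run with the stock unittest runner; the ZODB project itself normally drives these tests through its own test runner:

    if __name__ == '__main__':
        unittest.TextTestRunner(verbosity=2).run(test_suite())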