Commit ed81c6f7 authored by I want to see all private code of nexedi, committed by GitHub

Merge pull request #106 from zopefoundation/cache_size_parameter_enhancement

Allow sweeping cache without cache_size and with cache_size_bytes only.
parents 7b7ce089 071c2cd5
@@ -16,6 +16,9 @@
 - The Python implementation raises ``AttributeError`` if a
   persistent class doesn't have a ``p_jar`` attribute.
+- Allow sweeping the cache without ``cache_size``: ``cache_size_bytes``
+  works with ``cache_size=0``, so there is no need to set ``cache_size``
+  to a large value.

 4.4.3 (2018-10-22)
 ------------------
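As a quick illustration of the behaviour this entry describes, here is a minimal sketch (not part of the patch) that mirrors the test setup further down in this diff; ``DummyConnection`` and the attribute assignments are illustrative stand-ins for a real ZODB connection.

```python
from persistent.picklecache import PickleCache


class DummyConnection(object):
    """Illustrative stand-in for a ZODB connection (not from the patch)."""
    def register(self, obj):
        pass


jar = DummyConnection()
cache = PickleCache(jar)
jar._cache = cache

# Previously, cache_size=0 made a sweep evict every non-ghost object, so a
# byte-only limit required setting cache_size to a large dummy value.
# With this change the byte limit stands on its own:
cache.cache_size = 0            # no object-count limit
cache.cache_size_bytes = 400    # evict LRU objects while the estimated size exceeds 400 bytes
cache.incrgc()
```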
@@ -193,9 +193,23 @@ scan_gc_items(ccobject *self, int target, Py_ssize_t target_bytes)
      */
     insert_after(&before_original_home, self->ring_home.r_prev);
     here = self->ring_home.r_next;   /* least recently used object */
+    /* All objects should be deactivated when both the object count
+     * parameter (target) and the size limit parameter in bytes
+     * (target_bytes) are zero.
+     *
+     * Otherwise, objects should be collected while one of the following
+     * conditions is true:
+     * - the non-ghost count is bigger than the object count limit
+     *   (target);
+     * - the estimated size in bytes is bigger than the size limit in
+     *   bytes (target_bytes).
+     */
     while (here != &before_original_home &&
-           (self->non_ghost_count > target
-            || (target_bytes && self->total_estimated_size > target_bytes)
+           (
+            (!target && !target_bytes) ||
+            (
+             (target && self->non_ghost_count > target) ||
+             (target_bytes && self->total_estimated_size > target_bytes)
+            )
            )
         )
     {
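Spelled out in Python for clarity, the new loop condition is equivalent to the predicate below. This is only an illustrative transliteration of the C code above, not code from the patch; the ring-traversal guard ``here != &before_original_home`` is omitted.

```python
def keep_sweeping(non_ghost_count, estimated_size, target, target_bytes):
    """Illustrative transliteration of the new scan_gc_items loop condition."""
    if not target and not target_bytes:
        # Both limits are zero: deactivate everything (full_sweep behaviour).
        return True
    # Each limit only applies when it is non-zero.
    return bool((target and non_ghost_count > target) or
                (target_bytes and estimated_size > target_bytes))


# Under the old condition, target=0 reduced to "non_ghost_count > 0", so a
# bytes-only configuration still evicted every object.  With the new one:
assert keep_sweeping(100, 6400, target=0, target_bytes=400)   # over the byte limit: keep evicting
assert not keep_sweeping(6, 384, target=0, target_bytes=400)  # at or under it: stop
```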
@@ -336,7 +336,10 @@ class PickleCache(object):
         i = -1
         to_eject = []
         for value in self.ring:
-            if self.non_ghost_count <= target and (self.total_estimated_size <= target_size_bytes or not target_size_bytes):
+            if ((target or target_size_bytes)
+                    and (not target or self.non_ghost_count <= target)
+                    and (self.total_estimated_size <= target_size_bytes
+                         or not target_size_bytes)):
                 break
             i += 1
             if value._p_state == UPTODATE:
@@ -1070,6 +1070,50 @@ class PythonPickleCacheTests(PickleCacheTests):
         candidate._p_jar = None
         self.assertRaises(KeyError, cache.new_ghost, key, candidate)

+    @with_deterministic_gc
+    def test_cache_garbage_collection_bytes_with_cache_size_0(
+            self, force_collect=_is_pypy or _is_jython):
+        class MyPersistent(self._getDummyPersistentClass()):
+            def _p_deactivate(self):
+                # Mimic what the real persistent object does to update
+                # the cache size; if we don't get deactivated by
+                # sweeping, the cache size won't shrink, so this also
+                # validates that _p_deactivate gets called when
+                # ejecting an object.
+                cache.update_object_size_estimation(self._p_oid, -1)
+
+        cache = self._makeOne()
+        cache.cache_size = 0
+        cache.cache_size_bytes = 400
+        oids = []
+        for i in range(100):
+            oid = self._numbered_oid(i)
+            oids.append(oid)
+            o = cache[oid] = self._makePersist(oid=oid,
+                                               kind=MyPersistent,
+                                               state=UPTODATE)
+            # Must start at 0; ZODB sets it AFTER updating the size.
+            o._Persistent__size = 0
+            cache.update_object_size_estimation(oid, 1)
+            o._Persistent__size = 1
+            del o  # leave it only in the cache
+        self.assertEqual(cache.cache_non_ghost_count, 100)
+        self.assertEqual(cache.total_estimated_size, 64 * 100)
+
+        cache.incrgc()
+        gc.collect()  # banish the ghosts who are no longer in the ring
+        self.assertEqual(cache.total_estimated_size, 64 * 6)
+        self.assertEqual(cache.cache_non_ghost_count, 6)
+        self.assertEqual(len(cache), 6)
+
+        cache.full_sweep()
+        gc.collect()  # banish the ghosts who are no longer in the ring
+        self.assertEqual(cache.total_estimated_size, 0)
+        self.assertEqual(cache.cache_non_ghost_count, 0)
+        self.assertEqual(len(cache), 0)
+

 @skipIfNoCExtension
 class CPickleCacheTests(PickleCacheTests):
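A note on the constants used by this test: each object's estimated size of 1 is rounded up to a 64-byte accounting unit (that granularity is inferred from the test's own ``64 * 100`` assertion, not from this patch), so the expected survivor count under a 400-byte limit works out as follows. The same arithmetic applies to the C-extension variant of the test in the next hunk.

```python
# Illustrative arithmetic behind the assertions above (not part of the patch).
UNIT = 64                   # per-object accounting granularity implied by the tests
total = 100 * UNIT          # 100 objects of estimated size 1 -> 6400 bytes accounted
assert total == 64 * 100

limit = 400                 # cache_size_bytes used by the tests
survivors = limit // UNIT   # 6 objects (384 bytes) fit; a 7th (448 bytes) would exceed 400
assert survivors == 6
assert survivors * UNIT <= limit < (survivors + 1) * UNIT
```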
@@ -1088,6 +1132,49 @@ class CPickleCacheTests(PickleCacheTests):
         cache = super(CPickleCacheTests, self).test___setitem___persistent_class()
         self.assertEqual(_len(cache.items()), 1)

+    def test_cache_garbage_collection_bytes_with_cache_size_0(self):
+        class DummyConnection(object):
+            def register(self, obj):
+                pass
+
+        dummy_connection = DummyConnection()
+        dummy_connection.register(1)  # for coveralls
+
+        def makePersistent(oid):
+            persist = self._getDummyPersistentClass()()
+            persist._p_oid = oid
+            persist._p_jar = dummy_connection
+            return persist
+
+        cache = self._getTargetClass()(dummy_connection)
+        dummy_connection._cache = cache
+        cache.cache_size = 0
+        cache.cache_size_bytes = 400
+        oids = []
+        for i in range(100):
+            oid = self._numbered_oid(i)
+            oids.append(oid)
+            o = cache[oid] = makePersistent(oid)
+            cache.update_object_size_estimation(oid, 1)
+            o._p_estimated_size = 1
+            del o  # leave it only in the cache
+        self.assertEqual(cache.cache_non_ghost_count, 100)
+        self.assertEqual(cache.total_estimated_size, 64 * 100)
+
+        cache.incrgc()
+        self.assertEqual(cache.total_estimated_size, 64 * 6)
+        self.assertEqual(cache.cache_non_ghost_count, 6)
+        self.assertEqual(len(cache), 6)
+
+        cache.full_sweep()
+        gc.collect()  # banish the ghosts who are no longer in the ring
+        self.assertEqual(cache.total_estimated_size, 0)
+        self.assertEqual(cache.cache_non_ghost_count, 0)
+        self.assertEqual(len(cache), 0)
+

 class DummyPersistent(object):