Commit f0b26152 authored by Tres Seaver

Give up on an argument lost 10 years ago.

parent 266cadf9
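The change below ends ZODB's long-standing use of a custom 'check' prefix for test methods: renaming each checkFoo to testFoo lets unittest's default loader, whose method prefix is 'test', discover the tests without an explicit prefix argument to unittest.makeSuite. A minimal sketch of the convention (the Sample class is illustrative, not part of the diff):

    import unittest

    class Sample(unittest.TestCase):
        def checkIt(self):   # invisible to the default loader
            pass
        def testIt(self):    # matched by the default 'test' prefix
            pass

    # Old style: collect methods via an explicit prefix.
    old = unittest.makeSuite(Sample, 'check')
    # New style after the rename: the default prefix suffices.
    new = unittest.makeSuite(Sample)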
@@ -96,12 +96,12 @@ class DBMethods(CacheTestBase):
         for i in range(4):
             self.noodle_new_connection()
 
-    def checkCacheDetail(self):
+    def testCacheDetail(self):
         for name, count in self.db.cacheDetail():
             self.assertEqual(isinstance(name, str), True)
             self.assertEqual(isinstance(count, int), True)
 
-    def checkCacheExtremeDetail(self):
+    def testCacheExtremeDetail(self):
         expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
         for dict in self.db.cacheExtremeDetail():
             for k, v in dict.items():
@@ -110,19 +110,19 @@ class DBMethods(CacheTestBase):
     # TODO: not really sure how to do a black box test of the cache.
     # Should the full sweep and minimize calls always remove things?
 
-    def checkFullSweep(self):
+    def testFullSweep(self):
         old_size = self.db.cacheSize()
         self.db.cacheFullSweep()
         new_size = self.db.cacheSize()
         self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
 
-    def checkMinimize(self):
+    def testMinimize(self):
         old_size = self.db.cacheSize()
         self.db.cacheMinimize()
         new_size = self.db.cacheSize()
         self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
 
-    def checkMinimizeTerminates(self):
+    def testMinimizeTerminates(self):
         # This is tricky. cPickleCache had a case where it could get into
         # an infinite loop, but we don't want the test suite to hang
         # if this bug reappears. So this test spawns a thread to run the
@@ -179,18 +179,18 @@ class DBMethods(CacheTestBase):
     # connection and database call it internally.
     # Same for the get and invalidate methods.
 
-    def checkLRUitems(self):
+    def testLRUitems(self):
         # get a cache
         c = self.conns[0]._cache
         c.lru_items()
 
-    def checkClassItems(self):
+    def testClassItems(self):
         c = self.conns[0]._cache
         c.klass_items()
 
 class LRUCacheTests(CacheTestBase):
 
-    def checkLRU(self):
+    def testLRU(self):
         # verify the LRU behavior of the cache
         dataset_size = 5
         CACHE_SIZE = dataset_size*2+1
@@ -229,7 +229,7 @@ class LRUCacheTests(CacheTestBase):
         # the root, depending on precise order of access. We do
         # not bother to check this
 
-    def checkSize(self):
+    def testSize(self):
         self.assertEqual(self.db.cacheSize(), 0)
         self.assertEqual(self.db.cacheDetailSize(), [])
@@ -253,7 +253,7 @@ class LRUCacheTests(CacheTestBase):
         #self.assertEquals(d['size'], CACHE_SIZE)
 
-    def checkDetail(self):
+    def testDetail(self):
         CACHE_SIZE = 10
         self.db.setCacheSize(CACHE_SIZE)
@@ -314,7 +314,7 @@ class CacheErrors(unittest.TestCase):
         self.jar = StubDataManager()
         self.cache = PickleCache(self.jar)
 
-    def checkGetBogusKey(self):
+    def testGetBogusKey(self):
         self.assertEqual(self.cache.get(p64(0)), None)
         try:
             self.cache[12]
@@ -335,7 +335,7 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("expected TypeError")
 
-    def checkBogusObject(self):
+    def testBogusObject(self):
         def add(key, obj):
             self.cache[key] = obj
@@ -366,7 +366,7 @@ class CacheErrors(unittest.TestCase):
         self.assertEqual(sys.getrefcount(None), nones)
 
-    def checkTwoCaches(self):
+    def testTwoCaches(self):
         jar2 = StubDataManager()
         cache2 = PickleCache(jar2)
@@ -383,7 +383,7 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("expected ValueError because object already in cache")
 
-    def checkReadOnlyAttrsWhenCached(self):
+    def testReadOnlyAttrsWhenCached(self):
         o = StubObject()
         key = o._p_oid = p64(1)
         o._p_jar = self.jar
@@ -401,7 +401,7 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("expect that you can't delete jar of cached object")
 
-    def checkTwoObjsSameOid(self):
+    def testTwoObjsSameOid(self):
         # Try to add two distinct objects with the same oid to the cache.
         # This has always been an error, but the error message prior to
         # ZODB 3.2.6 didn't make sense. This test verifies that (a) an
@@ -423,7 +423,7 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("two objects with the same oid should have failed")
 
-def check_basic_cache_size_estimation():
+def test_basic_cache_size_estimation():
     """Make sure the basic accounting is correct:
 
     >>> import ZODB.MappingStorage
@@ -481,8 +481,8 @@ size correctly:
 
 def test_suite():
-    s = unittest.makeSuite(DBMethods, 'check')
-    s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
-    s.addTest(unittest.makeSuite(CacheErrors, 'check'))
+    s = unittest.makeSuite(DBMethods)
+    s.addTest(unittest.makeSuite(LRUCacheTests))
+    s.addTest(unittest.makeSuite(CacheErrors))
     s.addTest(doctest.DocTestSuite())
     return s
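With the methods renamed, test_suite() can drop the 'check' prefix arguments. Note that newer Python releases deprecate unittest.makeSuite; if this suite were ported today it could be built with a TestLoader instead. A sketch under that assumption, not part of this commit:

    import doctest
    import unittest

    def test_suite():
        loader = unittest.TestLoader()
        s = loader.loadTestsFromTestCase(DBMethods)
        s.addTests(loader.loadTestsFromTestCase(LRUCacheTests))
        s.addTests(loader.loadTestsFromTestCase(CacheErrors))
        s.addTest(doctest.DocTestSuite())
        return s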