Commit a3b47217 authored by Jim Fulton

Allow client caches larger than 4G.

parent 911e8f5d
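
The gist: instead of recording all of a cache file's free space as a single
free record (whose 4-byte size field caps the file near 4G), free space is
now carved into records of at most max_block_size bytes. A minimal sketch of
what this enables, assuming only the ClientCache constructor used in the
tests below (the path and 6G size are illustrative):

import ZEO.cache

# Illustrative only: a 6G on-disk client cache; path and size are made up.
cache = ZEO.cache.ClientCache('client-0.zec', size=6 * (1 << 30))
cache.close()

# Reopening the same file succeeds; its free space is recorded as several
# free blocks of at most (1 << 31) - 1 bytes each.
cache = ZEO.cache.ClientCache('client-0.zec', size=6 * (1 << 30))
cache.close()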
@@ -37,9 +37,9 @@ New Features
- Object saves are a little faster
- The previous (ZODB 3.8) ZEO clien-cache format is supported.
- The previous (ZODB 3.8) ZEO client-cache format is supported.
The newer cache format introduced in ZODB 3.9.0a1 is no longer
supported and cache files are limited to 4G.
supported. Cache files can still be larger than 4G.
3.9.0a4 (2008-11-06)
====================
@@ -82,6 +82,12 @@ logger = logging.getLogger("ZEO.cache")
magic = "ZEC3"
ZEC_HEADER_SIZE = 12
# Maximum block size. Note that while we are doing a store, we may
# need to write a free block that is almost twice as big. If we die
# in the middle of a store, then we need to split the large free records
# while opening.
max_block_size = (1<<31) - 1
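# (During a store we may thus have to record a free block of almost
# 2*max_block_size bytes; 2*((1<<31) - 1) == (1<<32) - 2 still fits the
# 4-byte ">I" size field, and such an oversized record is split back into
# two blocks the next time the cache is opened.)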
# After the header, the file contains a contiguous sequence of blocks. All
# blocks begin with a one-byte status indicator:
#
@@ -203,8 +209,12 @@ class ClientCache(object):
self.f.seek(0)
self.f.write(magic)
self.f.write(z64)
# and one free block.
self.f.write('f' + pack(">I", self.maxsize - ZEC_HEADER_SIZE))
# add as many free blocks as are needed to fill the space
nfree = self.maxsize - ZEC_HEADER_SIZE
for i in range(0, nfree, max_block_size):
block_size = min(max_block_size, nfree-i)
self.f.write('f' + pack(">I", block_size))
self.f.seek(block_size-5, 1)
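# (each free record starts with 5 bytes -- the 'f' status byte plus the
# 4-byte size -- so the relative seek above lands at the start of the
# next block)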
sync(self.f)
# Statistics: _n_adds, _n_added_bytes,
@@ -273,6 +283,14 @@ class ClientCache(object):
l += 1
elif status == 'f':
size, = unpack(">I", read(4))
if size > max_block_size:
# Oops, we either have an old cache, or we
# crashed while storing. Split this block into two.
assert size <= max_block_size*2
seek(ofs+max_block_size)
self.f.write('f'+pack(">I", size-max_block_size))
seek(ofs)
self.f.write('f'+pack(">I", max_block_size))
elif status in '1234':
size = int(status)
else:
@@ -506,7 +524,7 @@ class ClientCache(object):
# 2nd-level ZEO cache got a much higher hit rate if "very large"
# objects simply weren't cached. For now, we ignore the request
# only if the entire cache file is too small to hold the object.
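# With record sizes capped at max_block_size, an object larger than
# that can never fit in a single block, so such stores are skipped too.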
if size > self.maxsize - ZEC_HEADER_SIZE:
if size > min(max_block_size, self.maxsize - ZEC_HEADER_SIZE):
return
self._n_adds += 1
@@ -18,10 +18,12 @@ from zope.testing import doctest
import os
import random
import string
import struct
import sys
import tempfile
import unittest
import ZEO.cache
import ZODB.tests.util
import zope.testing.setupstack
import ZEO.cache
@@ -61,17 +63,19 @@ def oid(o):
return repr_to_oid(repr)
tid = oid
class CacheTests(unittest.TestCase):
class CacheTests(ZODB.tests.util.TestCase):
def setUp(self):
# The default cache size is much larger than we need here. Since
# testSerialization reads the entire file into a string, it's not
# good to leave it that big.
ZODB.tests.util.TestCase.setUp(self)
self.cache = ZEO.cache.ClientCache(size=1024**2)
def tearDown(self):
if self.cache.path:
os.remove(self.cache.path)
ZODB.tests.util.TestCase.tearDown(self)
def testLastTid(self):
self.assertEqual(self.cache.getLastTid(), None)
@@ -192,6 +196,35 @@ class CacheTests(unittest.TestCase):
# recorded as non-current.
self.assert_(1 not in cache.noncurrent)
def testVeryLargeCaches(self):
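# 1<<33 is 8G, twice the old 4G ceiling.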
cache = ZEO.cache.ClientCache('cache', size=(1<<33))
cache.store(n1, n2, None, "x")
cache.close()
cache = ZEO.cache.ClientCache('cache', size=(1<<33))
self.assertEquals(cache.load(n1), ('x', n2))
cache.close()
def testConversionOfLargeFreeBlocks(self):
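# Hand-build a file that looks like an old (or interrupted) cache:
# a 12-byte header followed by a single free record whose size,
# (1<<32)-12, exceeds max_block_size.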
f = open('cache', 'wb')
f.write(ZEO.cache.magic+
'\0'*8 +
'f'+struct.pack(">I", (1<<32)-12)
)
f.seek((1<<32)-1)
f.write('x')
f.close()
cache = ZEO.cache.ClientCache('cache', size=1<<32)
cache.close()
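# Reopen to make sure the converted (split) file is still usable.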
cache = ZEO.cache.ClientCache('cache', size=1<<32)
cache.close()
f = open('cache', 'rb')
f.seek(12)
self.assertEquals(f.read(1), 'f')
self.assertEquals(struct.unpack(">I", f.read(4))[0],
ZEO.cache.max_block_size)
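# (the remainder of the original record, (1<<32) - 12 - max_block_size
# bytes, should follow as a second free block)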
f.close()
__test__ = dict(
kill_does_not_cause_cache_corruption =
r"""