Commit adcfd0ad authored by Jim Fulton's avatar Jim Fulton

Bug Fixed:

  Saving indexes for large file storages failed (with the error:
  RuntimeError: maximum recursion depth exceeded).  This can cause a
  FileStorage to fail to start because it gets an error trying to save
  its index.
parent 51ed08be
...@@ -8,6 +8,11 @@
Bugs Fixed
----------
- Saving indexes for large file storages failed (with the error:
RuntimeError: maximum recursion depth exceeded). This can cause a
FileStorage to fail to start because it gets an error trying to save
its index.
- Sizes of new objects weren't added to the object cache size
  estimation, causing the object-cache size limiting feature to let
  the cache grow too large when many objects were added.
......
...@@ -33,6 +33,7 @@ from ZODB.POSException import UndoError, POSKeyError, MultipleUndoErrors
from ZODB.utils import p64, u64, z64
import base64
import BTrees.OOBTree
import errno
import logging
import os
...@@ -248,7 +249,16 @@ class FileStorage(
f=open(tmp_name,'wb')
p=Pickler(f,1)
info={'index': self._index, 'pos': self._pos}
# Pickle the index buckets first to avoid deep recursion:
buckets = []
bucket = self._index._data._firstbucket
while bucket is not None:
buckets.append(bucket)
bucket = bucket._next
buckets.reverse()
info=BTrees.OOBTree.Bucket(dict(
_buckets=buckets, index=self._index, pos=self._pos))
p.dump(info)
f.flush()
......
...@@ -125,6 +125,49 @@ def pack_with_repeated_blob_records():
>>> db.close()
"""
def _save_index():
    """
    _save_index can fail for large indexes.

    Build a storage whose index has many entries (widely-spaced oids force
    many index buckets):

    >>> import ZODB.utils
    >>> fs = ZODB.FileStorage.FileStorage('data.fs')

    >>> t = transaction.begin()
    >>> fs.tpc_begin(t)
    >>> oid = 0
    >>> for i in range(5000):
    ...     oid += (1<<16)
    ...     _ = fs.store(ZODB.utils.p64(oid), ZODB.utils.z64, 'x', '', t)
    >>> fs.tpc_vote(t)
    >>> fs.tpc_finish(t)

    Saving must succeed even under a tiny recursion limit — this is the
    regression being tested (pickling the index used to recurse per bucket):

    >>> import sys
    >>> old_limit = sys.getrecursionlimit()
    >>> sys.setrecursionlimit(50)
    >>> fs._save_index()

    Make sure we can restore:

    >>> import logging
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> logger = logging.getLogger('ZODB.FileStorage')
    >>> logger.setLevel(logging.DEBUG)
    >>> logger.addHandler(handler)
    >>> index, pos, tid = fs._restore_index()
    >>> index.items() == fs._index.items()
    True
    >>> pos, tid = fs._pos, fs._tid

    cleanup

    >>> logger.setLevel(logging.NOTSET)
    >>> logger.removeHandler(handler)
    >>> sys.setrecursionlimit(old_limit)
    """
def test_suite():
    return unittest.TestSuite((
        doctest.DocFileSuite(
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment