Joshua / zodb · Commits · 8a7f79e9

Commit 8a7f79e9 authored Jun 09, 2009 by Jim Fulton
Fixed a bug in object-cache size accounting. New objects weren't
counted properly.
Parent: d628565b
Showing 3 changed files with 74 additions and 19 deletions:

    src/CHANGES.txt              +4   -0
    src/ZODB/Connection.py       +6   -10
    src/ZODB/tests/testCache.py  +64  -9
src/CHANGES.txt

@@ -8,6 +8,10 @@
 Bugs Fixed
 ----------

+- Sizes of new objects weren't added to the object cache size
+  estimation, causing the object-cache size limiting feature to let
+  the cache grow too large when many objects were added.
+
 - Deleted records weren't removed when packing file storages.

 - Fixed intermittent failures in the MVCCMappingStorage tests.
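The size-limiting feature this entry refers to works from the connection
cache's total_estimated_size counter, so an undercounted total let caches
grow well past their configured byte budget. A minimal sketch of how the
limit is exercised, assuming the 3.9-era cache_size_bytes option and the
MinPO test class; the numbers are illustrative only:

    import transaction
    import ZODB.MappingStorage
    from ZODB.tests.MinPO import MinPO

    # Cap the per-connection cache at roughly 10kB of estimated state.
    db = ZODB.MappingStorage.DB(cache_size_bytes=10000)
    conn = db.open()

    for i in range(100):
        conn.root()[i] = MinPO('x' * 1000)   # ~1kB of payload each
    transaction.commit()

    # Before this fix, freshly stored objects contributed nothing to the
    # estimate, so the total stayed low and the limiter never engaged.
    print(conn._cache.total_estimated_size)
    conn.cacheGC()   # enforcement pass that consults the estimate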
src/ZODB/Connection.py

@@ -656,10 +656,7 @@ class Connection(ExportImport, object):
                 obj._p_invalidate()
             else:
                 s = self._storage.store(oid, serial, p, '', transaction)
-                self._cache.update_object_size_estimation(oid,
-                                                          len(p))
-                obj._p_estimated_size = len(p)
             self._store_count += 1

             # Put the object in the cache before handling the
             # response, just in case the response contains the
@@ -674,6 +671,9 @@ class Connection(ExportImport, object):
                 else:
                     raise
+            self._cache.update_object_size_estimation(oid, len(p))
+            obj._p_estimated_size = len(p)
+
             self._handle_serial(s, oid)

     def _handle_serial(self, store_return, oid=None, change=1):
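Why moving these two lines matters: in the old code the estimation update
ran before the object was put in the cache (the self._cache[oid] = obj
assignment sits in the lines elided between these two hunks), and the cache
can only attribute a size to an oid it already holds, so brand-new objects
contributed nothing to the total. A toy model of that ordering bug follows;
it is not the real cPickleCache, and the ignore-unknown-oid behavior is an
assumption consistent with the commit message:

    class ToyCache(object):
        def __init__(self):
            self.data = {}                  # oid -> object
            self.sizes = {}                 # oid -> last recorded size
            self.total_estimated_size = 0

        def __setitem__(self, oid, obj):
            self.data[oid] = obj

        def update_object_size_estimation(self, oid, size):
            if oid not in self.data:        # unknown oid: nothing to account
                return
            self.total_estimated_size += size - self.sizes.get(oid, 0)
            self.sizes[oid] = size

    cache = ToyCache()

    # Old order: update, then insert -- the new object's size is lost.
    cache.update_object_size_estimation('oid1', 100)
    cache['oid1'] = object()
    print(cache.total_estimated_size)       # 0

    # New order (this commit): insert, then update -- the size is counted.
    cache['oid2'] = object()
    cache.update_object_size_estimation('oid2', 100)
    print(cache.total_estimated_size)       # 100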
@@ -901,9 +901,7 @@ class Connection(ExportImport, object):
             self._reader.setGhostState(obj, p)
             obj._p_serial = serial
-            self._cache.update_object_size_estimation(obj._p_oid,
-                                                      len(p)
-                                                      )
+            self._cache.update_object_size_estimation(obj._p_oid, len(p))
             obj._p_estimated_size = len(p)

         # Blob support
@@ -1162,9 +1160,7 @@ class Connection(ExportImport, object):
                 data, serial = src.load(oid, src)
                 obj = self._cache.get(oid, None)
                 if obj is not None:
-                    self._cache.update_object_size_estimation(obj._p_oid,
-                                                              len(data)
-                                                              )
+                    self._cache.update_object_size_estimation(obj._p_oid, len(data))
                     obj._p_estimated_size = len(data)
                 if isinstance(self._reader.getGhost(data), Blob):
                     blobfilename = src.loadBlob(oid, serial)
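These last two hunks appear to be formatting-only (each wrapped call is
joined onto one line); the behavior they touch -- recording len(p) as an
object's estimated size whenever its state is loaded -- can be watched from
outside. A minimal sketch, assuming a MappingStorage-backed DB and the MinPO
test class; the printed numbers depend on pickle sizes:

    import transaction
    import ZODB.MappingStorage
    from ZODB.tests.MinPO import MinPO

    db = ZODB.MappingStorage.DB()
    conn = db.open()
    conn.root()['obj'] = MinPO(7)
    transaction.commit()

    obj = conn.root()['obj']
    obj._p_activate()                # drives the load path patched above
    print(obj._p_estimated_size)     # set from the pickle length

    before = conn._cache.total_estimated_size
    obj._p_deactivate()              # ghosting releases the object's share
    print(conn._cache.total_estimated_size < before)   # True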
src/ZODB/tests/testCache.py

@@ -18,21 +18,20 @@ purposes. It acts like a memo for unpickling. It also keeps recent
 objects in memory under the assumption that they may be used again.
 """

-import gc
-import time
-import unittest
-import threading
-
 from persistent.cPickleCache import PickleCache
-from persistent import Persistent
 from persistent.mapping import PersistentMapping
-from ZODB.tests.MinPO import MinPO
-from ZODB.utils import p64
-
+from zope.testing import doctest
+import gc
+import threading
+import time
 import transaction
-
+import unittest
 import ZODB
 import ZODB.MappingStorage
+from ZODB.tests.MinPO import MinPO
 import ZODB.tests.util
+from ZODB.utils import p64
+from persistent import Persistent
+

 class CacheTestBase(ZODB.tests.util.TestCase):
@@ -418,8 +417,64 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("two objects with the same oid should have failed")

+def check_basic_cache_size_estimation():
+    """Make sure the basic accounting is correct:
+
+    >>> import ZODB.MappingStorage
+    >>> db = ZODB.MappingStorage.DB()
+    >>> conn = db.open()
+
+    The cache is empty initially:
+
+    >>> conn._cache.total_estimated_size
+    0
+
+    We force the root to be loaded and the cache grows:
+
+    >>> getattr(conn.root, 'z', None)
+    >>> conn._cache.total_estimated_size
+    128
+
+    We add some data and the cache grows:
+
+    >>> conn.root.z = ZODB.tests.util.P('x'*100)
+    >>> import transaction
+    >>> transaction.commit()
+    >>> conn._cache.total_estimated_size
+    320
+
+    Loading the objects in another connection gets the same sizes:
+
+    >>> conn2 = db.open()
+    >>> conn2._cache.total_estimated_size
+    0
+    >>> getattr(conn2.root, 'x', None)
+    >>> conn2._cache.total_estimated_size
+    128
+    >>> _ = conn2.root.z.name
+    >>> conn2._cache.total_estimated_size
+    320
+
+    If we deactivate, the size goes down:
+
+    >>> conn2.root.z._p_deactivate()
+    >>> conn2._cache.total_estimated_size
+    128
+
+    Loading data directly, rather than through traversal updates the cache
+    size correctly:
+
+    >>> conn3 = db.open()
+    >>> _ = conn3.get(conn2.root.z._p_oid).name
+    >>> conn3._cache.total_estimated_size
+    192
+    """
+
 def test_suite():
     s = unittest.makeSuite(DBMethods, 'check')
     s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
     s.addTest(unittest.makeSuite(CacheErrors, 'check'))
+    s.addTest(doctest.DocTestSuite())
     return s
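The new doctest leans on a ZODB.tests.util.P helper exposing a name
attribute; a minimal stand-in would look like the sketch below (a
hypothetical reconstruction -- the real class ships in ZODB.tests.util).
The literal totals it checks (128, 320, 192) are whatever the current
pickle format produces, not stable constants:

    import persistent

    class P(persistent.Persistent):
        # Hypothetical stand-in for ZODB.tests.util.P as used by the
        # doctest: a small persistent object whose state is dominated
        # by its name value.
        def __init__(self, name):
            self.name = name

The suite picks the doctest up through doctest.DocTestSuite(), which
collects docstring tests from the defining module, so the new
s.addTest(...) line is all the registration needed.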