ZODB · commit a245abee
authored Dec 22, 2009 by Jim Fulton

    Cleaned up whitespace and try to avoid a close race.

parent f65ea6d8

Showing 1 changed file with 21 additions and 19 deletions:

  src/ZEO/cache.py  (+21, -19)
@@ -39,19 +39,19 @@ from ZODB.utils import p64, u64, z64
 logger = logging.getLogger("ZEO.cache")
 
 # A disk-based cache for ZEO clients.
-#
+#
 # This class provides an interface to a persistent, disk-based cache
 # used by ZEO clients to store copies of database records from the
 # server.
-#
+#
 # The details of the constructor as unspecified at this point.
-#
+#
 # Each entry in the cache is valid for a particular range of transaction
 # ids. The lower bound is the transaction that wrote the data. The
 # upper bound is the next transaction that wrote a revision of the
 # object. If the data is current, the upper bound is stored as None;
 # the data is considered current until an invalidate() call is made.
-#
+#
 # It is an error to call store() twice with the same object without an
 # intervening invalidate() to set the upper bound on the first cache
 # entry. Perhaps it will be necessary to have a call the removes
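The comment block in this hunk pins down the validity interval of a cache entry: valid from the transaction that wrote the data up to (but not including) the next transaction that revised the object, with None as the upper bound for current data. A rough illustration of that rule, not code from the module (only the start_tid/end_tid/invalidate names echo the comment; the helper itself is hypothetical):

    # Hypothetical check of the validity interval described in the comments.
    def entry_is_valid(start_tid, end_tid, tid):
        if end_tid is None:
            # Current data: valid from start_tid until an invalidate() call
            # supplies an upper bound.
            return tid >= start_tid
        # Non-current data: valid for start_tid <= tid < end_tid.
        return start_tid <= tid < end_tid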
@@ -59,12 +59,12 @@ logger = logging.getLogger("ZEO.cache")
 # entry.
 
 # Cache verification
-#
+#
 # When the client is connected to the server, it receives
 # invalidations every time an object is modified. When the client is
 # disconnected then reconnects, it must perform cache verification to make
 # sure its cached data is synchronized with the storage's current state.
-#
+#
 # quick verification
 # full verification
 #
@@ -206,7 +206,7 @@ class ClientCache(object):
             self.f = tempfile.TemporaryFile()
             self.f.write(magic + z64)
             logger.info("created temporary cache file %r", self.f.name)
 
         self._initfile(fsize)
 
         # Statistics: _n_adds, _n_added_bytes,
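This hunk is the branch taken when the cache is constructed without a file name: it falls back to an anonymous tempfile.TemporaryFile() and stamps it with the magic header. A minimal usage sketch, assuming the constructor accepts path and size keyword arguments (the size value here is arbitrary, purely illustrative):

    from ZEO.cache import ClientCache

    # path=None: the cache lives in an anonymous temporary file, as in the
    # constructor code above, and disappears when closed.  Passing a string
    # path instead would give a persistent cache file.
    cache = ClientCache(path=None, size=20 * 1024 * 1024)
    cache.close()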
@@ -328,7 +328,7 @@ class ClientCache(object):
                     seek(block_size - 5, 1)
             sync(self.f)
 
-            # There is always data to read and
+            # There is always data to read and
             assert last and status in ' f1234'
             first_free_offset = last
         else:
@@ -358,7 +358,7 @@ class ClientCache(object):
         except KeyError:
             logger.error("Couldn't find non-current %r", (oid, tid))
 
     def clearStats(self):
         self._n_adds = self._n_added_bytes = 0
         self._n_evicts = self._n_evicted_bytes = 0
@@ -379,10 +379,12 @@ class ClientCache(object):
     # Close the underlying file. No methods accessing the cache should be
     # used after this.
     def close(self):
-        if self.f:
-            sync(self.f)
-            self.f.close()
-            self.f = None
+        f = self.f
+        self.f = None
+        if f is not None:
+            sync(f)
+            f.close()
         if hasattr(self, '_lock_file'):
             self._lock_file.close()
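This hunk is the "close race" part of the commit message. Instead of testing self.f and closing it in place, close() now moves the file object into a local, clears the attribute, and only then syncs and closes, so another thread reading self.f sees either the open file or None rather than a file that is halfway through being closed. It narrows the window rather than eliminating it (hence "try to avoid"), and it does so without taking a lock. A generic sketch of the same swap-then-close idiom, not ZEO code (the Cache class and read_header are made up for illustration):

    class Cache:
        def __init__(self, f):
            self.f = f

        def close(self):
            # Detach the file before closing it, mirroring the hunk above:
            # readers of self.f observe either the open file or None.
            f = self.f
            self.f = None
            if f is not None:
                f.flush()
                f.close()

        def read_header(self):
            f = self.f
            if f is None:
                raise ValueError("cache is closed")
            f.seek(0)
            return f.read(4)

    if __name__ == "__main__":
        import tempfile
        c = Cache(tempfile.TemporaryFile())
        c.read_header()   # empty bytes on a fresh temporary file
        c.close()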
@@ -479,7 +481,7 @@ class ClientCache(object):
             data = read(ldata)
             assert len(data) == ldata, (ofs, self.f.tell(), oid, len(data), ldata)
-            assert read(8) == oid, (ofs, self.f.tell(), oid)
+            assert read(8) == oid, (ofs, self.f.tell(), oid)
 
             self._n_accesses += 1
             self._trace(0x22, oid, tid, end_tid, ldata)
@@ -517,11 +519,11 @@ class ClientCache(object):
             data = read(ldata)
             assert len(data) == ldata, (ofs, self.f.tell())
             assert read(8) == oid, (ofs, self.f.tell(), oid)
 
             if end_tid < before_tid:
                 self._trace(0x24, oid, "", before_tid)
                 return None
 
             self._n_accesses += 1
             self._trace(0x26, oid, "", saved_tid)
             return data, saved_tid, end_tid
@@ -613,7 +615,7 @@ class ClientCache(object):
         else:
             self.current[oid] = ofs
             self._trace(0x52, oid, start_tid, dlen=len(data))
 
         self.currentofs += size
 
     ##
@@ -635,7 +637,7 @@ class ClientCache(object):
    # to threading issues, that when applying a local
    # invalidation after a store, that later invalidations from
    # the server may already have arrived.
    @locked
    def invalidate(self, oid, tid, server_invalidation=True):
        if tid is not None:
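invalidate() runs under the @locked decorator visible in this hunk. The decorator itself is outside the diff; a typical shape for such a guard, assuming the instance holds a self._lock that the wrapper acquires around the method body (an assumption, not the module's actual definition):

    import functools
    import threading

    def locked(func):
        # Assumed shape of the guard: run the wrapped method with the
        # instance's lock held, so stores and invalidations from different
        # threads cannot interleave inside the cache.
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            with self._lock:
                return func(self, *args, **kwargs)
        return wrapper

    class Example:
        def __init__(self):
            self._lock = threading.RLock()

        @locked
        def invalidate(self, oid, tid, server_invalidation=True):
            pass  # body elided; illustration only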
@@ -750,7 +752,7 @@ class ClientCache(object):
             except:
                 print `tid`, `end_tid`
                 raise
         self._trace = _trace
         _trace(0x00)