Kirill Smelkov / ZODB · Commits · e261193a

Commit e261193a, authored Sep 17, 2008 by Jim Fulton
Removed the transaction id cache.
Parent: d92a6d06

Showing 1 changed file with 21 additions and 87 deletions:

src/ZODB/FileStorage/FileStorage.py (+21 / -87)
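This commit drops FileStorage's per-object cache of last-committed transaction ids. Before the change, store() and getTid() consulted _oid2tid (an oid -> last-committed-tid map, kept transactionally consistent through the _toid2tid and _toid2tid_delete staging structures) so that finding an object's current tid did not require a seek into the data file; after the change, both paths simply read the data header at the object's current file position. What follows is a minimal sketch of the two lookup strategies, not FileStorage's actual code: SketchStorage, DataHeader and read_data_header here are simplified stand-ins.

# Minimal sketch only; names are simplified stand-ins, not ZODB internals.

class DataHeader:
    def __init__(self, tid):
        self.tid = tid

def read_data_header(storage, pos, oid):
    # Stand-in for seeking into the data file and parsing the record header
    # at position `pos` (the expensive operation the cache tried to avoid).
    return DataHeader(storage._records[pos])

class SketchStorage:
    def __init__(self, records, index):
        self._records = records   # pos -> tid of the record written there
        self._index = index       # oid -> file position of the current record
        self._oid2tid = {}        # the cache this commit removes

    def get_tid_cached(self, oid):
        # Old approach: try the oid -> last-committed-tid cache first,
        # fall back to a file read on a miss, and remember the answer.
        tid = self._oid2tid.get(oid)
        if tid is None:
            tid = read_data_header(self, self._index[oid], oid).tid
            self._oid2tid[oid] = tid
        return tid

    def get_tid_direct(self, oid):
        # New approach (this commit): always read the current record header.
        return read_data_header(self, self._index[oid], oid).tid

The comment block removed below records the original motivation: at roughly 7 ms per random seek + read (mid-2003 measurements), a transaction storing 4000 objects would spend about 28 seconds just finding old tids.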
@@ -44,8 +44,6 @@ from ZODB.FileStorage.format import CorruptedDataError
 from ZODB.loglevels import BLATHER
 from ZODB.fsIndex import fsIndex
-import BTrees.OOBTree
 
 packed_version = "FS21"
 
 logger = logging.getLogger('ZODB.FileStorage')
@@ -125,8 +123,8 @@ class FileStorage(BaseStorage.BaseStorage,
         BaseStorage.BaseStorage.__init__(self, file_name)
 
-        (index, tindex, oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
-        self._initIndex(index, tindex, oid2tid, toid2tid, toid2tid_delete)
+        index, tindex = self._newIndexes()
+        self._initIndex(index, tindex)
 
         # Now open the file
@@ -160,7 +158,7 @@ class FileStorage(BaseStorage.BaseStorage,
             self._used_index = 1 # Marker for testing
             index, start, ltid = r
 
-            self._initIndex(index, tindex, oid2tid, toid2tid, toid2tid_delete)
+            self._initIndex(index, tindex)
             self._pos, self._oid, tid = read_index(
                 self._file, file_name, index, tindex, stop,
                 ltid=ltid, start=start, read_only=read_only,
@@ -194,37 +192,17 @@ class FileStorage(BaseStorage.BaseStorage,
         self._quota = quota
 
-        # tid cache statistics.
-        self._oid2tid_nlookups = self._oid2tid_nhits = 0
-
-    def _initIndex(self, index, tindex, oid2tid, toid2tid, toid2tid_delete):
+    def _initIndex(self, index, tindex):
         self._index = index
         self._tindex = tindex
         self._index_get = index.get
 
-        # .store() needs to compare the passed-in serial to the
-        # current tid in the database. _oid2tid caches the oid ->
-        # current tid mapping. The point is that otherwise seeking into the
-        # storage is needed to extract the current tid, and that's
-        # an expensive operation. For example, if a transaction
-        # stores 4000 objects, and each random seek + read takes 7ms
-        # (that was approximately true on Linux and Windows tests in
-        # mid-2003), that's 28 seconds just to find the old tids.
-        # TODO: Probably better to junk this and redefine _index as mapping
-        # oid to (offset, tid) pair, via a new memory-efficient BTree type.
-        self._oid2tid = oid2tid
-        # oid->tid map to transactionally add to _oid2tid.
-        self._toid2tid = toid2tid
-        # Set of oids to transactionally delete from _oid2tid (e.g.,
-        # oids reverted by undo).
-        self._toid2tid_delete = toid2tid_delete
-
     def __len__(self):
         return len(self._index)
 
     def _newIndexes(self):
         # hook to use something other than builtin dict
-        return fsIndex(), {}, BTrees.OOBTree.OOBTree(), {}, {}
+        return fsIndex(), {}
 
     _saved = 0
     def _save_index(self):
@@ -394,27 +372,6 @@ class FileStorage(BaseStorage.BaseStorage,
                 # Log the error and continue
                 logger.error("Error saving index on close()", exc_info=True)
 
-    # Return tid of most recent record for oid if that's in the
-    # _oid2tid cache. Else return None. It's important to use this
-    # instead of indexing _oid2tid directly so that cache statistics
-    # can be logged.
-    def _get_cached_tid(self, oid):
-        self._oid2tid_nlookups += 1
-        result = self._oid2tid.get(oid)
-        if result is not None:
-            self._oid2tid_nhits += 1
-
-        # Log a msg every ~8000 tries.
-        if self._oid2tid_nlookups & 0x1fff == 0:
-            logger.log(BLATHER,
-                       "_oid2tid size %s lookups %s hits %s rate %.1f%%",
-                       len(self._oid2tid),
-                       self._oid2tid_nlookups,
-                       self._oid2tid_nhits,
-                       100.0 * self._oid2tid_nhits /
-                       self._oid2tid_nlookups)
-
-        return result
-
     def getSize(self):
         return self._pos
@@ -499,20 +456,19 @@ class FileStorage(BaseStorage.BaseStorage,
             if oid > self._oid:
                 self.set_max_oid(oid)
             old = self._index_get(oid, 0)
-            cached_tid = None
+            committed_tid = None
             pnv = None
             if old:
-                cached_tid = self._get_cached_tid(oid)
-                if cached_tid is None:
-                    h = self._read_data_header(old, oid)
-                    cached_tid = h.tid
+                h = self._read_data_header(old, oid)
+                committed_tid = h.tid
 
-                if oldserial != cached_tid:
-                    rdata = self.tryToResolveConflict(oid, cached_tid,
+                if oldserial != committed_tid:
+                    rdata = self.tryToResolveConflict(oid, committed_tid,
                                                       oldserial, data)
                     if rdata is None:
                         raise POSException.ConflictError(
-                            oid=oid, serials=(cached_tid, oldserial), data=data)
+                            oid=oid, serials=(committed_tid, oldserial), data=data)
                     else:
                         data = rdata
@@ -521,8 +477,6 @@ class FileStorage(BaseStorage.BaseStorage,
             self._tindex[oid] = here
             new = DataHeader(oid, self._tid, old, pos, 0, len(data))
-            self._toid2tid[oid] = self._tid
-
             self._tfile.write(new.asString())
             self._tfile.write(data)
@@ -531,7 +485,7 @@ class FileStorage(BaseStorage.BaseStorage,
                 raise FileStorageQuotaError(
                     "The storage quota has been exceeded.")
 
-            if old and oldserial != cached_tid:
+            if old and oldserial != committed_tid:
                 return ConflictResolution.ResolvedSerial
             else:
                 return self._tid
@@ -634,7 +588,6 @@ class FileStorage(BaseStorage.BaseStorage,
             # Write the recovery data record
             new = DataHeader(oid, serial, old, self._pos, 0, dlen)
-            self._toid2tid[oid] = serial
 
             self._tfile.write(new.asString())
@@ -656,8 +609,6 @@ class FileStorage(BaseStorage.BaseStorage,
     def _clear_temp(self):
         self._tindex.clear()
-        self._toid2tid.clear()
-        self._toid2tid_delete.clear()
         if self._tfile is not None:
             self._tfile.seek(0)
@@ -727,13 +678,7 @@ class FileStorage(BaseStorage.BaseStorage,
         self._pos = nextpos
 
         self._index.update(self._tindex)
-        self._oid2tid.update(self._toid2tid)
-        for oid in self._toid2tid_delete.keys():
-            try:
-                del self._oid2tid[oid]
-            except KeyError:
-                pass
 
         # Update the number of records that we've written
         # +1 for the transaction record
         self._records_written += len(self._tindex) + 1
@@ -778,17 +723,12 @@ class FileStorage(BaseStorage.BaseStorage,
     def getTid(self, oid):
         self._lock_acquire()
         try:
-            result = self._get_cached_tid(oid)
-            if result is None:
-                pos = self._lookup_pos(oid)
-                h = self._read_data_header(pos, oid)
-                if h.plen == 0 and h.back == 0:
-                    # Undone creation
-                    raise POSKeyError(oid)
-                else:
-                    result = h.tid
-                    self._oid2tid[oid] = result
-            return result
+            pos = self._lookup_pos(oid)
+            h = self._read_data_header(pos, oid)
+            if h.plen == 0 and h.back == 0:
+                # Undone creation
+                raise POSKeyError(oid)
+            return h.tid
         finally:
             self._lock_release()
@@ -925,10 +865,6 @@ class FileStorage(BaseStorage.BaseStorage,
             tpos = self._txn_find(tid, 1)
             tindex = self._txn_undo_write(tpos)
             self._tindex.update(tindex)
-            # Arrange to clear the affected oids from the oid2tid cache.
-            # It's too painful to try to update them to correct current
-            # values instead.
-            self._toid2tid_delete.update(tindex)
             return self._tid, tindex.keys()
 
     def _txn_find(self, tid, stop_at_pack):
@@ -1098,9 +1034,7 @@ class FileStorage(BaseStorage.BaseStorage,
                 # OK, we're beyond the point of no return
                 os.rename(self._file_name + '.pack', self._file_name)
                 self._file = open(self._file_name, 'r+b')
-                self._initIndex(p.index, p.tindex,
-                                p.oid2tid, p.toid2tid, p.toid2tid_delete)
+                self._initIndex(p.index, p.tindex)
                 self._pos = opos
                 self._save_index()
         finally:
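Most of the 87 deleted lines are the bookkeeping that kept the cache transactionally correct: store() and restore() staged new tids in _toid2tid, undo staged invalidations in _toid2tid_delete, _finish() folded both into _oid2tid at commit, and _clear_temp() discarded the staging maps. A rough, self-contained sketch of that staged-update pattern follows; the class and method names are illustrative, not ZODB's.

# Rough sketch of the staged cache updates this commit removes.

class TidCacheBookkeeping:
    def __init__(self):
        self._oid2tid = {}          # committed cache: oid -> last-committed tid
        self._toid2tid = {}         # staged additions for the open transaction
        self._toid2tid_delete = {}  # staged invalidations (e.g. from undo)

    def store(self, oid, tid):
        # Each object written in the transaction stages its new tid.
        self._toid2tid[oid] = tid

    def undo(self, oids):
        # Cheaper to forget the affected oids than to recompute their tids.
        for oid in oids:
            self._toid2tid_delete[oid] = 1

    def finish(self):
        # Commit: fold the staged updates into the real cache.
        self._oid2tid.update(self._toid2tid)
        for oid in self._toid2tid_delete:
            self._oid2tid.pop(oid, None)
        self.clear_temp()

    def clear_temp(self):
        # Abort (or the tail end of commit): drop the staging maps.
        self._toid2tid.clear()
        self._toid2tid_delete.clear()

Removing the cache trades extra header reads for not having to keep this structure in sync across commit, abort, undo and pack.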