ZODB commit 4d86e4e0
authored Nov 18, 2002 by Jeremy Hylton
Merge ZODB 3.1 changes to the trunk.
XXX Not sure if berkeley still works.
parent e650766b
Showing 19 changed files with 562 additions and 374 deletions (+562, -374)
src/ZEO/ClientStorage.py                             +36   -0
src/ZEO/StorageServer.py                             +12   -92
src/ZEO/start.py                                     +2    -0
src/ZEO/tests/CommitLockTests.py                     +4    -4
src/ZEO/zrpc/connection.py                           +11   -2
src/ZEO/zrpc/smac.py                                 +3    -0
src/ZEO/zrpc/trigger.py                              +5    -0
src/ZODB/BaseStorage.py                              +10   -1
src/ZODB/ConflictResolution.py                       +1    -1
src/ZODB/Connection.py                               +11   -30
src/ZODB/DB.py                                       +6    -2
src/ZODB/FileStorage.py                              +90   -31
src/ZODB/Transaction.py                              +197  -111
src/ZODB/fsdump.py                                   +77   -0
src/ZODB/tests/StorageTestBase.py                    +0    -1
src/ZODB/tests/TransactionalUndoVersionStorage.py    +37   -61
src/ZODB/tests/VersionStorage.py                     +0    -16
src/ZODB/tests/testFileStorage.py                    +60   -0
src/ZODB/tests/testZODB.py                           +0    -22
src/ZEO/ClientStorage.py

@@ -28,9 +28,11 @@ ClientDisconnected -- exception raised by ClientStorage
import cPickle
import os
import socket
import tempfile
import threading
import time
import types

from ZEO import ClientCache, ServerStub
from ZEO.TransactionBuffer import TransactionBuffer

@@ -204,6 +206,8 @@ class ClientStorage:
        self._storage = storage
        self._read_only_fallback = read_only_fallback
        self._connection = None
        # _server_addr is used by sortKey()
        self._server_addr = None

        self._info = {'length': 0, 'size': 0, 'name': 'ZEO Client',
                      'supportsUndo': 0, 'supportsVersions': 0,

@@ -339,6 +343,7 @@ class ClientStorage:
            log2(INFO, "Reconnected to storage")
        else:
            log2(INFO, "Connected to storage")
        self.set_server_addr(conn.get_addr())
        stub = self.StorageServerStubClass(conn)
        self._oids = []
        self._info.update(stub.get_info())

@@ -350,6 +355,33 @@ class ClientStorage:
        self._connection = conn
        self._server = stub

    def set_server_addr(self, addr):
        # Normalize server address and convert to string
        if isinstance(addr, types.StringType):
            self._server_addr = addr
        else:
            assert isinstance(addr, types.TupleType)
            # If the server is on a remote host, we need to guarantee
            # that all clients used the same name for the server.  If
            # they don't, the sortKey() may be different for each client.
            # The best solution seems to be the official name reported
            # by gethostbyaddr().
            host = addr[0]
            try:
                canonical, aliases, addrs = socket.gethostbyaddr(host)
            except socket.error, err:
                log2(BLATHER, "Error resoving host: %s (%s)" % (host, err))
                canonical = host
            self._server_addr = str((canonical, addr[1]))

    def sortKey(self):
        # If the client isn't connected to anything, it can't have a
        # valid sortKey().  Raise an error to stop the transaction early.
        if self._server_addr is None:
            raise ClientDisconnected
        else:
            return self._server_addr

    def verify_cache(self, server):
        """Internal routine called to verify the cache."""
        # XXX beginZeoVerify ends up calling back to beginVerify() below.

@@ -622,11 +654,15 @@ class ClientStorage:
        """Internal helper to end a transaction."""
        # the right way to set self._transaction to None
        # calls notify() on _tpc_cond in case there are waiting threads
        self._ltid = self._serial
        self._tpc_cond.acquire()
        self._transaction = None
        self._tpc_cond.notify()
        self._tpc_cond.release()

    def lastTransaction(self):
        return self._ltid

    def tpc_abort(self, transaction):
        """Storage API: abort a transaction."""
        if transaction is not self._transaction:
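The set_server_addr()/sortKey() pair added above exists so that every client of the same ZEO server computes the same sort key, which Transaction.py (further down in this commit) uses to order jars during two-phase commit. A minimal sketch of the same normalization idea, using only the standard socket module; normalize_addr and the sample addresses here are illustrative, not ZEO API:

import socket

def normalize_addr(addr):
    # addr is either a path string (Unix domain socket) or a (host, port)
    # tuple, mirroring what ClientStorage gets from conn.get_addr().
    if isinstance(addr, str):
        return addr
    host, port = addr
    try:
        # Use the canonical host name so two clients that spell the same
        # server differently (IP address vs. alias) agree on the key.
        canonical, aliases, addrs = socket.gethostbyaddr(host)
    except socket.error:
        canonical = host
    return str((canonical, port))

# e.g. normalize_addr(('127.0.0.1', 8100)) and normalize_addr(('localhost', 8100))
# usually collapse to the same string on a machine whose resolver maps both
# spellings to the same canonical host.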
src/ZEO/StorageServer.py

@@ -206,17 +206,16 @@ class ZEOStorage:
    def __init__(self, server, read_only=0):
        self.server = server
        self.connection = None
        self.client = None
        self.storage = None
        self.storage_id = "uninitialized"
        self.transaction = None
        self.read_only = read_only
        self.timeout = TimeoutThread()
        self.timeout.start()

    def notifyConnected(self, conn):
        self.connection = conn # For restart_other() below
        self.client = self.ClientStorageStubClass(conn)
        self.timeout.notifyConnected(conn)

    def notifyDisconnected(self):
        # When this storage closes, we must ensure that it aborts

@@ -226,7 +225,6 @@ class ZEOStorage:
            self.abort()
        else:
            self.log("disconnected")
        self.timeout.notifyDisconnected()

    def __repr__(self):
        tid = self.transaction and repr(self.transaction.id)

@@ -416,11 +414,6 @@ class ZEOStorage:
                               " requests from one client.")

        # (This doesn't require a lock because we're using asyncore)
        if self.storage._transaction is None:
            self.strategy = self.ImmediateCommitStrategyClass(self.storage,
                                                              self.client)
            self.timeout.begin()
        else:
            self.strategy = self.DelayedCommitStrategyClass(self.storage,
                                                            self.wait)

@@ -436,7 +429,6 @@ class ZEOStorage:
    def tpc_finish(self, id):
        if not self.check_tid(id):
            return
        self.timeout.end()
        invalidated = self.strategy.tpc_finish()
        if invalidated:
            self.server.invalidate(self, self.storage_id,

@@ -448,7 +440,6 @@ class ZEOStorage:
    def tpc_abort(self, id):
        if not self.check_tid(id):
            return
        self.timeout.end()
        strategy = self.strategy
        strategy.tpc_abort()
        self.transaction = None

@@ -469,9 +460,7 @@ class ZEOStorage:
    def vote(self, id):
        self.check_tid(id, exc=StorageTransactionError)
-        r = self.strategy.tpc_vote()
-        self.timeout.begin()
-        return r
+        return self.strategy.tpc_vote()

    def abortVersion(self, src, id):
        self.check_tid(id, exc=StorageTransactionError)

@@ -503,8 +492,10 @@ class ZEOStorage:
                     "Clients waiting: %d." % len(self.storage._waiting))
            return d
        else:
-            self.restart()
-            return None
+            return self.restart()

    def dontwait(self):
        return self.restart()

    def handle_waiting(self):
        while self.storage._waiting:

@@ -526,7 +517,7 @@ class ZEOStorage:
        except:
            self.log("Unexpected error handling waiting transaction",
                     level=zLOG.WARNING, error=sys.exc_info())
-            zeo_storage._conn.close()
+            zeo_storage.connection.close()
            return 0
        else:
            return 1

@@ -539,6 +530,8 @@ class ZEOStorage:
        resp = old_strategy.restart(self.strategy)
        if delay is not None:
            delay.reply(resp)
        else:
            return resp

# A ZEOStorage instance can use different strategies to commit a
# transaction.  The current implementation uses different strategies

@@ -768,79 +761,6 @@ class SlowMethodThread(threading.Thread):
        else:
            self.delay.reply(result)


class TimeoutThread(threading.Thread):
    # A TimeoutThread is associated with a ZEOStorage.  It trackes
    # how long transactions take to commit.  If a transaction takes
    # too long, it will close the connection.

    TIMEOUT = 30

    def __init__(self):
        threading.Thread.__init__(self)
        self._lock = threading.Lock()
        self._timestamp = None
        self._conn = None

    def begin(self):
        self._lock.acquire()
        try:
            self._timestamp = time.time()
        finally:
            self._lock.release()

    def end(self):
        self._lock.acquire()
        try:
            self._timestamp = None
        finally:
            self._lock.release()

    # There's a race here, but I hope it is harmless.

    def notifyConnected(self, conn):
        self._conn = conn

    def notifyDisconnected(self):
        self._conn = None

    def run(self):
        timeout = self.TIMEOUT
        while self._conn is not None:
            time.sleep(timeout)
            self._lock.acquire()
            try:
                if self._timestamp is not None:
                    deadline = self._timestamp + self.TIMEOUT
                else:
                    log("TimeoutThread no current transaction",
                        zLOG.BLATHER)
                    timeout = self.TIMEOUT
                    continue
            finally:
                self._lock.release()
            timeout = deadline - time.time()
            if deadline < time.time():
                self._abort()
                break
            else:
                elapsed = self.TIMEOUT - timeout
                log("TimeoutThread transaction has %0.2f sec to complete"
                    " (%.2f elapsed)" % (timeout, elapsed), zLOG.BLATHER)
        log("TimeoutThread exiting.  Connection closed.", zLOG.BLATHER)

    def _abort(self):
        # It's possible for notifyDisconnected to remove the connection
        # just before we use it.  I think that's harmless, since it means
        # the connection was closed.
        log("TimeoutThread aborting transaction", zLOG.WARNING)
        try:
            self._conn.close()
        except AttributeError, msg:
            log(msg)

# Patch up class references
StorageServer.ZEOStorageClass = ZEOStorage
ZEOStorage.DelayedCommitStrategyClass = DelayedCommitStrategy
src/ZEO/start.py

@@ -17,6 +17,8 @@ from __future__ import nested_scopes
import sys, os, getopt
import types
import errno
import socket

def directory(p, n=1):
    d = p
src/ZEO/tests/CommitLockTests.py

@@ -124,9 +124,12 @@ class CommitLockTests:
        # started, but before it finishes.  The dowork() function
        # executes after the first transaction has completed.

-        # Start on transaction normally.
+        # Start on transaction normally and get the lock.
        t = Transaction()
        self._storage.tpc_begin(t)
        oid = self._storage.new_oid()
        self._storage.store(oid, ZERO, zodb_pickle(MinPO(1)), '', t)
        self._storage.tpc_vote(t)

        # Start a second transaction on a different connection without
        # blocking the test thread.

@@ -141,9 +144,6 @@ class CommitLockTests:
        else:
            self._storages.append((storage2, t2))

        oid = self._storage.new_oid()
        self._storage.store(oid, ZERO, zodb_pickle(MinPO(1)), '', t)
        self._storage.tpc_vote(t)
        if method_name == "tpc_finish":
            self._storage.tpc_finish(t)
            self._storage.load(oid, '')
src/ZEO/zrpc/connection.py

@@ -348,13 +348,22 @@ class Connection(smac.SizedMessageAsyncConnection):
        else:
            return 0

    def _pull_trigger(self, tryagain=10):
        try:
            self.trigger.pull_trigger()
        except OSError, e:
            self.trigger.close()
            self.trigger = trigger()
            if tryagain > 0:
                self._pull_trigger(tryagain=tryagain-1)

    def wait(self, msgid):
        """Invoke asyncore mainloop and wait for reply."""
        if __debug__:
            log("wait(%d), async=%d" % (msgid, self.is_async()),
                level=zLOG.TRACE)
        if self.is_async():
-            self.trigger.pull_trigger()
+            self._pull_trigger()

        # Delay used when we call asyncore.poll() directly.
        # Start with a 1 msec delay, double until 1 sec.

@@ -398,7 +407,7 @@ class Connection(smac.SizedMessageAsyncConnection):
        if __debug__:
            log("poll(), async=%d" % self.is_async(), level=zLOG.TRACE)
        if self.is_async():
-            self.trigger.pull_trigger()
+            self._pull_trigger()
        else:
            asyncore.poll(0.0, self._map)
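The new _pull_trigger() helper retries a failed trigger pull by closing the broken trigger, creating a fresh one, and trying again a bounded number of times. A generic sketch of that rebuild-and-retry pattern, separate from ZEO's trigger class (RetryingTrigger and factory are invented names for illustration only):

class RetryingTrigger:
    # Wraps a trigger-like object; factory() must return a fresh object
    # with pull_trigger() and close() methods.

    def __init__(self, factory, retries=10):
        self.factory = factory
        self.retries = retries
        self.trigger = factory()

    def pull(self):
        last_err = None
        for attempt in range(self.retries + 1):
            try:
                return self.trigger.pull_trigger()
            except OSError, err:
                # The pipe or socket behind the trigger went bad:
                # rebuild it and try again.
                last_err = err
                self.trigger.close()
                self.trigger = self.factory()
        raise last_err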
src/ZEO/zrpc/smac.py

@@ -77,6 +77,9 @@ class SizedMessageAsyncConnection(asyncore.dispatcher):
        self.__closed = 0
        self.__super_init(sock, map)

    def get_addr(self):
        return self.addr

    # XXX avoid expensive getattr calls?  Can't remember exactly what
    # this comment was supposed to mean, but it has something to do
    # with the way asyncore uses getattr and uses if sock:
src/ZEO/zrpc/trigger.py

@@ -16,6 +16,7 @@ import asyncore
import os
import socket
import thread
import errno

if os.name == 'posix':

@@ -71,6 +72,7 @@ if os.name == 'posix':
            self.del_channel()
            for fd in self._fds:
                os.close(fd)
            self._fds = []

        def __repr__(self):
            return '<select-trigger (pipe) at %x>' % id(self)

@@ -84,6 +86,9 @@ if os.name == 'posix':
        def handle_connect(self):
            pass

        def handle_close(self):
            self.close()

        def pull_trigger(self, thunk=None):
            if thunk:
                self.lock.acquire()
src/ZODB/BaseStorage.py

@@ -15,7 +15,7 @@
"""
# Do this portably in the face of checking out with -kv
import string
-__version__ = string.split('$Revision: 1.27 $')[-2:][0]
+__version__ = string.split('$Revision: 1.28 $')[-2:][0]

import cPickle
import ThreadLock, bpthread

@@ -63,6 +63,15 @@ class BaseStorage(UndoLogCompatible.UndoLogCompatible):
    def close(self):
        pass

    def sortKey(self):
        """Return a string that can be used to sort storage instances.

        The key must uniquely identify a storage and must be the same
        across multiple instantiations of the same storage.
        """
        # name may not be sufficient, e.g. ZEO has a user-definable name.
        return self.__name__

    def getName(self):
        return self.__name__
src/ZODB/ConflictResolution.py

@@ -78,7 +78,7 @@ def load_class(class_tuple):
    except (ImportError, AttributeError):
        zLOG.LOG("Conflict Resolution", zLOG.BLATHER,
                 "Unable to load class", error=sys.exc_info())
-        bad_class[class_tuple] = 1
+        bad_classes[class_tuple] = 1
        return None
    return klass
src/ZODB/Connection.py

@@ -13,7 +13,7 @@
##############################################################################
"""Database connection support

-$Id: Connection.py,v 1.78 2002/10/23 19:18:35 jeremy Exp $"""
+$Id: Connection.py,v 1.79 2002/11/18 23:17:40 jeremy Exp $"""

from cPickleCache import PickleCache
from POSException import ConflictError, ReadConflictError

@@ -184,6 +184,14 @@ class Connection(ExportImport.ExportImport):
                return obj
        return self[oid]

    def sortKey(self):
        # XXX will raise an exception if the DB hasn't been set
        storage_key = self._sortKey()
        # If two connections use the same storage, give them a
        # consistent order using id().  This is unique for the
        # lifetime of a connection, which is good enough.
        return "%s:%s" % (storage_key, id(self))

    def _setDB(self, odb):
        """Begin a new transaction.

@@ -191,6 +199,7 @@ class Connection(ExportImport.ExportImport):
        """
        self._db = odb
        self._storage = s = odb._storage
        self._sortKey = odb._storage.sortKey
        self.new_oid = s.new_oid
        if self._code_timestamp != global_code_timestamp:
            # New code is in place.  Start a new cache.

@@ -261,27 +270,8 @@ class Connection(ExportImport.ExportImport):
            self.__onCommitActions.append((method_name, args, kw))
        get_transaction().register(self)

    # NB: commit() is responsible for calling tpc_begin() on the storage.
    # It uses self._begun to track whether it has been called.  When
    # self._begun is 0, it has not been called.

    # This arrangement allows us to handle the special case of a
    # transaction with no modified objects.  It is possible for
    # registration to be occur unintentionally and for a persistent
    # object to compensate by making itself as unchanged.  When this
    # happens, it's possible to commit a transaction with no modified
    # objects.

    # Since tpc_begin() may raise a ReadOnlyError, don't call it if there
    # are no objects.  This avoids spurious (?) errors when working with
    # a read-only storage.

    def commit(self, object, transaction):
        if object is self:
            if not self._begun:
                self._storage.tpc_begin(transaction)
                self._begun = 1

            # We registered ourself.  Execute a commit action, if any.
            if self.__onCommitActions is not None:
                method_name, args, kw = self.__onCommitActions.pop(0)

@@ -306,10 +296,6 @@ class Connection(ExportImport.ExportImport):
            # Nothing to do
            return

        if not self._begun:
            self._storage.tpc_begin(transaction)
            self._begun = 1

        stack = [object]

        # Create a special persistent_id that passes T and the subobject

@@ -616,8 +602,6 @@ class Connection(ExportImport.ExportImport):
    def tpc_begin(self, transaction, sub=None):
        self._invalidating = []
        self._creating = []
        self._begun = 0

        if sub:
            # Sub-transaction!
            if self._tmp is None:

@@ -626,10 +610,7 @@ class Connection(ExportImport.ExportImport):
            self._storage = _tmp
            _tmp.registerDB(self._db, 0)

            # It's okay to always call tpc_begin() for a sub-transaction
            # because this isn't the real storage.
            self._storage.tpc_begin(transaction)
            self._begun = 1

    def tpc_vote(self, transaction):
        if self.__onCommitActions is not None:
src/ZODB/DB.py

@@ -13,8 +13,8 @@
##############################################################################
"""Database objects

-$Id: DB.py,v 1.44 2002/10/23 19:08:36 jeremy Exp $"""
-__version__ = '$Revision: 1.44 $'[11:-2]
+$Id: DB.py,v 1.45 2002/11/18 23:17:40 jeremy Exp $"""
+__version__ = '$Revision: 1.45 $'[11:-2]

import cPickle, cStringIO, sys, POSException, UndoLogCompatible
from Connection import Connection

@@ -578,8 +578,12 @@ class CommitVersion:
        self.tpc_begin = s.tpc_begin
        self.tpc_vote = s.tpc_vote
        self.tpc_finish = s.tpc_finish
        self._sortKey = s.sortKey
        get_transaction().register(self)

    def sortKey(self):
        return "%s:%s" % (self._sortKey(), id(self))

    def abort(self, reallyme, t):
        pass

    def commit(self, reallyme, t):
src/ZODB/FileStorage.py

@@ -115,7 +115,7 @@
#  may have a back pointer to a version record or to a non-version
#  record.
#
-__version__ = '$Revision: 1.115 $'[11:-2]
+__version__ = '$Revision: 1.116 $'[11:-2]

import base64
from cPickle import Pickler, Unpickler, loads

@@ -316,9 +316,6 @@ class FileStorage(BaseStorage.BaseStorage,
        # hook to use something other than builtin dict
        return {}, {}, {}, {}

    def abortVersion(self, src, transaction):
        return self.commitVersion(src, '', transaction, abort=1)

    def _save_index(self):
        """Write the database index to a file to support quick startup
        """

@@ -446,6 +443,9 @@ class FileStorage(BaseStorage.BaseStorage,
                # XXX should log the error, though
                pass # We don't care if this fails.

    def abortVersion(self, src, transaction):
        return self.commitVersion(src, '', transaction, abort=1)

    def commitVersion(self, src, dest, transaction, abort=None):
        # We are going to commit by simply storing back pointers.
        if self._is_read_only:

@@ -526,6 +526,9 @@ class FileStorage(BaseStorage.BaseStorage,
                here += heredelta
                current_oids[oid] = 1
                # Once we've found the data we are looking for,
                # we can stop chasing backpointers.
                break
            else:
                # Hm.  This is a non-current record.  Is there a

@@ -768,9 +771,13 @@ class FileStorage(BaseStorage.BaseStorage,
        if vl:
            self._file.read(vl + 16)
        # Make sure this looks like the right data record
        if dl == 0:
            # This is also a backpointer.  Gotta trust it.
            return pos
        if dl != len(data):
            # XXX what if this data record also has a backpointer?
            # I don't think that's possible, but I'm not sure.
            # The expected data doesn't match what's in the
            # backpointer.  Something is wrong.
            error("Mismatch between data and backpointer at %d", pos)
            return 0
        _data = self._file.read(dl)
        if data != _data:

@@ -828,20 +835,7 @@ class FileStorage(BaseStorage.BaseStorage,
            # We need to write some version information if this revision is
            # happening in a version.
            if version:
-                pnv = None
-                # We need to write the position of the non-version data.
-                # If the previous revision of the object was in a version,
-                # then it will contain a pnv record.  Otherwise, the
-                # previous record is the non-version data.
-                if old:
-                    self._file.seek(old)
-                    h = self._file.read(42)
-                    doid, x, y, z, vlen, w = unpack(DATA_HDR, h)
-                    if doid != oid:
-                        raise CorruptedDataError, h
-                    # XXX assert versions match?
-                    if vlen > 0:
-                        pnv = self._file.read(8)
+                pnv = self._restore_pnv(oid, old, version, prev_pos)
                if pnv:
                    self._tfile.write(pnv)
                else:

@@ -853,20 +847,65 @@ class FileStorage(BaseStorage.BaseStorage,
                self._tfile.write(p64(pv))
                self._tvindex[version] = here
                self._tfile.write(version)
-            # And finally, write the data
+            # And finally, write the data or a backpointer
            if data is None:
                if prev_pos:
                    self._tfile.write(p64(prev_pos))
                else:
                    # Write a zero backpointer, which indicates an
                    # un-creation transaction.
                    # write a backpointer instead of data
                    self._tfile.write(z64)
            else:
                self._tfile.write(data)
        finally:
            self._lock_release()

    def _restore_pnv(self, oid, prev, version, bp):
        # Find a valid pnv (previous non-version) pointer for this version.

        # If there is no previous record, there can't be a pnv.
        if not prev:
            return None

        pnv = None
        # Load the record pointed to be prev
        self._file.seek(prev)
        h = self._file.read(DATA_HDR_LEN)
        doid, x, y, z, vlen, w = unpack(DATA_HDR, h)
        if doid != oid:
            raise CorruptedDataError, h
        # If the previous record is for a version, it must have
        # a valid pnv.
        if vlen > 0:
            pnv = self._file.read(8)
            pv = self._file.read(8)
            v = self._file.read(vlen)
        elif bp:
            # XXX Not sure the following is always true:
            # The previous record is not for this version, yet we
            # have a backpointer to it.  The current record must
            # be an undo of an abort or commit, so the backpointer
            # must be to a version record with a pnv.
            self._file.seek(bp)
            h2 = self._file.read(DATA_HDR_LEN)
            doid2, x, y, z, vlen2, sdl = unpack(DATA_HDR, h2)
            dl = U64(sdl)
            if oid != doid2:
                raise CorruptedDataError, h2
            if vlen2 > 0:
                pnv = self._file.read(8)
                pv = self._file.read(8)
                v = self._file.read(8)
        else:
            warn("restore could not find previous non-version data "
                 "at %d or %d" % (prev, bp))

        return pnv

    def supportsUndo(self):
        return 1

    def supportsVersions(self):
        return 1

@@ -2097,7 +2136,8 @@ def _loadBack_impl(file, oid, back):
        doid, serial, prev, tloc, vlen, plen = unpack(DATA_HDR, h)
        if vlen:
-            file.seek(vlen + 16, 1)
+            file.read(16)
+            version = file.read(vlen)
        if plen != z64:
            return file.read(U64(plen)), serial, old, tloc
        back = file.read(8) # We got a back pointer!

@@ -2120,6 +2160,17 @@ def _loadBackTxn(file, oid, back):
    tid = h[:8]
    return data, serial, tid

def getTxnFromData(file, oid, back):
    """Return transaction id for data at back."""
    file.seek(U64(back))
    h = file.read(DATA_HDR_LEN)
    doid, serial, prev, stloc, vlen, plen = unpack(DATA_HDR, h)
    assert oid == doid
    tloc = U64(stloc)
    file.seek(tloc)
    # seek to transaction header, where tid is first 8 bytes
    return file.read(8)

def _truncate(file, name, pos):
    seek = file.seek
    seek(0, 2)

@@ -2336,40 +2387,48 @@ class RecordIterator(Iterator, BaseStorage.TransactionRecord):
            self._file.seek(pos)
            h = self._file.read(DATA_HDR_LEN)
            oid, serial, sprev, stloc, vlen, splen = unpack(DATA_HDR, h)
            prev = U64(sprev)
            tloc = U64(stloc)
            plen = U64(splen)
            dlen = DATA_HDR_LEN + (plen or 8)
            if vlen:
                dlen += (16 + vlen)
-                self._file.read(16) # move to the right location
+                tmp = self._file.read(16)
+                pv = U64(tmp[8:16])
                version = self._file.read(vlen)
            else:
                version = ''

            datapos = pos + DATA_HDR_LEN
            if vlen:
                datapos += 16 + vlen
            assert self._file.tell() == datapos, (self._file.tell(), datapos)

            if pos + dlen > self._tend or tloc != self._tpos:
                warn("%s data record exceeds transaction record at %s",
                     file.name, pos)
                break

            self._pos = pos + dlen
            tid = None
            prev_txn = None
            if plen:
-                p = self._file.read(plen)
+                data = self._file.read(plen)
            else:
-                p = self._file.read(8)
-                if p == z64:
+                bp = self._file.read(8)
+                if bp == z64:
                    # If the backpointer is 0 (encoded as z64), then
                    # this transaction undoes the object creation.  It
                    # either aborts the version that created the
                    # object or undid the transaction that created it.
                    # Return None instead of a pickle to indicate
                    # this.
-                    p = None
+                    data = None
                else:
-                    p, _s, tid = _loadBackTxn(self._file, oid, p)
+                    data, _s, tid = _loadBackTxn(self._file, oid, bp)
+                    prev_txn = getTxnFromData(self._file, oid, bp)

-            r = Record(oid, serial, version, p, tid)
+            r = Record(oid, serial, version, data, prev_txn)

            return r
src/ZODB/Transaction.py

@@ -13,18 +13,40 @@
##############################################################################
"""Transaction management

-$Id: Transaction.py,v 1.39 2002/09/27 18:37:24 gvanrossum Exp $"""
-__version__ = '$Revision: 1.39 $'[11:-2]
+$Id: Transaction.py,v 1.40 2002/11/18 23:17:40 jeremy Exp $"""
+__version__ = '$Revision: 1.40 $'[11:-2]

import time, sys, struct, POSException
from struct import pack
from string import split, strip, join
-from zLOG import LOG, ERROR, PANIC
+from zLOG import LOG, ERROR, PANIC, INFO, BLATHER, WARNING
from POSException import ConflictError
from ZODB import utils

# Flag indicating whether certain errors have occurred.
hosed = 0

# There is an order imposed on all jars, based on the storages they
# serve, that must be consistent across all applications using the
# storages.  The order is defined by the sortKey() method of the jar.

def jar_cmp(j1, j2):
    # Call sortKey() every time, because a ZEO client could reconnect
    # to a different server at any time.
    try:
        k1 = j1.sortKey()
    except:
        LOG("TM", WARNING, "jar missing sortKey() method: %s" % j1)
        k1 = id(j1)
    try:
        k2 = j2.sortKey()
    except:
        LOG("TM", WARNING, "jar missing sortKey() method: %s" % j2)
        k2 = id(j2)
    return cmp(k1, k2)

class Transaction:
    'Simple transaction objects for single-threaded applications.'
    user = ''

@@ -53,6 +75,9 @@ class Transaction:
        for c in self._connections.values():
            c.close()
        del self._connections

    def log(self, msg, level=INFO, error=None):
        LOG("TM:%s" % self._id, level, msg, error=error)

    def sub(self):
        # Create a manually managed subtransaction for internal use
        r = self.__class__()

@@ -84,11 +109,8 @@ class Transaction:
            """)

        t = None
        subj = self._sub
        subjars = ()

        if not subtransaction:
            # Must add in any non-subtransaction supporting objects that
            # may have been stowed away from previous subtransaction
            # commits.

@@ -96,11 +118,14 @@ class Transaction:
            self._objects.extend(self._non_st_objects)
            self._non_st_objects = None

-        if subj is not None:
+        if self._sub is not None:
            # Abort of top-level transaction after commiting
            # subtransactions.
-            subjars = subj.values()
+            subjars = self._sub.values()
+            subjars.sort(jar_cmp)
+            self._sub = None
+        else:
+            subjars = []

        try:
            # Abort the objects

@@ -110,13 +135,20 @@ class Transaction:
                    if j is not None:
                        j.abort(o, self)
                except:
                    # Record the first exception that occurred
                    if t is None:
                        t, v, tb = sys.exc_info()
                    else:
                        self.log("Failed to abort object %016x" %
                                 utils.U64(o._p_oid), error=sys.exc_info())

-            # Ugh, we need to abort work done in sub-transactions.
-            while subjars:
-                j = subjars.pop()
-                j.abort_sub(self) # This should never fail
+            # tpc_begin() was never called, so tpc_abort() should not be
+            # called.
+            if not subtransaction:
+                # abort_sub() must be called to clear subtransaction state
+                for jar in subjars:
+                    jar.abort_sub(self) # This should never fail

            if t is not None:
                raise t, v, tb

@@ -136,7 +168,8 @@ class Transaction:
        This aborts any transaction in progres.
        '''
-        if self._objects: self.abort(subtransaction, 0)
+        if self._objects:
+            self.abort(subtransaction, 0)
        if info:
            info = split(info, '\t')
            self.user = strip(info[0])

@@ -146,28 +179,30 @@ class Transaction:
        'Finalize the transaction'
        objects = self._objects
        jars = {}
        jarsv = None
-        subj = self._sub
-        subjars = ()
+        subjars = []

        if subtransaction:
-            if subj is None:
-                self._sub = subj = {}
+            if self._sub is None:
+                # Must store state across multiple subtransactions
+                # so that the final commit can commit all subjars.
+                self._sub = {}
        else:
-            if subj is not None:
+            if self._sub is not None:
+                # This commit is for a top-level transaction that
+                # has previously committed subtransactions.  Do
+                # one last subtransaction commit to clear out the
+                # current objects, then commit all the subjars.
                if objects:
                    # Do an implicit sub-transaction commit:
                    self.commit(1)
                    # XXX What does this do?
                    objects = []
-                subjars = subj.values()
+                subjars = self._sub.values()
+                subjars.sort(jar_cmp)
                self._sub = None

-            # If not a subtransaction, then we need to add any non-
-            # subtransaction-supporting objects that may have been
-            # stowed away during subtransaction commits to _objects.
-            if (subtransaction is None) and (self._non_st_objects is not None):
+            # If there were any non-subtransaction-aware jars
+            # involved in earlier subtransaction commits, we need
+            # to add them to the list of jars to commit.
+            if self._non_st_objects is not None:
                objects.extend(self._non_st_objects)
                self._non_st_objects = None

@@ -188,88 +223,140 @@ class Transaction:
        # either call tpc_abort or tpc_finish.  It is OK to call
        # these multiple times, as the storage is required to ignore
        # these calls if tpc_begin has not been called.
        #
        # - That we call tpc_begin() in a globally consistent order,
        #   so that concurrent transactions involving multiple storages
        #   do not deadlock.
        try:
            ncommitted = 0
+            jars = self._get_jars(objects, subtransaction)
            try:
-                ncommitted += self._commit_objects(objects, jars,
-                                                   subtransaction, subj)
-                self._commit_subtrans(jars, subjars)
-                jarsv = jars.values()
-                for jar in jarsv:
+                # If not subtransaction, then jars will be modified.
+                self._commit_begin(jars, subjars, subtransaction)
+                ncommitted += self._commit_objects(objects)
+                if not subtransaction:
+                    # Unless this is a really old jar that doesn't
+                    # implement tpc_vote(), it must raise an exception
+                    # if it can't commit the transaction.
+                    for jar in jars:
                        try:
                            vote = jar.tpc_vote
-                        except:
+                        except AttributeError:
                            pass
                        else:
                            vote(self) # last chance to bail

                # Handle multiple jars separately.  If there are
                # multiple jars and one fails during the finish, we
                # mark this transaction manager as hosed.
-                if len(jarsv) == 1:
-                    self._finish_one(jarsv[0])
+                if len(jars) == 1:
+                    self._finish_one(jars[0])
                else:
-                    self._finish_many(jarsv)
+                    self._finish_many(jars)
            except:
                # Ugh, we got an got an error during commit, so we
-                # have to clean up.
-                exc_info = sys.exc_info()
-                if jarsv is None:
-                    jarsv = jars.values()
-                self._commit_error(exc_info, objects, ncommitted,
-                                   jarsv, subjars)
+                # have to clean up.  First save the original exception
+                # in case the cleanup process causes another
+                # exception.
+                t, v, tb = sys.exc_info()
+                try:
+                    self._commit_error(objects, ncommitted, jars, subjars)
+                except:
+                    LOG('ZODB', ERROR,
+                        "A storage error occured during transaction "
+                        "abort.  This shouldn't happen.",
+                        error=sys.exc_info())
+                raise t, v, tb
        finally:
            del objects[:] # clear registered
+            if not subtransaction and self._id is not None:
+                free_transaction()

-    def _commit_objects(self, objects, jars, subtransaction, subj):
-        # commit objects and return number of commits
-        ncommitted = 0
+    def _get_jars(self, objects, subtransaction):
+        # Returns a list of jars for this transaction.
+        # Find all the jars and sort them in a globally consistent order.
+        # objects is a list of persistent objects and jars.
+        # If this is a subtransaction and a jar is not subtransaction aware,
+        # it's object gets delayed until the parent transaction commits.
+        d = {}
        for o in objects:
-            j = getattr(o, '_p_jar', o)
-            if j is not None:
-                i = id(j)
-                if not jars.has_key(i):
-                    jars[i] = j
+            jar = getattr(o, '_p_jar', o)
+            if jar is None:
+                # I don't think this should ever happen, but can't
+                # prove that it won't.  If there is no jar, there
+                # is nothing to be done.
+                self.log("Object with no jar registered for transaction: "
+                         "%s" % repr(o), level=BLATHER)
+                continue
+            # jar may not be safe as a dictionary key
+            key = id(jar)
+            d[key] = jar

            if subtransaction:
                # If a jar does not support subtransactions,
                # we need to save it away to be committed in
                # the outer transaction.
-                    try:
-                        j.tpc_begin(self, subtransaction)
-                    except TypeError:
-                        j.tpc_begin(self)
-                    if hasattr(j, 'commit_sub'):
-                        subj[i] = j
+                if hasattr(jar, "commit_sub"):
+                    self._sub[key] = jar
                else:
+                    if self._non_st_objects is None:
+                        self._non_st_objects = []
+                    self._non_st_objects.append(o)
+                    continue

+        jars = d.values()
+        jars.sort(jar_cmp)
+        return jars

+    def _commit_begin(self, jars, subjars, subtransaction):
+        if subtransaction:
+            assert not subjars
+            for jar in jars:
+                try:
+                    jar.tpc_begin(self, subtransaction)
+                except TypeError:
+                    # Assume that TypeError means that tpc_begin() only
+                    # takes one argument, and that the jar doesn't
+                    # support subtransactions.
+                    jar.tpc_begin(self)
+        else:
+            # Merge in all the jars used by one of the subtransactions.
+            # When the top-level subtransaction commits, the tm must
+            # call commit_sub() for each jar involved in one of the
+            # subtransactions.  The commit_sub() method should call
+            # tpc_begin() on the storage object.
+            # It must also call tpc_begin() on jars that were used in
+            # a subtransaction but don't support subtransactions.
+            # These operations must be performed on the jars in order.

+            # Modify jars inplace to include the subjars, too.
+            jars += subjars
+            jars.sort(jar_cmp)
+            # assume that subjars is small, so that it's cheaper to test
+            # whether jar in subjars than to make a dict and do has_key.
+            for jar in jars:
+                if jar in subjars:
+                    jar.commit_sub(self)
+                else:
-                    j.tpc_begin(self)
-            j.commit(o, self)
+                    jar.tpc_begin(self)

+    def _commit_objects(self, objects):
+        ncommitted = 0
+        for o in objects:
+            jar = getattr(o, "_p_jar", o)
+            if jar is None:
+                continue
+            jar.commit(o, self)
+            ncommitted += 1
+        return ncommitted

-    def _commit_subtrans(self, jars, subjars):
-        # Commit work done in subtransactions
-        while subjars:
-            j = subjars.pop()
-            i = id(j)
-            if not jars.has_key(i):
-                jars[i] = j
-            j.commit_sub(self)

    def _finish_one(self, jar):
        try:
-            jar.tpc_finish(self) # This should never fail
+            # The database can't guarantee consistency if call fails.
+            jar.tpc_finish(self)
        except:
            # Bug if it does, we need to keep track of it
            LOG('ZODB', ERROR,

@@ -278,42 +365,40 @@ class Transaction:
                error=sys.exc_info())
            raise

-    def _finish_many(self, jarsv):
+    def _finish_many(self, jars):
        global hosed
        try:
-            while jarsv:
-                jarsv[-1].tpc_finish(self) # This should never fail
-                jarsv.pop() # It didn't, so it's taken care of.
+            for jar in jars:
+                # The database can't guarantee consistency if call fails.
+                jar.tpc_finish(self)
        except:
            # Bug if it does, we need to yell FIRE!
            # Someone finished, so don't allow any more
            # work without at least a restart!
            hosed = 1
            LOG('ZODB', PANIC,
                "A storage error occurred in the last phase of a "
                "two-phase commit.  This shouldn\'t happen. "
-                "The application may be in a hosed state, so "
-                "transactions will not be allowed to commit "
+                "The application will not be allowed to commit "
                "until the site/storage is reset by a restart. ",
                error=sys.exc_info())
            raise

-    def _commit_error(self, (t, v, tb),
-                      objects, ncommitted, jarsv, subjars):
-        # handle an exception raised during commit
-        # takes sys.exc_info() as argument
-        # First, we have to abort any uncommitted objects.
+    def _commit_error(self, objects, ncommitted, jars, subjars):
+        # First, we have to abort any uncommitted objects.  The abort
+        # will mark the object for invalidation, so that it's last
+        # committed state will be restored.
        for o in objects[ncommitted:]:
            try:
                j = getattr(o, '_p_jar', o)
                if j is not None:
                    j.abort(o, self)
            except:
-                pass
+                # nothing to do but log the error
+                self.log("Failed to abort object %016x" %
+                         utils.U64(o._p_oid), error=sys.exc_info())

-        # Then, we unwind TPC for the jars that began it.
-        for j in jarsv:
+        # Abort the two-phase commit.  It's only necessary to abort the
+        # commit for jars that began it, but it is harmless to abort it
+        # for all.
+        for j in jars:
            try:
                j.tpc_abort(self) # This should never fail
            except:

@@ -321,9 +406,14 @@ class Transaction:
                    "A storage error occured during object abort.  This "
                    "shouldn't happen. ", error=sys.exc_info())

-        # Ugh, we need to abort work done in sub-transactions.
-        while subjars:
-            j = subjars.pop()
+        # After the tpc_abort(), call abort_sub() on all the
+        # subtrans-aware jars to *really* abort the subtransaction.
+        # Example: For Connection(), the tpc_abort() will abort the
+        # subtransaction TmpStore() and abort_sub() will remove the
+        # TmpStore.
+        for j in subjars:
            try:
                j.abort_sub(self) # This should never fail
            except:

@@ -332,8 +422,6 @@ class Transaction:
                    "object abort.  This shouldn't happen.",
                    error=sys.exc_info())
-        raise t, v, tb

    def register(self, object):
        'Register the given object for transaction control.'
        self._append(object)

@@ -366,8 +454,6 @@ the system problem.  See your application log for
information on the error that lead to this problem.
"""

############################################################################
# install get_transaction:
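The jar_cmp()/sortKey() ordering introduced above is what keeps concurrent multi-storage commits from deadlocking: every transaction calls tpc_begin() on its storages in the same global order, so no two transactions can each hold one storage's commit lock while waiting for the other's. A toy illustration of that ordering argument; the jar names and keys below are invented for the example, not taken from ZODB:

def commit_order(jars):
    # jars: list of (sort_key, jar_name) pairs.  Sorting by the shared
    # key before calling tpc_begin() means every transaction locks the
    # same storages in the same order.
    jars = list(jars)
    jars.sort(lambda a, b: cmp(a[0], b[0]))
    return [name for (key, name) in jars]

tx1 = [("zeo:serverA:8100", "jar1"), ("file:/var/db/Data.fs", "jar2")]
tx2 = [("file:/var/db/Data.fs", "jar2"), ("zeo:serverA:8100", "jar1")]

# Both transactions lock the FileStorage jar first, then the ZEO jar,
# so neither holds one lock while waiting indefinitely for the other.
assert commit_order(tx1) == commit_order(tx2)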
src/ZODB/fsdump.py

@@ -76,3 +76,80 @@ def fsdump(path, file=None, with_offset=1):
        print >> file
        i += 1
    iter.close()

import struct

from ZODB.FileStorage import TRANS_HDR, TRANS_HDR_LEN
from ZODB.FileStorage import DATA_HDR, DATA_HDR_LEN

def fmt(p64):
    # Return a nicely formatted string for a packaged 64-bit value
    return "%016x" % U64(p64)

class Dumper:
    """A very verbose dumper for debuggin FileStorage problems."""

    def __init__(self, path, dest=None):
        self.file = open(path, "rb")
        self.dest = dest

    def dump(self):
        fid = self.file.read(4)
        print >> self.dest, "*" * 60
        print >> self.dest, "file identifier: %r" % fid
        while self.dump_txn():
            pass

    def dump_txn(self):
        pos = self.file.tell()
        h = self.file.read(TRANS_HDR_LEN)
        if not h:
            return False
        tid, stlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
        end = pos + U64(stlen)
        print >> self.dest, "=" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "end pos: %d" % end
        print >> self.dest, "transaction id: %s" % fmt(tid)
        print >> self.dest, "trec len: %d" % U64(stlen)
        print >> self.dest, "status: %r" % status
        user = descr = extra = ""
        if ul:
            user = self.file.read(ul)
        if dl:
            descr = self.file.read(dl)
        if el:
            extra = self.file.read(el)
        print >> self.dest, "user: %r" % user
        print >> self.dest, "description: %r" % descr
        print >> self.dest, "len(extra): %d" % el
        while self.file.tell() < end:
            self.dump_data(pos)
        stlen2 = self.file.read(8)
        print >> self.dest, "redundant trec len: %d" % U64(stlen2)
        return True

    def dump_data(self, tloc):
        pos = self.file.tell()
        h = self.file.read(DATA_HDR_LEN)
        assert len(h) == DATA_HDR_LEN
        oid, revid, sprev, stloc, vlen, sdlen = struct.unpack(DATA_HDR, h)
        dlen = U64(sdlen)
        print >> self.dest, "-" * 60
        print >> self.dest, "offset: %d" % pos
        print >> self.dest, "oid: %s" % fmt(oid)
        print >> self.dest, "revid: %s" % fmt(revid)
        print >> self.dest, "previous record offset: %d" % U64(sprev)
        print >> self.dest, "transaction offset: %d" % U64(stloc)
        if vlen:
            pnv = self.file.read(8)
            sprevdata = self.file.read(8)
            version = self.file.read(vlen)
            print >> self.dest, "version: %r" % version
            print >> self.dest, "non-version data offset: %d" % U64(pnv)
            print >> self.dest, \
                  "previous version data offset: %d" % U64(sprevdata)
        print >> self.dest, "len(data): %d" % dlen
        self.file.read(dlen)
        if not dlen:
            sbp = self.file.read(8)
            print >> self.dest, "backpointer: %d" % U64(sbp)
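The new Dumper walks a FileStorage file record by record and prints every transaction and data header field, which is useful when a Data.fs is suspected of corruption. A short usage sketch; the file names are examples only, and it assumes the module is importable as ZODB.fsdump, as its own imports suggest:

from ZODB.fsdump import Dumper

# Dump every transaction and data record of a storage file to stdout.
Dumper("Data.fs").dump()

# Or send the dump to a file for later inspection.
out = open("Data.fs.dump", "w")
Dumper("Data.fs", dest=out).dump()
out.close()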
src/ZODB/tests/StorageTestBase.py

@@ -180,7 +180,6 @@ class StorageTestBase(unittest.TestCase):
    def _dostoreNP(self, oid=None, revid=None, data=None, version=None,
                   user=None, description=None):
        return self._dostore(oid, revid, data, version, already_pickled=1)

    # The following methods depend on optional storage features.

    def _undo(self, tid, oid):
src/ZODB/tests/TransactionalUndoVersionStorage.py

@@ -29,9 +29,25 @@ class TransactionalUndoVersionStorage:
            pass # not expected
        return self._dostore(*args, **kwargs)

    def _undo(self, tid, oid):
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.transactionalUndo(tid, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        self.assertEqual(len(oids), 1)
        self.assertEqual(oids[0], oid)

    def checkUndoInVersion(self):
        eq = self.assertEqual
        unless = self.failUnless

        def check_objects(nonversiondata, versiondata):
            data, revid = self._storage.load(oid, version)
            self.assertEqual(zodb_unpickle(data), MinPO(versiondata))
            data, revid = self._storage.load(oid, '')
            self.assertEqual(zodb_unpickle(data), MinPO(nonversiondata))

        oid = self._storage.new_oid()
        version = 'one'
        revid_a = self._dostore(oid, data=MinPO(91))

@@ -39,21 +55,17 @@ class TransactionalUndoVersionStorage:
                                version=version)
        revid_c = self._dostore(oid, revid=revid_b, data=MinPO(93),
                                version=version)

-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        info = self._storage.undoInfo()
+        self._undo(info[0]['id'], oid)

        data, revid = self._storage.load(oid, '')
        eq(revid, revid_a)
        eq(zodb_unpickle(data), MinPO(91))
        data, revid = self._storage.load(oid, version)
        unless(revid > revid_b and revid > revid_c)
        eq(zodb_unpickle(data), MinPO(92))

        # Now commit the version...
        t = Transaction()
        self._storage.tpc_begin(t)

@@ -63,61 +75,25 @@ class TransactionalUndoVersionStorage:
        eq(len(oids), 1)
        eq(oids[0], oid)

-        #JF# No, because we fall back to non-version data.
-        #JF# self.assertRaises(POSException.VersionError,
-        #JF#                   self._storage.load,
-        #JF#                   oid, version)
-        data, revid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(92))
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(92))
+        check_objects(92, 92)

        # ...and undo the commit
        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-        data, revid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(92))
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(91))
-
-        # Now abort the version
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.abortVersion(version, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-
-        # The object should not exist in the version now, but it should exist
-        # in the non-version
-        #JF# No, because we fall back
-        #JF# self.assertRaises(POSException.VersionError,
-        #JF#                   self._storage.load,
-        #JF#                   oid, version)
-        data, revid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(91))
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(91))
+        self._undo(info[0]['id'], oid)
+        check_objects(91, 92)
+
+        oids = self._abortVersion(version)
+        assert len(oids) == 1
+        assert oids[0] == oid
+        check_objects(91, 91)

        # Now undo the abort
        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-        # And the object should be back in versions 'one' and ''
-        data, revid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(92))
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(91))
+        self._undo(info[0]['id'], oid)
+        check_objects(91, 92)

    def checkUndoCommitVersion(self):
        def load_value(oid, version=''):
src/ZODB/tests/VersionStorage.py

@@ -14,22 +14,6 @@ from ZODB.tests.StorageTestBase import zodb_unpickle
class VersionStorage:

    def _commitVersion(self, src, dst):
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.commitVersion(src, dst, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        return oids

    def _abortVersion(self, ver):
        t = Transaction()
        self._storage.tpc_begin(t)
        oids = self._storage.abortVersion(ver, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        return oids

    def checkCommitVersionSerialno(self):
        oid = self._storage.new_oid()
        revid1 = self._dostore(oid, data=MinPO(12))
src/ZODB/tests/testFileStorage.py

@@ -12,6 +12,7 @@ from ZODB.tests import StorageTestBase, BasicStorage, \
     Synchronization, ConflictResolution, HistoryStorage, \
     IteratorStorage, Corruption, RevisionStorage, PersistentStorage, \
     MTStorage, ReadOnlyStorage, RecoveryStorage
from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle

class FileStorageTests(
    StorageTestBase.StorageTestBase,

@@ -63,6 +64,8 @@ class FileStorageRecoveryTest(
    ):

    def setUp(self):
        StorageTestBase.removefs("Source.fs")
        StorageTestBase.removefs("Dest.fs")
        self._storage = ZODB.FileStorage.FileStorage('Source.fs')
        self._dst = ZODB.FileStorage.FileStorage('Dest.fs')

@@ -76,6 +79,63 @@ class FileStorageRecoveryTest(
        StorageTestBase.removefs('Dest.fs')
        return ZODB.FileStorage.FileStorage('Dest.fs')

    def checkRecoverUndoInVersion(self):
        oid = self._storage.new_oid()
        version = "aVersion"
        revid_a = self._dostore(oid, data=MinPO(91))
        revid_b = self._dostore(oid, revid=revid_a, version=version,
                                data=MinPO(92))
        revid_c = self._dostore(oid, revid=revid_b, version=version,
                                data=MinPO(93))
        self._undo(self._storage.undoInfo()[0]['id'], oid)
        self._commitVersion(version, '')
        self._undo(self._storage.undoInfo()[0]['id'], oid)

        # now copy the records to a new storage
        self._dst.copyTransactionsFrom(self._storage)
        self.compare(self._storage, self._dst)

        # The last two transactions were applied directly rather than
        # copied.  So we can't use compare() to verify that they new
        # transactions are applied correctly.  (The new transactions
        # will have different timestamps for each storage.)

        self._abortVersion(version)
        self.assert_(self._storage.versionEmpty(version))
        self._undo(self._storage.undoInfo()[0]['id'], oid)
        self.assert_(not self._storage.versionEmpty(version))

        # check the data is what we expect it to be
        data, revid = self._storage.load(oid, version)
        self.assertEqual(zodb_unpickle(data), MinPO(92))
        data, revid = self._storage.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO(91))

        # and swap the storages
        tmp = self._storage
        self._storage = self._dst
        self._abortVersion(version)
        self.assert_(self._storage.versionEmpty(version))
        self._undo(self._storage.undoInfo()[0]['id'], oid)
        self.assert_(not self._storage.versionEmpty(version))

        # check the data is what we expect it to be
        data, revid = self._storage.load(oid, version)
        self.assertEqual(zodb_unpickle(data), MinPO(92))
        data, revid = self._storage.load(oid, '')
        self.assertEqual(zodb_unpickle(data), MinPO(91))

        # swap them back
        self._storage = tmp

        # Now remove _dst and copy all the transactions a second time.
        # This time we will be able to confirm via compare().
        self._dst.close()
        StorageTestBase.removefs("Dest.fs")
        self._dst = ZODB.FileStorage.FileStorage('Dest.fs')
        self._dst.copyTransactionsFrom(self._storage)
        self.compare(self._storage, self._dst)

def test_suite():
    suite = unittest.makeSuite(FileStorageTests, 'check')
src/ZODB/tests/testZODB.py

@@ -94,28 +94,6 @@ class ZODBTests(unittest.TestCase, ExportImportTests):
        self._storage.close()
        removefs("ZODBTests.fs")

    def checkUnmodifiedObject(self):
        # Test that a transaction with only unmodified objects works
        # correctly.  The specific sequence of events is:
        # - an object is modified
        # - it is registered with the transaction
        # - the object is explicitly "unmodified"
        # - the transaction commits, but now has no modified objects
        # We'd like to avoid doing anything with the storage.
        ltid = self._storage.lastTransaction()
        _objects = get_transaction()._objects
        self.assertEqual(len(_objects), 0)
        r = self._db.open().root()
        obj = r["test"][0]
        obj[1] = 1
        self.assertEqual(obj._p_changed, 1)
        self.assertEqual(len(_objects), 1)
        del obj._p_changed
        self.assertEqual(obj._p_changed, None)
        self.assertEqual(len(_objects), 1)
        get_transaction().commit()
        self.assertEqual(ltid, self._storage.lastTransaction())

    def checkVersionOnly(self):
        # Make sure the changes to make empty transactions a no-op
        # still allow things like abortVersion().  This should work