Commit cc333941 authored by Jim Fulton's avatar Jim Fulton

Lots of changes while integrating with ClientStorage

- testZEO tests now pass

- async tests now pass again

  Probably need to write more async tests to reflect changes.
  (Or maybe the ZEO tests that drove them are enough.)

- dropped heartbeat tests, which were insane. Will add simpler test
  when I add heartbeats to the async implementation.
parent 3f31236b
......@@ -29,6 +29,8 @@ import time
import weakref
from binascii import hexlify
import BTrees.OOBTree
import zc.lockfile
import ZODB
import ZODB.BaseStorage
......@@ -223,7 +225,8 @@ class ClientStorage(object):
self._oids = [] # List of pre-fetched oids from server
cache = self._cache = open_cache(cache, var, client, cache_size)
cache = self._cache = open_cache(
cache, var, client, storage, cache_size)
# XXX need to check for POSIX-ness here
self.blob_dir = blob_dir
......@@ -257,8 +260,8 @@ class ClientStorage(object):
addr, self, cache, storage,
ZEO.asyncio.client.Fallback if read_only_fallback else read_only,
wait_timeout or 30,
wait=wait,
)
self._server.start()
self._call = self._server.call
self._async = self._server.async
self._async_iter = self._server.async_iter
......@@ -341,13 +344,6 @@ class ClientStorage(object):
self._info.update(info)
# for name in self._info.get('extensionMethods', {}).keys():
# if not hasattr(self, name):
# def mklambda(mname):
# return (lambda *args, **kw:
# self._server.rpc.call(mname, *args, **kw))
# setattr(self, name, mklambda(name))
for iface in (
ZODB.interfaces.IStorageRestoreable,
ZODB.interfaces.IStorageIteration,
......@@ -560,7 +556,7 @@ class ClientStorage(object):
def store():
yield ('storeBlobStart', ())
f = open(blobfilename, 'rb')
f = open(target, 'rb')
while 1:
chunk = f.read(59000)
if not chunk:
......@@ -714,6 +710,12 @@ class ClientStorage(object):
try:
tbuf = txn.data(self)
except AttributeError:
# Gaaaa. This is a recovery transaction. Work around this
# until we can think of something better. XXX
tb = {}
txn.data = tb.__getitem__
txn.set_data = tb.__setitem__
except KeyError:
pass
else:
......@@ -855,9 +857,6 @@ class ClientStorage(object):
assert not version
self._check_trans(transaction, 'restore')
self._async('restorea', oid, serial, data, prev_txn, id(transaction))
# Don't update the transaction buffer, because current data are
# unaffected.
return self._check_serials()
# Below are methods invoked by the StorageServer
......@@ -871,6 +870,10 @@ class ClientStorage(object):
"""Server callback to update the info dictionary."""
self._info.update(dict)
def invalidateCache(self):
if self._db is not None:
self._db.invalidateCache()
def invalidateTransaction(self, tid, oids):
"""Server callback: Invalidate objects modified by tid."""
if self._db is not None:
......@@ -1154,14 +1157,16 @@ def _lock_blob(path):
else:
break
def open_cache(cache, var, client, cache_size):
def open_cache(cache, var, client, storage, cache_size):
if isinstance(cache, (None.__class__, str)):
from ZEO.cache import ClientCache
if cache is None:
if client:
cache = os.path.join(var or os.getcwd(), client)
cache = os.path.join(var or os.getcwd(),
"%s-%s.zec" % (client, storage))
else:
return ClientCache(cache, cache_size)
# ephemeral cache
return ClientCache(None, cache_size)
cache = ClientCache(cache, cache_size)
......
......@@ -62,6 +62,7 @@ class TransactionBuffer:
def serial(self, oid, serial):
if isinstance(serial, Exception):
self.exception = serial
self.serials[oid] = None
else:
self.serials[oid] = serial
......
......@@ -7,9 +7,13 @@ import logging
import random
import threading
import traceback
import ZEO.Exceptions
import ZODB.event
import ZODB.POSException
import ZEO.Exceptions
import ZEO.interfaces
logger = logging.getLogger(__name__)
Fallback = object()
......@@ -272,6 +276,16 @@ class Protocol(asyncio.Protocol):
type(args[0]) == self.exception_type_type and
issubclass(args[0], Exception)
):
if not issubclass(
args[0], (
ZODB.POSException.POSKeyError,
ZODB.POSException.ConflictError,)
):
logger.error("%s from server: %s.%s:%s",
self.name,
args[0].__module__,
args[0].__name__,
args[1])
future.set_exception(args[1])
else:
future.set_result(args)
......@@ -307,7 +321,7 @@ class Protocol(asyncio.Protocol):
'receiveBlobStart', 'receiveBlobChunk', 'receiveBlobStop',
# plus: notify_connected, notify_disconnected
)
client_delegated = client_methods[1:]
client_delegated = client_methods[2:]
class Client:
"""asyncio low-level ZEO client interface
......@@ -432,6 +446,8 @@ class Client:
self.client.invalidateCache()
self.finished_verify(server_tid)
elif cache_tid > server_tid:
logger.critical(
'Client has seen newer transactions than server!')
raise AssertionError("Server behind client, %r < %r, %s",
server_tid, cache_tid, protocol)
elif cache_tid == server_tid:
......@@ -447,7 +463,15 @@ class Client:
return tid
else:
# cache is too old
logger.info("cache too old %s", protocol)
try:
ZODB.event.notify(
ZEO.interfaces.StaleCache(self.client))
except Exception:
logger.exception("sending StaleCache event")
logger.critical(
"%s dropping stale cache",
getattr(self.client, '__name__', ''),
)
self.cache.clear()
self.client.invalidateCache()
return server_tid
......@@ -561,12 +585,22 @@ class Client:
if self.ready:
@self.protocol.promise('tpc_finish', tid)
def committed(tid):
try:
cache = self.cache
for oid, data, resolved in updates:
cache.invalidate(oid, tid)
if data and not resolved:
cache.store(oid, tid, None, data)
cache.setLastTid(tid)
except Exception as exc:
future.set_exception(exc)
# At this point, our cache is in an inconsistent
# state. We need to reconnect in hopes of
# recovering to a consistent state.
self.protocol.close()
self.disconnected(self.protocol)
else:
f(tid)
future.set_result(tid)
......@@ -585,6 +619,18 @@ class Client:
self.cache.setLastTid(tid)
self.client.invalidateTransaction(tid, oids)
def serialnos(self, serials):
# Before delegating, check for errors (likely ConflictErrors)
# and invalidate the oids they're associated with. In the
# past, this was done by the client, but now we control the
# cache and this is our last chance, as the client won't call
# back into us when there's an error.
for oid, serial in serials:
if isinstance(serial, Exception):
self.cache.invalidate(oid, None)
self.client.serialnos(serials)
@property
def protocol_version(self):
return self.protocol.protocol_version
......@@ -699,19 +745,15 @@ class ClientThread(ClientRunner):
def __init__(self, addrs, client, cache,
storage_key='1', read_only=False, timeout=30,
disconnect_poll=1, wait=True):
disconnect_poll=1):
self.set_options(addrs, client, cache, storage_key, read_only,
timeout, disconnect_poll)
self.thread = threading.Thread(
target=self.run,
name='zeo_client_'+storage_key,
name="%s zeo client networking thread" % client.__name__,
daemon=True,
)
self.started = threading.Event()
self.thread.start()
self.started.wait()
if wait:
self.connected.result(timeout)
exception = None
def run(self):
......@@ -724,11 +766,24 @@ class ClientThread(ClientRunner):
except Exception as exc:
logger.exception("Client thread")
self.exception = exc
raise
else:
finally:
if not self.closed:
if self.client.ready:
self.closed = True
self.client.ready = False
self.client.client.notify_disconnected()
logger.critical("Client loop stopped unexpectedly")
loop.close()
logger.debug('Stopping client thread')
def start(self, wait=True):
self.thread.start()
self.started.wait()
if self.exception:
raise self.exception
if wait:
self.connected.result(self.timeout)
closed = False
def close(self):
if not self.closed:
......
......@@ -96,7 +96,16 @@ class AsyncTests(setupstack.TestCase, ClientRunner):
# Actually, the client isn't connected until it initializes its cache:
self.assertFalse(client.connected.done() or transport.data)
# If we try to make calls while the client is connecting, they're queued
# If we try to make calls while the client is *initially*
# connecting, we get an error. This is because some dufus
# decided to create a client storage without waiting for it to
# connect.
f1 = self.call('foo', 1, 2)
self.assertTrue(isinstance(f1.exception(), ClientDisconnected))
# When the client is reconnecting, it's ready flag is set to False and
# it queues calls:
client.ready = False
f1 = self.call('foo', 1, 2)
self.assertFalse(f1.done())
......@@ -195,7 +204,7 @@ class AsyncTests(setupstack.TestCase, ClientRunner):
self.assertEqual(parse(transport.pop()),
(8, False, 'tpc_finish', (b'd'*8,)))
respond(8, b'e'*8)
self.assertEqual(committed.result(), None)
self.assertEqual(committed.result(), b'e'*8)
self.assertEqual(cache.load(b'1'*8), None)
self.assertEqual(cache.load(b'2'*8), ('committed 2', b'e'*8))
self.assertEqual(cache.load(b'4'*8), ('committed 4', b'e'*8))
......
......@@ -2,17 +2,14 @@ Avoiding cache verification
=============================
For large databases it is common to also use very large ZEO cache
files. If a client has been disconnected for too long, cache verification
might be necessary, but cache verification can be very hard on the
storage server.
files. If a client has been disconnected for too long, the server
can't play back missing invalidations. In this case, the cache is
cleared. When this happens, a ZEO.interfaces.StaleCache event is
published, largely for backward compatibility.
When verification is needed, a ZEO.interfaces.StaleCache event is
published. Applications may handle this event to perform actions such
as exiting the process to avoid a cold restart.
ClientStorage provides an option to drop its cache rather than doing
verification. When this option is used, and verification would be
necessary, after publishing the event, ClientStorage:
ClientStorage used to provide an option to drop its cache rather than
doing verification. This is now the only behavior. Cache
verification is no longer supported.
- Invalidates all object caches
......@@ -27,8 +24,7 @@ Start a server, create a client to it and commit some data
>>> addr, admin = start_server(keep=1)
>>> import ZEO, transaction
>>> db = ZEO.DB(addr, drop_cache_rather_verify=True, client='cache',
... name='test')
>>> db = ZEO.DB(addr, client='cache', name='test')
>>> wait_connected(db.storage)
>>> conn = db.open()
>>> conn.root()[1] = conn.root().__class__()
......@@ -58,11 +54,11 @@ logging and event data:
>>> import logging, zope.testing.loggingsupport, ZODB.event
>>> handler = zope.testing.loggingsupport.InstalledHandler(
... 'ZEO.ClientStorage', level=logging.ERROR)
... 'ZEO', level=logging.ERROR)
>>> events = []
>>> def event_handler(e):
... events.append((
... len(e.storage._cache), str(handler), e.__class__.__name__))
... len(e.storage._server.client.cache), str(handler), e.__class__.__name__))
>>> old_notify = ZODB.event.notify
>>> ZODB.event.notify = event_handler
......@@ -105,8 +101,8 @@ Now, let's verify our assertions above:
- Logs a CRITICAL message.
>>> print(handler)
ZEO.ClientStorage CRITICAL
>>> print(handler) # doctest: +ELLIPSIS
ZEO... CRITICAL
test dropping stale cache
>>> handler.clear()
......@@ -156,8 +152,8 @@ in the database, which is why we get 1, rather than 0 objects in the cache.)
- Logs a CRITICAL message.
>>> print(handler)
ZEO.ClientStorage CRITICAL
>>> print(handler) # doctest: +ELLIPSIS
ZEO... CRITICAL
test dropping stale cache
>>> handler.clear()
......@@ -168,49 +164,6 @@ If we access the root object, it'll be loaded from the server:
>>> conn.root()[1].x
11
Finally, let's look at what happens without the
drop_cache_rather_verify option:
>>> db.close()
>>> db = ZEO.DB(addr, client='cache')
>>> wait_connected(db.storage)
>>> conn = db.open()
>>> conn.root()[1].x
11
>>> conn.root()[2] = conn.root().__class__()
>>> transaction.commit()
>>> len(db.storage._cache)
4
>>> stop_server(admin)
>>> addr2, admin = start_server(keep=1)
>>> db2 = ZEO.DB(addr2)
>>> wait_connected(db2.storage)
>>> conn2 = db2.open()
>>> for i in range(5):
... conn2.root()[1].x += 1
... transaction.commit()
>>> db2.close()
>>> stop_server(admin)
>>> _, admin = start_server(zeo_conf=dict(invalidation_queue_size=1),
... addr=addr)
>>> wait_connected(db.storage)
>>> for e in events:
... print(e)
(4, '', 'StaleCache')
>>> print(handler)
<BLANKLINE>
>>> len(db.storage._cache)
3
Here we see the cache wasn't dropped, although one of the records was
invalidated during verification.
.. Cleanup
>>> db.close()
......
......@@ -108,6 +108,7 @@ def start_zeo_server(storage_conf=None, zeo_conf=None, port=None, keep=False,
if not storage_conf:
storage_conf = '<filestorage>\npath %s\n</filestorage>' % path
if blob_dir:
storage_conf = '<blobstorage>\nblob-dir %s\n%s\n</blobstorage>' % (
blob_dir, storage_conf)
......
......@@ -28,7 +28,7 @@ from ZODB.tests import StorageTestBase, BasicStorage, \
MTStorage, ReadOnlyStorage, IteratorStorage, RecoveryStorage
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.utils import p64, u64
from ZODB.utils import p64, u64, z64
from zope.testing import renormalizing
import doctest
......@@ -142,31 +142,6 @@ class MiscZEOTests:
self.assertNotEquals(ZODB.utils.z64, storage3.lastTransaction())
storage3.close()
class ConfigurationTests(unittest.TestCase):
def checkDropCacheRatherVerifyConfiguration(self):
from ZODB.config import storageFromString
# the default is to do verification and not drop the cache
cs = storageFromString('''
<zeoclient>
server localhost:9090
wait false
</zeoclient>
''')
self.assertEqual(cs._drop_cache_rather_verify, False)
cs.close()
# now for dropping
cs = storageFromString('''
<zeoclient>
server localhost:9090
wait false
drop-cache-rather-verify true
</zeoclient>
''')
self.assertEqual(cs._drop_cache_rather_verify, True)
cs.close()
class GenericTests(
# Base class for all ZODB tests
StorageTestBase.StorageTestBase,
......@@ -451,56 +426,6 @@ class DemoStorageTests(
pass # DemoStorage pack doesn't do gc
checkPackAllRevisions = checkPackWithMultiDatabaseReferences
class HeartbeatTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
"""Make sure a heartbeat is being sent and that it does no harm
This is really hard to test properly because we can't see the data
flow between the client and server and we can't really tell what's
going on in the server very well. :(
"""
def setUp(self):
# Crank down the select frequency
self.__old_client_timeout = ZEO.zrpc.client.client_timeout
ZEO.zrpc.client.client_timeout = self.__client_timeout
ZEO.tests.ConnectionTests.CommonSetupTearDown.setUp(self)
__client_timeouts = 0
def __client_timeout(self):
self.__client_timeouts += 1
return .1
def tearDown(self):
ZEO.zrpc.client.client_timeout = self.__old_client_timeout
ZEO.tests.ConnectionTests.CommonSetupTearDown.tearDown(self)
def getConfig(self, path, create, read_only):
return """<mappingstorage 1/>"""
def checkHeartbeatWithServerClose(self):
# This is a minimal test that mainly tests that the heartbeat
# function does no harm.
self._storage = self.openClientStorage()
client_timeouts = self.__client_timeouts
forker.wait_until('got a timeout',
lambda : self.__client_timeouts > client_timeouts
)
self._dostore()
if hasattr(os, 'kill') and hasattr(signal, 'SIGKILL'):
# Kill server violently, in hopes of provoking problem
os.kill(self._pids[0], signal.SIGKILL)
self._servers[0] = None
else:
self.shutdownServer()
forker.wait_until('disconnected',
lambda : not self._storage.is_connected()
)
self._storage.close()
class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
def getConfig(self, path, create, read_only):
......@@ -510,20 +435,15 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
# Test what happens when the client loop falls over
self._storage = self.openClientStorage()
class Evil:
def writable(self):
raise SystemError("I'm evil")
import zope.testing.loggingsupport
handler = zope.testing.loggingsupport.InstalledHandler(
'ZEO.zrpc.client')
'ZEO.asyncio.client')
self._storage._rpc_mgr.map[None] = Evil()
try:
self._storage._rpc_mgr.trigger.pull_trigger()
except DisconnectedError:
pass
# We no longer implement the event loop, so we no longer know
# how to break it. We'll just stop it instead for now.
self._storage._server.loop.call_soon_threadsafe(
self._storage._server.loop.stop)
forker.wait_until(
'disconnected',
......@@ -532,62 +452,31 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
log = str(handler)
handler.uninstall()
self.assert_("ZEO client loop failed" in log)
self.assert_("Couldn't close a dispatcher." in log)
self.assert_("Client loop stopped unexpectedly" in log)
def checkExceptionLogsAtError(self):
# Test the exceptions are logged at error
self._storage = self.openClientStorage()
conn = self._storage._connection
# capture logging
log = []
conn.logger.log = (
lambda l, m, *a, **kw: log.append((l,m % a, kw))
)
# This is a deliberately bogus call to get an exception
# logged
self._storage._connection.handle_request(
'foo', 0, 'history', (1, 2, 3, 4))
# test logging
py2_msg = (
'history() raised exception: history() takes at most '
'3 arguments (5 given)'
)
py32_msg = (
'history() raised exception: history() takes at most '
'3 positional arguments (5 given)'
)
py3_msg = (
'history() raised exception: history() takes '
'from 2 to 3 positional arguments but 5 were given'
)
for level, message, kw in log:
if (message.endswith(py2_msg) or
message.endswith(py32_msg) or
message.endswith(py3_msg)):
self.assertEqual(level,logging.ERROR)
self.assertEqual(kw,{'exc_info':True})
break
else:
self.fail("error not in log %s" % log)
# cleanup
del conn.logger.log
self._dostore(z64, data=MinPO("X" * (10 * 128 * 1024)))
from zope.testing.loggingsupport import InstalledHandler
handler = InstalledHandler('ZEO.asyncio.client')
import ZODB.POSException
self.assertRaises(TypeError, self._storage.history, z64, None)
self.assertTrue(" from server: builtins.TypeError" in str(handler))
# POSKeyErrors and ConflictErrors aren't logged:
handler.clear()
self.assertRaises(ZODB.POSException.POSKeyError,
self._storage.history, None, None)
handler.uninstall()
self.assertEquals(str(handler), '')
def checkConnectionInvalidationOnReconnect(self):
storage = ClientStorage(self.addr, wait=1, min_disconnect_poll=0.1)
storage = ClientStorage(self.addr, min_disconnect_poll=0.1)
self._storage = storage
# and we'll wait for the storage to be reconnected:
for i in range(100):
if storage.is_connected():
break
time.sleep(0.1)
else:
raise AssertionError("Couldn't connect to server")
assert storage.is_connected()
class DummyDB:
_invalidatedCache = 0
......@@ -602,11 +491,14 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
base = db._invalidatedCache
# Now we'll force a disconnection and reconnection
storage._connection.close()
storage._server.loop.call_soon_threadsafe(
storage._server.client.protocol.connection_lost,
ValueError('test'))
# and we'll wait for the storage to be reconnected:
for i in range(100):
if storage.is_connected():
if db._invalidatedCache > base:
break
time.sleep(0.1)
else:
......@@ -1023,108 +915,50 @@ transaction, we'll get a result:
def tpc_finish_error():
r"""Server errors in tpc_finish weren't handled properly.
>>> import ZEO.ClientStorage, ZEO.zrpc.connection
If there are errors applying changes to the client cache, don't
leave the cache in an inconsistent state.
>>> class Connection:
... peer_protocol_version = (
... ZEO.zrpc.connection.Connection.current_protocol)
... def __init__(self, client):
... self.client = client
... def get_addr(self):
... return 'server'
... def is_async(self):
... return True
... def register_object(self, ob):
... pass
... def close(self):
... print('connection closed')
... trigger = property(lambda self: self)
... pull_trigger = lambda self, func, *args: func(*args)
>>> class ConnectionManager:
... def __init__(self, addr, client, tmin, tmax):
... self.client = client
... def connect(self, sync=1):
... self.client.notifyConnected(Connection(self.client))
... def close(self):
... pass
>>> class StorageServer:
... should_fail = True
... def __init__(self, conn):
... self.conn = conn
... self.t = None
... def get_info(self):
... return {}
... def endZeoVerify(self):
... self.conn.client.endVerify()
... def lastTransaction(self):
... return b'\0'*8
... def tpc_begin(self, t, *args):
... if self.t is not None:
... raise TypeError('already trans')
... self.t = t
... print('begin', args)
... def vote(self, t):
... if self.t != t:
... raise TypeError('bad trans')
... print('vote')
... def tpc_finish(self, *args):
... if self.should_fail:
... raise TypeError()
... print('finish')
... def tpc_abort(self, t):
... if self.t != t:
... raise TypeError('bad trans')
... self.t = None
... print('abort')
... def iterator_gc(*args):
... pass
>>> class ClientStorage(ZEO.ClientStorage.ClientStorage):
... ConnectionManagerClass = ConnectionManager
... StorageServerStubClass = StorageServer
>>> class Transaction:
... user = 'test'
... description = ''
... _extension = {}
>>> cs = ClientStorage(('', ''))
>>> t1 = Transaction()
>>> cs.tpc_begin(t1)
begin ('test', '', {}, None, ' ')
>>> cs.tpc_vote(t1)
vote
>>> cs.tpc_finish(t1)
>>> addr, admin = start_server()
>>> db = ZEO.DB(addr)
>>> conn = db.open()
>>> conn.root.x = 1
>>> t = conn.transaction_manager.get()
>>> client = conn._storage
>>> client.tpc_begin(t)
>>> conn.commit(t)
>>> _ = client.tpc_vote(t)
Cause some breakage by messing with the clients transaction
buffer, sadly, using implementation details:
>>> tbuf = t.data(client)
>>> tbuf.serials = None
tpc_finish will fail:
>>> client.tpc_finish(t)
Traceback (most recent call last):
...
TypeError
TypeError: 'NoneType' object is not subscriptable
>>> client.tpc_abort(t)
>>> t.abort()
>>> cs.tpc_abort(t1)
abort
But we can still load the saved data:
>>> t2 = Transaction()
>>> cs.tpc_begin(t2)
begin ('test', '', {}, None, ' ')
>>> cs.tpc_vote(t2)
vote
>>> conn2 = db.open()
>>> conn2.root.x
1
If client storage has an internal error after the storage finish
succeeds, it will close the connection, which will force a
restart and reverification.
And we can save new data:
>>> StorageServer.should_fail = False
>>> cs._update_cache = lambda : None
>>> try: cs.tpc_finish(t2)
... except: pass
... else: print("Should have failed")
finish
connection closed
>>> conn2.root.x += 1
>>> conn2.transaction_manager.commit()
>>> cs.close()
>>> db.close()
>>> stop_server(admin)
"""
def client_has_newer_data_than_server():
......@@ -1156,11 +990,9 @@ def client_has_newer_data_than_server():
>>> wait_until('got enough errors', lambda:
... len([x for x in handler.records
... if x.filename.lower() == 'clientstorage.py' and
... x.funcName == 'verify_cache' and
... x.levelname == 'CRITICAL' and
... x.msg == 'client Client has seen '
... 'newer transactions than server!']) >= 2)
... if x.levelname == 'CRITICAL' and
... 'Client has seen newer transactions than server!' in x.msg
... ]) >= 2)
Note that the errors repeat because the client keeps on trying to connect.
......@@ -1577,7 +1409,7 @@ Now we'll try to use the connection, mainly to wait for everything to
get processed. Before we fixed this by making tpc_finish a synchronous
call to the server. we'd get some sort of error here.
>>> _ = c._storage._server.loadEx(b'\0'*8)
>>> _ = c._storage._call('loadEx', b'\0'*8)
>>> c.close()
......@@ -1681,31 +1513,6 @@ def sync_connect_doesnt_hang():
>>> ZEO.zrpc.client.ConnectThread = ConnectThread
"""
def lp143344_extension_methods_not_lost_on_server_restart():
r"""
Make sure we don't lose extension methods on server restart.
>>> addr, adminaddr = start_server(keep=True)
>>> conn = ZEO.connection(addr)
>>> conn.root.x = 1
>>> transaction.commit()
>>> conn.db().storage.answer_to_the_ultimate_question()
42
>>> stop_server(adminaddr)
>>> wait_until('not connected',
... lambda : not conn.db().storage.is_connected())
>>> _ = start_server(addr=addr)
>>> wait_until('connected', conn.db().storage.is_connected)
>>> conn.root.x
1
>>> conn.db().storage.answer_to_the_ultimate_question()
42
>>> conn.close()
"""
def can_use_empty_string_for_local_host_on_client():
"""We should be able to spell localhost with ''.
......@@ -1725,10 +1532,7 @@ slow_test_classes = [
FileStorageTests, FileStorageHexTests, FileStorageClientHexTests,
]
quick_test_classes = [
FileStorageRecoveryTests, ConfigurationTests, HeartbeatTests,
ZRPCConnectionTests,
]
quick_test_classes = [FileStorageRecoveryTests, ZRPCConnectionTests]
class ServerManagingClientStorage(ClientStorage):
......
......@@ -103,7 +103,7 @@ Now, let's see if we can break it. :)
... path = s2.fshelper.getBlobFilename(*blob_id)
... if os.path.exists(path):
... ZODB.blob.remove_committed(path)
... s2._server.sendBlob(*blob_id)
... s2._call('sendBlob', *blob_id)
... else: print('Dang')
>>> threadf.join()
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment