From 98978059d54655d8669f9e687c814849482bc93e Mon Sep 17 00:00:00 2001
From: Tim Peters <tim.one@comcast.net>
Date: Thu, 24 Mar 2005 22:41:24 +0000
Subject: [PATCH] Fixes merged to the 3.3 branch, so this branch is no longer needed.

---
 branches/bug1734/COPYING                      |    5 -
 branches/bug1734/COPYRIGHT.txt                |    9 -
 branches/bug1734/LICENSE.txt                  |   54 -
 branches/bug1734/MANIFEST                     |  380 ---
 branches/bug1734/MANIFEST.in                  |    9 -
 branches/bug1734/NEWS.txt                     | 2202 --------------
 branches/bug1734/README.txt                   |  195 --
 branches/bug1734/doc/ACKS                     |   42 -
 branches/bug1734/doc/Makefile                 |   36 -
 branches/bug1734/doc/README.txt               |   47 -
 branches/bug1734/doc/ZConfig/Makefile         |   50 -
 branches/bug1734/doc/ZConfig/README.txt       |   16 -
 branches/bug1734/doc/ZConfig/schema.dtd       |   99 -
 branches/bug1734/doc/ZConfig/xmlmarkup.perl   |   59 -
 branches/bug1734/doc/ZConfig/xmlmarkup.sty    |   38 -
 branches/bug1734/doc/ZConfig/zconfig.pdf      |  Bin 94001 -> 0 bytes
 branches/bug1734/doc/ZConfig/zconfig.tex      | 1856 -----------
 branches/bug1734/doc/ZEO/README.txt           |   21 -
 branches/bug1734/doc/ZEO/ZopeREADME.txt       |   96 -
 branches/bug1734/doc/ZEO/cache.txt            |   67 -
 branches/bug1734/doc/ZEO/howto.txt            |  433 ---
 branches/bug1734/doc/ZEO/trace.txt            |  126 -
 branches/bug1734/doc/guide/README             |    4 -
 branches/bug1734/doc/guide/TODO               |    4 -
 branches/bug1734/doc/guide/admin.tex          |    6 -
 branches/bug1734/doc/guide/chatter.py         |  125 -
 branches/bug1734/doc/guide/gfdl.tex           |  367 ---
 branches/bug1734/doc/guide/indexing.tex       |    5 -
 branches/bug1734/doc/guide/introduction.tex   |  197 --
 branches/bug1734/doc/guide/links.tex          |   21 -
 branches/bug1734/doc/guide/modules.tex        |  471 ---
 branches/bug1734/doc/guide/prog-zodb.tex      |  484 ---
 branches/bug1734/doc/guide/storages.tex       |   22 -
 branches/bug1734/doc/guide/transactions.tex   |  202 --
 branches/bug1734/doc/guide/zeo.tex            |  273 --
 branches/bug1734/doc/guide/zodb.tex           |   32 -
 branches/bug1734/doc/storage.pdf              |  Bin 25330 -> 0 bytes
 branches/bug1734/doc/storage.tex              |  425 ---
 branches/bug1734/doc/zdctl.txt                |  335 --
 branches/bug1734/doc/zodb.pdf                 |  Bin 121233 -> 0 bytes
 branches/bug1734/log.ini                      |   32 -
 branches/bug1734/release.py                   |   83 -
 .../bug1734/releases/ZODB3/DEPENDENCIES.cfg   |    5 -
 branches/bug1734/releases/ZODB3/PACKAGE.cfg   |   24 -
 .../bug1734/releases/ZODB3/PUBLICATION.cfg    |   23 -
 branches/bug1734/releases/ZODB3/SETUP.cfg     |    7 -
 branches/bug1734/setup.py                     |  267 --
 .../bug1734/src/BTrees/BTreeItemsTemplate.c   |  698 -----
 .../bug1734/src/BTrees/BTreeModuleTemplate.c  |  491 ---
 branches/bug1734/src/BTrees/BTreeTemplate.c   | 2078 -------------
 branches/bug1734/src/BTrees/BucketTemplate.c  | 1722 -----------
 branches/bug1734/src/BTrees/DEPENDENCIES.cfg  |    2 -
 branches/bug1734/src/BTrees/IFBTree.py        |   16 -
 branches/bug1734/src/BTrees/IIBTree.py        |   16 -
 branches/bug1734/src/BTrees/IOBTree.py        |   16 -
 branches/bug1734/src/BTrees/Interfaces.py     |  402 ---
 branches/bug1734/src/BTrees/Length.py         |   58 -
 branches/bug1734/src/BTrees/Maintainer.txt    |  374 ---
 branches/bug1734/src/BTrees/MergeTemplate.c   |  331 --
 branches/bug1734/src/BTrees/OIBTree.py        |   16 -
 branches/bug1734/src/BTrees/OOBTree.py        |   16 -
 branches/bug1734/src/BTrees/SETUP.cfg         |  120 -
 branches/bug1734/src/BTrees/SetOpTemplate.c   |  557 ----
 branches/bug1734/src/BTrees/SetTemplate.c     |  362 ---
 branches/bug1734/src/BTrees/TreeSetTemplate.c |  244 --
 branches/bug1734/src/BTrees/_IFBTree.c        |   34 -
 branches/bug1734/src/BTrees/_IIBTree.c        |   34 -
 branches/bug1734/src/BTrees/_IOBTree.c        |   32 -
 branches/bug1734/src/BTrees/_OIBTree.c        |   32 -
 branches/bug1734/src/BTrees/_OOBTree.c        |   32 -
 branches/bug1734/src/BTrees/__init__.py       |    1 -
 branches/bug1734/src/BTrees/_fsBTree.c        |   70 -
 branches/bug1734/src/BTrees/check.py          |  424 ---
 branches/bug1734/src/BTrees/convert.py        |   28 -
 .../bug1734/src/BTrees/floatvaluemacros.h     |   25 -
 branches/bug1734/src/BTrees/intkeymacros.h    |   16 -
 branches/bug1734/src/BTrees/intvaluemacros.h  |   23 -
 branches/bug1734/src/BTrees/objectkeymacros.h |    9 -
 .../bug1734/src/BTrees/objectvaluemacros.h    |   13 -
 branches/bug1734/src/BTrees/sorters.c         |  527 ----
 branches/bug1734/src/BTrees/tests/__init__.py |    1 -
 .../bug1734/src/BTrees/tests/testBTrees.py    | 1483 ---------
 .../src/BTrees/tests/testBTreesUnicode.py     |   76 -
 .../bug1734/src/BTrees/tests/testConflict.py  |  857 ------
 .../bug1734/src/BTrees/tests/testSetOps.py    |  485 ---
 .../src/BTrees/tests/test_btreesubclass.py    |   44 -
 .../bug1734/src/BTrees/tests/test_check.py    |   96 -
 .../bug1734/src/BTrees/tests/test_compare.py  |   74 -
 .../bug1734/src/Persistence/DEPENDENCIES.cfg  |    2 -
 .../bug1734/src/Persistence/_Persistence.c    |  175 --
 branches/bug1734/src/Persistence/__init__.py  |   55 -
 branches/bug1734/src/Persistence/mapping.py   |   37 -
 .../bug1734/src/Persistence/tests/__init__.py |    1 -
 .../src/Persistence/tests/testPersistent.py   |  224 --
 .../Persistence/tests/test_ExtensionClass.py  |  507 ----
 .../src/Persistence/tests/test_mapping.py     |   90 -
 .../bug1734/src/ThreadedAsync/LoopCallback.py |  192 --
 .../bug1734/src/ThreadedAsync/__init__.py     |   19 -
 branches/bug1734/src/ZConfig/NEWS.txt         |   74 -
 branches/bug1734/src/ZConfig/PACKAGE.cfg      |   26 -
 branches/bug1734/src/ZConfig/PUBLICATION.cfg  |   31 -
 branches/bug1734/src/ZConfig/README.txt       |   57 -
 branches/bug1734/src/ZConfig/SETUP.cfg        |    6 -
 branches/bug1734/src/ZConfig/__init__.py      |  128 -
 branches/bug1734/src/ZConfig/cfgparser.py     |  191 --
 branches/bug1734/src/ZConfig/cmdline.py       |  179 --
 .../src/ZConfig/components/__init__.py        |    1 -
 .../src/ZConfig/components/basic/__init__.py  |    1 -
 .../ZConfig/components/basic/component.xml    |    9 -
 .../src/ZConfig/components/basic/mapping.py   |   18 -
 .../src/ZConfig/components/basic/mapping.xml  |   34 -
 .../components/basic/tests/__init__.py        |    1 -
 .../components/basic/tests/test_mapping.py    |   89 -
 .../src/ZConfig/components/logger/__init__.py |   23 -
 .../ZConfig/components/logger/abstract.xml    |    7 -
 .../ZConfig/components/logger/base-logger.xml |   48 -
 .../ZConfig/components/logger/component.xml   |   10 -
 .../ZConfig/components/logger/datatypes.py    |   39 -
 .../ZConfig/components/logger/eventlog.xml    |   15 -
 .../src/ZConfig/components/logger/factory.py  |   36 -
 .../src/ZConfig/components/logger/handlers.py |  177 --
 .../ZConfig/components/logger/handlers.xml    |   75 -
 .../src/ZConfig/components/logger/logger.py   |  102 -
 .../src/ZConfig/components/logger/logger.xml  |   39 -
 .../ZConfig/components/logger/loghandler.py   |   71 -
 .../components/logger/tests/__init__.py       |    1 -
 .../components/logger/tests/test_logger.py    |  233 --
 branches/bug1734/src/ZConfig/datatypes.py     |  400 ---
 branches/bug1734/src/ZConfig/doc/Makefile     |   50 -
 branches/bug1734/src/ZConfig/doc/README.txt   |   16 -
 branches/bug1734/src/ZConfig/doc/schema.dtd   |   99 -
 .../bug1734/src/ZConfig/doc/xmlmarkup.perl    |   59 -
 .../bug1734/src/ZConfig/doc/xmlmarkup.sty     |   38 -
 branches/bug1734/src/ZConfig/doc/zconfig.pdf  |  Bin 94001 -> 0 bytes
 branches/bug1734/src/ZConfig/doc/zconfig.tex  | 1856 -----------
 branches/bug1734/src/ZConfig/info.py          |  514 ----
 branches/bug1734/src/ZConfig/loader.py        |  307 --
 branches/bug1734/src/ZConfig/matcher.py       |  302 --
 branches/bug1734/src/ZConfig/schema.py        |  581 ----
 branches/bug1734/src/ZConfig/scripts/zconfig  |   90 -
 .../src/ZConfig/scripts/zconfig_schema2html   |  103 -
 branches/bug1734/src/ZConfig/substitution.py  |   86 -
 .../bug1734/src/ZConfig/tests/__init__.py     |   17 -
 .../ZConfig/tests/input/base-datatype1.xml    |    4 -
 .../ZConfig/tests/input/base-datatype2.xml    |    3 -
 .../src/ZConfig/tests/input/base-keytype1.xml |    3 -
 .../src/ZConfig/tests/input/base-keytype2.xml |    3 -
 .../bug1734/src/ZConfig/tests/input/base.xml  |    3 -
 .../src/ZConfig/tests/input/include.conf      |    4 -
 .../src/ZConfig/tests/input/inner.conf        |    2 -
 .../src/ZConfig/tests/input/library.xml       |    7 -
 .../src/ZConfig/tests/input/logger.xml        |   12 -
 .../src/ZConfig/tests/input/outer.conf        |    3 -
 .../src/ZConfig/tests/input/simple.conf       |   32 -
 .../src/ZConfig/tests/input/simple.xml        |   29 -
 .../ZConfig/tests/input/simplesections.conf   |   40 -
 .../ZConfig/tests/input/simplesections.xml    |   25 -
 .../src/ZConfig/tests/library/README.txt      |    2 -
 .../src/ZConfig/tests/library/__init__.py     |    1 -
 .../ZConfig/tests/library/thing/__init__.py   |   22 -
 .../ZConfig/tests/library/thing/component.xml |   10 -
 .../tests/library/thing/extras/extras.xml     |    5 -
 .../ZConfig/tests/library/widget/__init__.py  |    1 -
 .../tests/library/widget/component.xml        |    7 -
 .../ZConfig/tests/library/widget/extra.xml    |    5 -
 .../bug1734/src/ZConfig/tests/runtests.bat    |   12 -
 .../bug1734/src/ZConfig/tests/runtests.py     |   64 -
 branches/bug1734/src/ZConfig/tests/support.py |   72 -
 .../src/ZConfig/tests/test_cfgimports.py      |   56 -
 .../bug1734/src/ZConfig/tests/test_cmdline.py |  180 --
 .../bug1734/src/ZConfig/tests/test_config.py  |  182 --
 .../src/ZConfig/tests/test_cookbook.py        |   71 -
 .../src/ZConfig/tests/test_datatypes.py       |  393 ---
 .../bug1734/src/ZConfig/tests/test_loader.py  |  293 --
 .../bug1734/src/ZConfig/tests/test_schema.py  | 1037 -------
 .../bug1734/src/ZConfig/tests/test_subst.py   |   97 -
 branches/bug1734/src/ZConfig/url.py           |   67 -
 branches/bug1734/src/ZEO/ClientStorage.py     | 1141 -------
 branches/bug1734/src/ZEO/ClientStub.py        |   62 -
 branches/bug1734/src/ZEO/CommitLog.py         |   49 -
 branches/bug1734/src/ZEO/DEPENDENCIES.cfg     |    7 -
 branches/bug1734/src/ZEO/DebugServer.py       |   89 -
 branches/bug1734/src/ZEO/Exceptions.py        |   28 -
 branches/bug1734/src/ZEO/README.txt           |   44 -
 branches/bug1734/src/ZEO/SETUP.cfg            |    4 -
 branches/bug1734/src/ZEO/ServerStub.py        |  295 --
 branches/bug1734/src/ZEO/StorageServer.py     | 1015 -------
 branches/bug1734/src/ZEO/TransactionBuffer.py |  148 -
 branches/bug1734/src/ZEO/__init__.py          |   25 -
 branches/bug1734/src/ZEO/auth/__init__.py     |   30 -
 branches/bug1734/src/ZEO/auth/auth_digest.py  |  143 -
 branches/bug1734/src/ZEO/auth/base.py         |  131 -
 branches/bug1734/src/ZEO/auth/hmac.py         |   97 -
 branches/bug1734/src/ZEO/cache.py             | 1071 -------
 branches/bug1734/src/ZEO/component.xml        |   98 -
 branches/bug1734/src/ZEO/mkzeoinst.py         |  245 --
 branches/bug1734/src/ZEO/monitor.py           |  162 -
 branches/bug1734/src/ZEO/runzeo.py            |  276 --
 branches/bug1734/src/ZEO/schema.xml           |   40 -
 branches/bug1734/src/ZEO/simul.py             |  757 -----
 branches/bug1734/src/ZEO/stats.py             |  392 ---
 branches/bug1734/src/ZEO/tests/Cache.py       |  107 -
 .../bug1734/src/ZEO/tests/CommitLockTests.py  |  249 --
 .../bug1734/src/ZEO/tests/ConnectionTests.py  | 1125 -------
 .../src/ZEO/tests/InvalidationTests.py        |  608 ----
 branches/bug1734/src/ZEO/tests/TestThread.py  |   56 -
 branches/bug1734/src/ZEO/tests/ThreadTests.py |  132 -
 branches/bug1734/src/ZEO/tests/__init__.py    |   13 -
 .../bug1734/src/ZEO/tests/auth_plaintext.py   |   55 -
 branches/bug1734/src/ZEO/tests/deadlock.py    |   48 -
 branches/bug1734/src/ZEO/tests/forker.py      |  192 --
 branches/bug1734/src/ZEO/tests/multi.py       |  158 -
 branches/bug1734/src/ZEO/tests/speed.py       |  215 --
 branches/bug1734/src/ZEO/tests/stress.py      |  137 -
 branches/bug1734/src/ZEO/tests/testAuth.py    |  134 -
 .../bug1734/src/ZEO/tests/testConnection.py   |  142 -
 branches/bug1734/src/ZEO/tests/testMonitor.py |   89 -
 .../src/ZEO/tests/testTransactionBuffer.py    |   70 -
 branches/bug1734/src/ZEO/tests/testZEO.py     |  215 --
 .../bug1734/src/ZEO/tests/testZEOOptions.py   |  112 -
 branches/bug1734/src/ZEO/tests/test_cache.py  |  156 -
 branches/bug1734/src/ZEO/tests/zeoserver.py   |  216 --
 branches/bug1734/src/ZEO/util.py              |   56 -
 branches/bug1734/src/ZEO/version.txt          |    1 -
 branches/bug1734/src/ZEO/zeoctl.py            |   20 -
 branches/bug1734/src/ZEO/zeoctl.xml           |   31 -
 branches/bug1734/src/ZEO/zeopasswd.py         |  129 -
 branches/bug1734/src/ZEO/zrpc/__init__.py     |   24 -
 branches/bug1734/src/ZEO/zrpc/_hmac.py        |  104 -
 branches/bug1734/src/ZEO/zrpc/client.py       |  531 ----
 branches/bug1734/src/ZEO/zrpc/connection.py   |  781 -----
 branches/bug1734/src/ZEO/zrpc/error.py        |   27 -
 branches/bug1734/src/ZEO/zrpc/log.py          |   77 -
 branches/bug1734/src/ZEO/zrpc/marshal.py      |   79 -
 branches/bug1734/src/ZEO/zrpc/server.py       |   59 -
 branches/bug1734/src/ZEO/zrpc/smac.py         |  308 --
 branches/bug1734/src/ZEO/zrpc/trigger.py      |  216 --
 branches/bug1734/src/ZODB/ActivityMonitor.py  |  108 -
 branches/bug1734/src/ZODB/BaseStorage.py      |  435 ---
 .../bug1734/src/ZODB/ConflictResolution.py    |  147 -
 branches/bug1734/src/ZODB/Connection.py       |  918 ------
 branches/bug1734/src/ZODB/DB.py               |  785 -----
 branches/bug1734/src/ZODB/DEPENDENCIES.cfg    |    6 -
 branches/bug1734/src/ZODB/DemoStorage.py      |  539 ----
 branches/bug1734/src/ZODB/ExportImport.py     |  155 -
 .../src/ZODB/FileStorage/FileStorage.py       | 2082 -------------
 .../bug1734/src/ZODB/FileStorage/__init__.py  |    4 -
 .../bug1734/src/ZODB/FileStorage/format.py    |  354 ---
 .../bug1734/src/ZODB/FileStorage/fsdump.py    |  132 -
 .../bug1734/src/ZODB/FileStorage/fsoids.py    |  200 --
 .../bug1734/src/ZODB/FileStorage/fspack.py    |  699 -----
 branches/bug1734/src/ZODB/MappingStorage.py   |  149 -
 branches/bug1734/src/ZODB/Mount.py            |  304 --
 branches/bug1734/src/ZODB/POSException.py     |  309 --
 branches/bug1734/src/ZODB/SETUP.cfg           |    5 -
 branches/bug1734/src/ZODB/TmpStore.py         |  126 -
 .../bug1734/src/ZODB/UndoLogCompatible.py     |   29 -
 branches/bug1734/src/ZODB/ZApplication.py     |   88 -
 branches/bug1734/src/ZODB/__init__.py         |   38 -
 branches/bug1734/src/ZODB/broken.py           |  337 --
 branches/bug1734/src/ZODB/collaborations.txt  |  172 --
 branches/bug1734/src/ZODB/component.xml       |  161 -
 branches/bug1734/src/ZODB/config.py           |  177 --
 branches/bug1734/src/ZODB/config.xml          |    7 -
 branches/bug1734/src/ZODB/conversionhack.py   |   34 -
 branches/bug1734/src/ZODB/dbmStorage.py       |  117 -
 branches/bug1734/src/ZODB/fsIndex.py          |  194 --
 branches/bug1734/src/ZODB/fsrecover.py        |  386 ---
 branches/bug1734/src/ZODB/fstools.py          |  151 -
 branches/bug1734/src/ZODB/interfaces.py       |  475 ---
 branches/bug1734/src/ZODB/lock_file.py        |   75 -
 branches/bug1734/src/ZODB/loglevels.py        |   47 -
 branches/bug1734/src/ZODB/serialize.py        |  551 ----
 branches/bug1734/src/ZODB/storage.xml         |    4 -
 branches/bug1734/src/ZODB/subtransactions.txt |   51 -
 .../bug1734/src/ZODB/tests/BasicStorage.py    |  221 --
 .../src/ZODB/tests/ConflictResolution.py      |  183 --
 branches/bug1734/src/ZODB/tests/Corruption.py |   79 -
 .../bug1734/src/ZODB/tests/HistoryStorage.py  |  230 --
 .../bug1734/src/ZODB/tests/IteratorStorage.py |  233 --
 .../bug1734/src/ZODB/tests/LocalStorage.py    |   27 -
 branches/bug1734/src/ZODB/tests/MTStorage.py  |  226 --
 branches/bug1734/src/ZODB/tests/MinPO.py      |   26 -
 .../bug1734/src/ZODB/tests/PackableStorage.py |  700 -----
 .../src/ZODB/tests/PersistentStorage.py       |   55 -
 .../bug1734/src/ZODB/tests/ReadOnlyStorage.py |   64 -
 .../bug1734/src/ZODB/tests/RecoveryStorage.py |  288 --
 .../bug1734/src/ZODB/tests/RevisionStorage.py |  175 --
 .../bug1734/src/ZODB/tests/StorageTestBase.py |  242 --
 .../bug1734/src/ZODB/tests/Synchronization.py |  145 -
 .../ZODB/tests/TransactionalUndoStorage.py    |  726 -----
 .../tests/TransactionalUndoVersionStorage.py  |  198 --
 .../bug1734/src/ZODB/tests/VersionStorage.py  |  545 ----
 branches/bug1734/src/ZODB/tests/__init__.py   |    1 -
 branches/bug1734/src/ZODB/tests/dangle.py     |   65 -
 branches/bug1734/src/ZODB/tests/dbopen.txt    |  278 --
 branches/bug1734/src/ZODB/tests/multidb.txt   |  146 -
 branches/bug1734/src/ZODB/tests/sampledm.py   |  412 ---
 branches/bug1734/src/ZODB/tests/speed.py      |  125 -
 .../src/ZODB/tests/testActivityMonitor.py     |  107 -
 branches/bug1734/src/ZODB/tests/testBroken.py |   91 -
 branches/bug1734/src/ZODB/tests/testCache.py  |  426 ---
 branches/bug1734/src/ZODB/tests/testConfig.py |  124 -
 .../bug1734/src/ZODB/tests/testConnection.py  |  660 ----
 branches/bug1734/src/ZODB/tests/testDB.py     |  143 -
 .../bug1734/src/ZODB/tests/testDemoStorage.py |   65 -
 .../bug1734/src/ZODB/tests/testFileStorage.py |  498 ---
 .../src/ZODB/tests/testMappingStorage.py      |   47 -
 .../src/ZODB/tests/testPersistentList.py      |  223 --
 .../src/ZODB/tests/testPersistentMapping.py   |  108 -
 .../bug1734/src/ZODB/tests/testRecover.py     |  185 --
 .../bug1734/src/ZODB/tests/testSerialize.py   |  124 -
 .../src/ZODB/tests/testSubTransaction.py      |  138 -
 .../bug1734/src/ZODB/tests/testTimeStamp.py   |  144 -
 branches/bug1734/src/ZODB/tests/testUtils.py  |   97 -
 branches/bug1734/src/ZODB/tests/testZODB.py   |  646 ----
 branches/bug1734/src/ZODB/tests/test_cache.py |  209 --
 .../src/ZODB/tests/test_datamanageradapter.py |  808 -----
 .../src/ZODB/tests/test_doctest_files.py      |   20 -
 .../bug1734/src/ZODB/tests/test_fsdump.py     |   78 -
 .../bug1734/src/ZODB/tests/test_storage.py    |  164 -
 .../bug1734/src/ZODB/tests/testfsIndex.py     |  176 --
 branches/bug1734/src/ZODB/tests/testfsoids.py |  177 --
 branches/bug1734/src/ZODB/tests/testmvcc.py   |  364 ---
 branches/bug1734/src/ZODB/tests/util.py       |   40 -
 branches/bug1734/src/ZODB/tests/warnhook.py   |   57 -
 branches/bug1734/src/ZODB/transact.py         |   58 -
 branches/bug1734/src/ZODB/utils.py            |  300 --
 branches/bug1734/src/ZODB/winlock.c           |  105 -
 branches/bug1734/src/ZopeUndo/Prefix.py       |   39 -
 branches/bug1734/src/ZopeUndo/__init__.py     |   13 -
 .../bug1734/src/ZopeUndo/tests/__init__.py    |   13 -
 .../bug1734/src/ZopeUndo/tests/testPrefix.py  |   36 -
 .../bug1734/src/persistent/DEPENDENCIES.cfg   |    3 -
 branches/bug1734/src/persistent/README.txt    |   19 -
 branches/bug1734/src/persistent/SETUP.cfg     |   33 -
 branches/bug1734/src/persistent/TimeStamp.c   |  437 ---
 branches/bug1734/src/persistent/__init__.py   |   34 -
 .../bug1734/src/persistent/cPersistence.c     | 1210 --------
 .../bug1734/src/persistent/cPersistence.h     |  128 -
 .../bug1734/src/persistent/cPickleCache.c     | 1127 -------
 branches/bug1734/src/persistent/dict.py       |   77 -
 branches/bug1734/src/persistent/interfaces.py |  297 --
 branches/bug1734/src/persistent/list.py       |   96 -
 branches/bug1734/src/persistent/mapping.py    |  104 -
 branches/bug1734/src/persistent/ring.c        |   61 -
 branches/bug1734/src/persistent/ring.h        |   66 -
 .../bug1734/src/persistent/tests/__init__.py  |    1 -
 .../src/persistent/tests/persistent.txt       |  448 ---
 .../persistent/tests/persistenttestbase.py    |  372 ---
 .../src/persistent/tests/testPersistent.py    |  259 --
 .../src/persistent/tests/test_PickleCache.py  |   52 -
 .../bug1734/src/persistent/tests/test_list.py |  229 --
 .../persistent/tests/test_overriding_attrs.py |  402 ---
 .../src/persistent/tests/test_persistent.py   |   26 -
 .../src/persistent/tests/test_pickle.py       |  280 --
 .../bug1734/src/persistent/tests/test_wref.py |   24 -
 branches/bug1734/src/persistent/wref.py       |  300 --
 branches/bug1734/src/scripts/README.txt       |  155 -
 branches/bug1734/src/scripts/analyze.py       |  135 -
 branches/bug1734/src/scripts/checkbtrees.py   |  122 -
 branches/bug1734/src/scripts/fsdump.py        |    9 -
 branches/bug1734/src/scripts/fsoids.py        |   78 -
 branches/bug1734/src/scripts/fsrefs.py        |  157 -
 branches/bug1734/src/scripts/fsstats.py       |  199 --
 branches/bug1734/src/scripts/fstail.py        |   49 -
 branches/bug1734/src/scripts/fstest.py        |  227 --
 .../src/scripts/manual_tests/test-checker.fs  |  Bin 802 -> 0 bytes
 .../src/scripts/manual_tests/testfstest.py    |  181 --
 .../src/scripts/manual_tests/testrepozo.py    |  151 -
 .../src/scripts/manual_tests/testzeopack.py   |  115 -
 branches/bug1734/src/scripts/migrate.py       |  372 ---
 branches/bug1734/src/scripts/netspace.py      |  120 -
 branches/bug1734/src/scripts/parsezeolog.py   |  135 -
 branches/bug1734/src/scripts/repozo.py        |  517 ----
 branches/bug1734/src/scripts/space.py         |   60 -
 branches/bug1734/src/scripts/timeout.py       |   68 -
 branches/bug1734/src/scripts/zeopack.py       |  123 -
 branches/bug1734/src/scripts/zeoqueue.py      |  401 ---
 branches/bug1734/src/scripts/zeoreplay.py     |  315 --
 branches/bug1734/src/scripts/zeoserverlog.py  |  538 ----
 branches/bug1734/src/scripts/zeoup.py         |  137 -
 branches/bug1734/src/scripts/zodbload.py      |  842 -----
 .../bug1734/src/transaction/DEPENDENCIES.cfg  |    1 -
 branches/bug1734/src/transaction/README.txt   |   14 -
 branches/bug1734/src/transaction/__init__.py  |   39 -
 branches/bug1734/src/transaction/_manager.py  |  112 -
 .../bug1734/src/transaction/_transaction.py   |  632 ----
 .../bug1734/src/transaction/interfaces.py     |  263 --
 branches/bug1734/src/transaction/notes.txt    |  269 --
 .../bug1734/src/transaction/tests/__init__.py |    1 -
 .../transaction/tests/abstestIDataManager.py  |   63 -
 .../tests/test_SampleDataManager.py           |  412 ---
 .../tests/test_SampleResourceManager.py       |  435 ---
 .../transaction/tests/test_register_compat.py |  154 -
 .../src/transaction/tests/test_transaction.py |  646 ----
 .../src/transaction/tests/test_util.py        |   25 -
 branches/bug1734/src/transaction/util.py      |   51 -
 branches/bug1734/src/zdaemon/DEPENDENCIES.cfg |    1 -
 branches/bug1734/src/zdaemon/SETUP.cfg        |    2 -
 branches/bug1734/src/zdaemon/__init__.py      |   14 -
 branches/bug1734/src/zdaemon/component.xml    |  275 --
 branches/bug1734/src/zdaemon/sample.conf      |   24 -
 branches/bug1734/src/zdaemon/schema.xml       |   26 -
 .../bug1734/src/zdaemon/tests/__init__.py     |    1 -
 .../bug1734/src/zdaemon/tests/donothing.sh    |    6 -
 branches/bug1734/src/zdaemon/tests/nokill.py  |    8 -
 branches/bug1734/src/zdaemon/tests/parent.py  |   32 -
 .../src/zdaemon/tests/testzdoptions.py        |  294 --
 .../bug1734/src/zdaemon/tests/testzdrun.py    |  299 --
 branches/bug1734/src/zdaemon/zdctl.py         |  584 ----
 branches/bug1734/src/zdaemon/zdoptions.py     |  411 ---
 branches/bug1734/src/zdaemon/zdrun.py         |  719 -----
 branches/bug1734/src/zope/__init__.py         |   15 -
 .../src/zope/interface/DEPENDENCIES.cfg       |    1 -
 .../src/zope/interface/PUBLICATION.cfg        |    8 -
 .../bug1734/src/zope/interface/README.txt     |  697 -----
 branches/bug1734/src/zope/interface/SETUP.cfg |    5 -
 .../bug1734/src/zope/interface/__init__.py    |   80 -
 .../bug1734/src/zope/interface/_flatten.py    |   37 -
 .../_zope_interface_coptimizations.c          |  553 ----
 .../bug1734/src/zope/interface/adapter.py     |  732 -----
 .../bug1734/src/zope/interface/adapter.txt    |  505 ---
 branches/bug1734/src/zope/interface/advice.py |  192 --
 .../src/zope/interface/common/__init__.py     |    2 -
 .../src/zope/interface/common/idatetime.py    |  577 ----
 .../src/zope/interface/common/interfaces.py   |   98 -
 .../src/zope/interface/common/mapping.py      |  127 -
 .../src/zope/interface/common/sequence.py     |  129 -
 .../zope/interface/common/tests/__init__.py   |    2 -
 .../interface/common/tests/basemapping.py     |  115 -
 .../interface/common/tests/test_idatetime.py  |   49 -
 .../src/zope/interface/declarations.py        | 1388 ---------
 .../bug1734/src/zope/interface/document.py    |  121 -
 .../bug1734/src/zope/interface/exceptions.py  |   69 -
 branches/bug1734/src/zope/interface/human.txt |  152 -
 .../bug1734/src/zope/interface/interface.py   |  942 ------
 .../bug1734/src/zope/interface/interfaces.py  |  681 -----
 branches/bug1734/src/zope/interface/ro.py     |   63 -
 .../src/zope/interface/tests/__init__.py      |    2 -
 .../bug1734/src/zope/interface/tests/dummy.py |   25 -
 .../zope/interface/tests/foodforthought.txt   |   61 -
 .../bug1734/src/zope/interface/tests/ifoo.py  |   28 -
 .../bug1734/src/zope/interface/tests/m1.py    |   23 -
 .../bug1734/src/zope/interface/tests/m2.py    |   17 -
 .../bug1734/src/zope/interface/tests/odd.py   |  129 -
 .../src/zope/interface/tests/test_adapter.py  |  297 --
 .../src/zope/interface/tests/test_advice.py   |  177 --
 .../zope/interface/tests/test_declarations.py |  366 ---
 .../src/zope/interface/tests/test_document.py |   71 -
 .../src/zope/interface/tests/test_element.py  |   44 -
 .../zope/interface/tests/test_interface.py    |  296 --
 .../interface/tests/test_odd_declarations.py  |  204 --
 .../src/zope/interface/tests/test_sorting.py  |   49 -
 .../src/zope/interface/tests/test_verify.py   |  196 --
 .../src/zope/interface/tests/unitfixtures.py  |  142 -
 branches/bug1734/src/zope/interface/verify.py |  111 -
 .../bug1734/src/zope/proxy/DEPENDENCIES.cfg   |    2 -
 branches/bug1734/src/zope/proxy/SETUP.cfg     |    8 -
 branches/bug1734/src/zope/proxy/__init__.py   |   31 -
 .../src/zope/proxy/_zope_proxy_proxy.c        | 1098 -------
 branches/bug1734/src/zope/proxy/interfaces.py |   62 -
 branches/bug1734/src/zope/proxy/proxy.h       |   54 -
 .../bug1734/src/zope/proxy/tests/__init__.py  |    2 -
 .../src/zope/proxy/tests/test_proxy.py        |  565 ----
 .../bug1734/src/zope/testing/DEPENDENCIES.cfg |    1 -
 branches/bug1734/src/zope/testing/__init__.py |   30 -
 branches/bug1734/src/zope/testing/cleanup.py  |   65 -
 branches/bug1734/src/zope/testing/doctest.py  | 2704 -----------------
 .../bug1734/src/zope/testing/doctestunit.py   |   33 -
 .../bug1734/src/zope/testing/formparser.py    |  212 --
 .../bug1734/src/zope/testing/formparser.txt   |  130 -
 .../src/zope/testing/loggingsupport.py        |  122 -
 .../bug1734/src/zope/testing/loghandler.py    |   77 -
 branches/bug1734/src/zope/testing/module.py   |   36 -
 branches/bug1734/src/zope/testing/tests.py    |   30 -
 branches/bug1734/test.py                      |  875 ------
 477 files changed, 99741 deletions(-)
 delete mode 100644 branches/bug1734/COPYING
 delete mode 100644 branches/bug1734/COPYRIGHT.txt
 delete mode 100644 branches/bug1734/LICENSE.txt
 delete mode 100644 branches/bug1734/MANIFEST
 delete mode 100644 branches/bug1734/MANIFEST.in
 delete mode 100644 branches/bug1734/NEWS.txt
 delete mode 100644 branches/bug1734/README.txt
 delete mode 100644 branches/bug1734/doc/ACKS
 delete mode 100644 branches/bug1734/doc/Makefile
 delete mode 100644 branches/bug1734/doc/README.txt
 delete mode 100644 branches/bug1734/doc/ZConfig/Makefile
 delete mode 100644 branches/bug1734/doc/ZConfig/README.txt
 delete mode 100644 branches/bug1734/doc/ZConfig/schema.dtd
 delete mode 100644 branches/bug1734/doc/ZConfig/xmlmarkup.perl
 delete mode 100644 branches/bug1734/doc/ZConfig/xmlmarkup.sty
 delete mode 100644 branches/bug1734/doc/ZConfig/zconfig.pdf
 delete mode 100644 branches/bug1734/doc/ZConfig/zconfig.tex
 delete mode 100644 branches/bug1734/doc/ZEO/README.txt
 delete mode 100644 branches/bug1734/doc/ZEO/ZopeREADME.txt
 delete mode 100644 branches/bug1734/doc/ZEO/cache.txt
 delete mode 100644 branches/bug1734/doc/ZEO/howto.txt
 delete mode 100644 branches/bug1734/doc/ZEO/trace.txt
 delete mode 100644 branches/bug1734/doc/guide/README
 delete mode 100644 branches/bug1734/doc/guide/TODO
 delete mode 100644 branches/bug1734/doc/guide/admin.tex
 delete mode 100644 branches/bug1734/doc/guide/chatter.py
 delete mode 100644 branches/bug1734/doc/guide/gfdl.tex
 delete mode 100644 branches/bug1734/doc/guide/indexing.tex
 delete mode 100644 branches/bug1734/doc/guide/introduction.tex
 delete mode 100644 branches/bug1734/doc/guide/links.tex
 delete mode 100644 branches/bug1734/doc/guide/modules.tex
 delete mode 100644 branches/bug1734/doc/guide/prog-zodb.tex
 delete mode 100644 branches/bug1734/doc/guide/storages.tex
 delete mode 100644 branches/bug1734/doc/guide/transactions.tex
 delete mode 100644 branches/bug1734/doc/guide/zeo.tex
 delete mode 100644 branches/bug1734/doc/guide/zodb.tex
 delete mode 100644 branches/bug1734/doc/storage.pdf
 delete mode 100644 branches/bug1734/doc/storage.tex
 delete mode 100644 branches/bug1734/doc/zdctl.txt
 delete mode 100644 branches/bug1734/doc/zodb.pdf
 delete mode 100644 branches/bug1734/log.ini
 delete mode 100644 branches/bug1734/release.py
 delete mode 100644 branches/bug1734/releases/ZODB3/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/releases/ZODB3/PACKAGE.cfg
 delete mode 100644 branches/bug1734/releases/ZODB3/PUBLICATION.cfg
 delete mode 100644 branches/bug1734/releases/ZODB3/SETUP.cfg
 delete mode 100644 branches/bug1734/setup.py
 delete mode 100644 branches/bug1734/src/BTrees/BTreeItemsTemplate.c
 delete mode 100755 branches/bug1734/src/BTrees/BTreeModuleTemplate.c
 delete mode 100755 branches/bug1734/src/BTrees/BTreeTemplate.c
 delete mode 100755 branches/bug1734/src/BTrees/BucketTemplate.c
 delete mode 100644 branches/bug1734/src/BTrees/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/BTrees/IFBTree.py
 delete mode 100644 branches/bug1734/src/BTrees/IIBTree.py
 delete mode 100644 branches/bug1734/src/BTrees/IOBTree.py
 delete mode 100644 branches/bug1734/src/BTrees/Interfaces.py
 delete mode 100644 branches/bug1734/src/BTrees/Length.py
 delete mode 100644 branches/bug1734/src/BTrees/Maintainer.txt
 delete mode 100644 branches/bug1734/src/BTrees/MergeTemplate.c
 delete mode 100644 branches/bug1734/src/BTrees/OIBTree.py
 delete mode 100644 branches/bug1734/src/BTrees/OOBTree.py
 delete mode 100644 branches/bug1734/src/BTrees/SETUP.cfg
 delete mode 100644 branches/bug1734/src/BTrees/SetOpTemplate.c
 delete mode 100644 branches/bug1734/src/BTrees/SetTemplate.c
 delete mode 100644 branches/bug1734/src/BTrees/TreeSetTemplate.c
 delete mode 100644 branches/bug1734/src/BTrees/_IFBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/_IIBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/_IOBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/_OIBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/_OOBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/__init__.py
 delete mode 100644 branches/bug1734/src/BTrees/_fsBTree.c
 delete mode 100644 branches/bug1734/src/BTrees/check.py
 delete mode 100644 branches/bug1734/src/BTrees/convert.py
 delete mode 100644 branches/bug1734/src/BTrees/floatvaluemacros.h
 delete mode 100644 branches/bug1734/src/BTrees/intkeymacros.h
 delete mode 100644 branches/bug1734/src/BTrees/intvaluemacros.h
 delete mode 100644 branches/bug1734/src/BTrees/objectkeymacros.h
 delete mode 100644 branches/bug1734/src/BTrees/objectvaluemacros.h
 delete mode 100644 branches/bug1734/src/BTrees/sorters.c
 delete mode 100644 branches/bug1734/src/BTrees/tests/__init__.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/testBTrees.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/testBTreesUnicode.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/testConflict.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/testSetOps.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/test_btreesubclass.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/test_check.py
 delete mode 100644 branches/bug1734/src/BTrees/tests/test_compare.py
 delete mode 100644 branches/bug1734/src/Persistence/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/Persistence/_Persistence.c
 delete mode 100644 branches/bug1734/src/Persistence/__init__.py
 delete mode 100644 branches/bug1734/src/Persistence/mapping.py
 delete mode 100644 branches/bug1734/src/Persistence/tests/__init__.py
 delete mode 100644 branches/bug1734/src/Persistence/tests/testPersistent.py
 delete mode 100644 branches/bug1734/src/Persistence/tests/test_ExtensionClass.py
 delete mode 100644 branches/bug1734/src/Persistence/tests/test_mapping.py
 delete mode 100644 branches/bug1734/src/ThreadedAsync/LoopCallback.py
 delete mode 100644 branches/bug1734/src/ThreadedAsync/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/NEWS.txt
 delete mode 100644 branches/bug1734/src/ZConfig/PACKAGE.cfg
 delete mode 100644 branches/bug1734/src/ZConfig/PUBLICATION.cfg
 delete mode 100644 branches/bug1734/src/ZConfig/README.txt
 delete mode 100644 branches/bug1734/src/ZConfig/SETUP.cfg
 delete mode 100644 branches/bug1734/src/ZConfig/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/cfgparser.py
 delete mode 100644 branches/bug1734/src/ZConfig/cmdline.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/component.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/mapping.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/mapping.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/tests/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/basic/tests/test_mapping.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/abstract.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/base-logger.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/component.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/datatypes.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/eventlog.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/factory.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/handlers.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/handlers.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/logger.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/logger.xml
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/loghandler.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/tests/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/components/logger/tests/test_logger.py
 delete mode 100644 branches/bug1734/src/ZConfig/datatypes.py
 delete mode 100644 branches/bug1734/src/ZConfig/doc/Makefile
 delete mode 100644 branches/bug1734/src/ZConfig/doc/README.txt
 delete mode 100644 branches/bug1734/src/ZConfig/doc/schema.dtd
 delete mode 100644 branches/bug1734/src/ZConfig/doc/xmlmarkup.perl
 delete mode 100644 branches/bug1734/src/ZConfig/doc/xmlmarkup.sty
 delete mode 100644 branches/bug1734/src/ZConfig/doc/zconfig.pdf
 delete mode 100644 branches/bug1734/src/ZConfig/doc/zconfig.tex
 delete mode 100644 branches/bug1734/src/ZConfig/info.py
 delete mode 100644 branches/bug1734/src/ZConfig/loader.py
 delete mode 100644 branches/bug1734/src/ZConfig/matcher.py
 delete mode 100644 branches/bug1734/src/ZConfig/schema.py
 delete mode 100755 branches/bug1734/src/ZConfig/scripts/zconfig
 delete mode 100755 branches/bug1734/src/ZConfig/scripts/zconfig_schema2html
 delete mode 100644 branches/bug1734/src/ZConfig/substitution.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/base-datatype1.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/base-datatype2.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/base-keytype1.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/base-keytype2.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/base.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/include.conf
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/inner.conf
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/library.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/logger.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/outer.conf
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/simple.conf
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/simple.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/simplesections.conf
 delete mode 100644 branches/bug1734/src/ZConfig/tests/input/simplesections.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/README.txt
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/thing/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/thing/component.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/thing/extras/extras.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/widget/__init__.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/widget/component.xml
 delete mode 100644 branches/bug1734/src/ZConfig/tests/library/widget/extra.xml
 delete mode 100755 branches/bug1734/src/ZConfig/tests/runtests.bat
 delete mode 100755 branches/bug1734/src/ZConfig/tests/runtests.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/support.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_cfgimports.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_cmdline.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_config.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_cookbook.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_datatypes.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_loader.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_schema.py
 delete mode 100644 branches/bug1734/src/ZConfig/tests/test_subst.py
 delete mode 100644 branches/bug1734/src/ZConfig/url.py
 delete mode 100644 branches/bug1734/src/ZEO/ClientStorage.py
 delete mode 100644 branches/bug1734/src/ZEO/ClientStub.py
 delete mode 100644 branches/bug1734/src/ZEO/CommitLog.py
 delete mode 100644 branches/bug1734/src/ZEO/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/ZEO/DebugServer.py
 delete mode 100644 branches/bug1734/src/ZEO/Exceptions.py
 delete mode 100644 branches/bug1734/src/ZEO/README.txt
 delete mode 100644 branches/bug1734/src/ZEO/SETUP.cfg
 delete mode 100644 branches/bug1734/src/ZEO/ServerStub.py
 delete mode 100644 branches/bug1734/src/ZEO/StorageServer.py
 delete mode 100644 branches/bug1734/src/ZEO/TransactionBuffer.py
 delete mode 100644 branches/bug1734/src/ZEO/__init__.py
 delete mode 100644 branches/bug1734/src/ZEO/auth/__init__.py
 delete mode 100644 branches/bug1734/src/ZEO/auth/auth_digest.py
 delete mode 100644 branches/bug1734/src/ZEO/auth/base.py
 delete mode 100644 branches/bug1734/src/ZEO/auth/hmac.py
 delete mode 100644 branches/bug1734/src/ZEO/cache.py
 delete mode 100644 branches/bug1734/src/ZEO/component.xml
 delete mode 100755 branches/bug1734/src/ZEO/mkzeoinst.py
 delete mode 100644 branches/bug1734/src/ZEO/monitor.py
 delete mode 100644 branches/bug1734/src/ZEO/runzeo.py
 delete mode 100644 branches/bug1734/src/ZEO/schema.xml
 delete mode 100644 branches/bug1734/src/ZEO/simul.py
 delete mode 100755 branches/bug1734/src/ZEO/stats.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/Cache.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/CommitLockTests.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/ConnectionTests.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/InvalidationTests.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/TestThread.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/ThreadTests.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/__init__.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/auth_plaintext.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/deadlock.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/forker.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/multi.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/speed.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/stress.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testAuth.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testConnection.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testMonitor.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testTransactionBuffer.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testZEO.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/testZEOOptions.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/test_cache.py
 delete mode 100644 branches/bug1734/src/ZEO/tests/zeoserver.py
 delete mode 100644 branches/bug1734/src/ZEO/util.py
 delete mode 100644 branches/bug1734/src/ZEO/version.txt
 delete mode 100644 branches/bug1734/src/ZEO/zeoctl.py
 delete mode 100644 branches/bug1734/src/ZEO/zeoctl.xml
 delete mode 100644 branches/bug1734/src/ZEO/zeopasswd.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/__init__.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/_hmac.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/client.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/connection.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/error.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/log.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/marshal.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/server.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/smac.py
 delete mode 100644 branches/bug1734/src/ZEO/zrpc/trigger.py
 delete mode 100644 branches/bug1734/src/ZODB/ActivityMonitor.py
 delete mode 100644 branches/bug1734/src/ZODB/BaseStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/ConflictResolution.py
 delete mode 100644 branches/bug1734/src/ZODB/Connection.py
 delete mode 100644 branches/bug1734/src/ZODB/DB.py
 delete mode 100644 branches/bug1734/src/ZODB/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/ZODB/DemoStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/ExportImport.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/FileStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/__init__.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/format.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/fsdump.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/fsoids.py
 delete mode 100644 branches/bug1734/src/ZODB/FileStorage/fspack.py
 delete mode 100644 branches/bug1734/src/ZODB/MappingStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/Mount.py
 delete mode 100644 branches/bug1734/src/ZODB/POSException.py
 delete mode 100644 branches/bug1734/src/ZODB/SETUP.cfg
 delete mode 100644 branches/bug1734/src/ZODB/TmpStore.py
 delete mode 100644 branches/bug1734/src/ZODB/UndoLogCompatible.py
 delete mode 100644 branches/bug1734/src/ZODB/ZApplication.py
 delete mode 100644 branches/bug1734/src/ZODB/__init__.py
 delete mode 100644 branches/bug1734/src/ZODB/broken.py
 delete mode 100644 branches/bug1734/src/ZODB/collaborations.txt
 delete mode 100644 branches/bug1734/src/ZODB/component.xml
 delete mode 100644 branches/bug1734/src/ZODB/config.py
 delete mode 100644 branches/bug1734/src/ZODB/config.xml
 delete mode 100644 branches/bug1734/src/ZODB/conversionhack.py
 delete mode 100644 branches/bug1734/src/ZODB/dbmStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/fsIndex.py
 delete mode 100644 branches/bug1734/src/ZODB/fsrecover.py
 delete mode 100644 branches/bug1734/src/ZODB/fstools.py
 delete mode 100644 branches/bug1734/src/ZODB/interfaces.py
 delete mode 100644 branches/bug1734/src/ZODB/lock_file.py
 delete mode 100644 branches/bug1734/src/ZODB/loglevels.py
 delete mode 100644 branches/bug1734/src/ZODB/serialize.py
 delete mode 100644 branches/bug1734/src/ZODB/storage.xml
 delete mode 100644 branches/bug1734/src/ZODB/subtransactions.txt
 delete mode 100644 branches/bug1734/src/ZODB/tests/BasicStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/ConflictResolution.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/Corruption.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/HistoryStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/IteratorStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/LocalStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/MTStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/MinPO.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/PackableStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/PersistentStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/ReadOnlyStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/RecoveryStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/RevisionStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/StorageTestBase.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/Synchronization.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/TransactionalUndoStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/TransactionalUndoVersionStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/VersionStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/__init__.py
 delete mode 100755 branches/bug1734/src/ZODB/tests/dangle.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/dbopen.txt
 delete mode 100644 branches/bug1734/src/ZODB/tests/multidb.txt
 delete mode 100644 branches/bug1734/src/ZODB/tests/sampledm.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/speed.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testActivityMonitor.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testBroken.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testCache.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testConfig.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testConnection.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testDB.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testDemoStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testFileStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testMappingStorage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testPersistentList.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testPersistentMapping.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testRecover.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testSerialize.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testSubTransaction.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testTimeStamp.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testUtils.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testZODB.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/test_cache.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/test_datamanageradapter.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/test_doctest_files.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/test_fsdump.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/test_storage.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testfsIndex.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testfsoids.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/testmvcc.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/util.py
 delete mode 100644 branches/bug1734/src/ZODB/tests/warnhook.py
 delete mode 100644 branches/bug1734/src/ZODB/transact.py
 delete mode 100644 branches/bug1734/src/ZODB/utils.py
 delete mode 100755 branches/bug1734/src/ZODB/winlock.c
 delete mode 100644 branches/bug1734/src/ZopeUndo/Prefix.py
 delete mode 100644 branches/bug1734/src/ZopeUndo/__init__.py
 delete mode 100644 branches/bug1734/src/ZopeUndo/tests/__init__.py
 delete mode 100644 branches/bug1734/src/ZopeUndo/tests/testPrefix.py
 delete mode 100644 branches/bug1734/src/persistent/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/persistent/README.txt
 delete mode 100644 branches/bug1734/src/persistent/SETUP.cfg
 delete mode 100644 branches/bug1734/src/persistent/TimeStamp.c
 delete mode 100644 branches/bug1734/src/persistent/__init__.py
 delete mode 100644 branches/bug1734/src/persistent/cPersistence.c
 delete mode 100644 branches/bug1734/src/persistent/cPersistence.h
 delete mode 100644 branches/bug1734/src/persistent/cPickleCache.c
 delete mode 100644 branches/bug1734/src/persistent/dict.py
 delete mode 100644 branches/bug1734/src/persistent/interfaces.py
 delete mode 100644 branches/bug1734/src/persistent/list.py
 delete mode 100644 branches/bug1734/src/persistent/mapping.py
 delete mode 100644 branches/bug1734/src/persistent/ring.c
 delete mode 100644 branches/bug1734/src/persistent/ring.h
 delete mode 100644 branches/bug1734/src/persistent/tests/__init__.py
 delete mode 100644 branches/bug1734/src/persistent/tests/persistent.txt
 delete mode 100644 branches/bug1734/src/persistent/tests/persistenttestbase.py
 delete mode 100644 branches/bug1734/src/persistent/tests/testPersistent.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_PickleCache.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_list.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_overriding_attrs.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_persistent.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_pickle.py
 delete mode 100644 branches/bug1734/src/persistent/tests/test_wref.py
 delete mode 100644 branches/bug1734/src/persistent/wref.py
 delete mode 100644 branches/bug1734/src/scripts/README.txt
 delete mode 100755 branches/bug1734/src/scripts/analyze.py
 delete mode 100755 branches/bug1734/src/scripts/checkbtrees.py
 delete mode 100644 branches/bug1734/src/scripts/fsdump.py
 delete mode 100644 branches/bug1734/src/scripts/fsoids.py
 delete mode 100644 branches/bug1734/src/scripts/fsrefs.py
 delete mode 100755 branches/bug1734/src/scripts/fsstats.py
 delete mode 100644 branches/bug1734/src/scripts/fstail.py
 delete mode 100644 branches/bug1734/src/scripts/fstest.py
 delete mode 100644 branches/bug1734/src/scripts/manual_tests/test-checker.fs
 delete mode 100644 branches/bug1734/src/scripts/manual_tests/testfstest.py
 delete mode 100644 branches/bug1734/src/scripts/manual_tests/testrepozo.py
 delete mode 100644 branches/bug1734/src/scripts/manual_tests/testzeopack.py
 delete mode 100755 branches/bug1734/src/scripts/migrate.py
 delete mode 100644 branches/bug1734/src/scripts/netspace.py
 delete mode 100644 branches/bug1734/src/scripts/parsezeolog.py
 delete mode 100755 branches/bug1734/src/scripts/repozo.py
 delete mode 100644 branches/bug1734/src/scripts/space.py
 delete mode 100755 branches/bug1734/src/scripts/timeout.py
 delete mode 100755 branches/bug1734/src/scripts/zeopack.py
 delete mode 100755 branches/bug1734/src/scripts/zeoqueue.py
 delete mode 100644 branches/bug1734/src/scripts/zeoreplay.py
 delete mode 100644 branches/bug1734/src/scripts/zeoserverlog.py
 delete mode 100755 branches/bug1734/src/scripts/zeoup.py
 delete mode 100644 branches/bug1734/src/scripts/zodbload.py
 delete mode 100644 branches/bug1734/src/transaction/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/transaction/README.txt
 delete mode 100644 branches/bug1734/src/transaction/__init__.py
 delete mode 100644 branches/bug1734/src/transaction/_manager.py
 delete mode 100644 branches/bug1734/src/transaction/_transaction.py
 delete mode 100644 branches/bug1734/src/transaction/interfaces.py
 delete mode 100644 branches/bug1734/src/transaction/notes.txt
 delete mode 100644 branches/bug1734/src/transaction/tests/__init__.py
 delete mode 100644 branches/bug1734/src/transaction/tests/abstestIDataManager.py
 delete mode 100644 branches/bug1734/src/transaction/tests/test_SampleDataManager.py
 delete mode 100644 branches/bug1734/src/transaction/tests/test_SampleResourceManager.py
 delete mode 100644 branches/bug1734/src/transaction/tests/test_register_compat.py
 delete mode 100644 branches/bug1734/src/transaction/tests/test_transaction.py
 delete mode 100644 branches/bug1734/src/transaction/tests/test_util.py
 delete mode 100644 branches/bug1734/src/transaction/util.py
 delete mode 100644 branches/bug1734/src/zdaemon/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/zdaemon/SETUP.cfg
 delete mode 100644 branches/bug1734/src/zdaemon/__init__.py
 delete mode 100644 branches/bug1734/src/zdaemon/component.xml
 delete mode 100644 branches/bug1734/src/zdaemon/sample.conf
 delete mode 100644 branches/bug1734/src/zdaemon/schema.xml
 delete mode 100644 branches/bug1734/src/zdaemon/tests/__init__.py
 delete mode 100755 branches/bug1734/src/zdaemon/tests/donothing.sh
 delete mode 100755 branches/bug1734/src/zdaemon/tests/nokill.py
 delete mode 100644 branches/bug1734/src/zdaemon/tests/parent.py
 delete mode 100644 branches/bug1734/src/zdaemon/tests/testzdoptions.py
 delete mode 100644 branches/bug1734/src/zdaemon/tests/testzdrun.py
 delete mode 100755 branches/bug1734/src/zdaemon/zdctl.py
 delete mode 100644 branches/bug1734/src/zdaemon/zdoptions.py
 delete mode 100755 branches/bug1734/src/zdaemon/zdrun.py
 delete mode 100644 branches/bug1734/src/zope/__init__.py
 delete mode 100644 branches/bug1734/src/zope/interface/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/zope/interface/PUBLICATION.cfg
 delete mode 100644 branches/bug1734/src/zope/interface/README.txt
 delete mode 100644 branches/bug1734/src/zope/interface/SETUP.cfg
 delete mode 100644 branches/bug1734/src/zope/interface/__init__.py
 delete mode 100644 branches/bug1734/src/zope/interface/_flatten.py
 delete mode 100644 branches/bug1734/src/zope/interface/_zope_interface_coptimizations.c
 delete mode 100644 branches/bug1734/src/zope/interface/adapter.py
 delete mode 100644 branches/bug1734/src/zope/interface/adapter.txt
 delete mode 100644 branches/bug1734/src/zope/interface/advice.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/__init__.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/idatetime.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/interfaces.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/mapping.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/sequence.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/tests/__init__.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/tests/basemapping.py
 delete mode 100644 branches/bug1734/src/zope/interface/common/tests/test_idatetime.py
 delete mode 100644 branches/bug1734/src/zope/interface/declarations.py
 delete mode 100644 branches/bug1734/src/zope/interface/document.py
 delete mode 100644 branches/bug1734/src/zope/interface/exceptions.py
 delete mode 100644 branches/bug1734/src/zope/interface/human.txt
 delete mode 100644 branches/bug1734/src/zope/interface/interface.py
 delete mode 100644 branches/bug1734/src/zope/interface/interfaces.py
 delete mode 100644 branches/bug1734/src/zope/interface/ro.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/__init__.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/dummy.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/foodforthought.txt
 delete mode 100644 branches/bug1734/src/zope/interface/tests/ifoo.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/m1.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/m2.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/odd.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_adapter.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_advice.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_declarations.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_document.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_element.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_interface.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_odd_declarations.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_sorting.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/test_verify.py
 delete mode 100644 branches/bug1734/src/zope/interface/tests/unitfixtures.py
 delete mode 100644 branches/bug1734/src/zope/interface/verify.py
 delete mode 100644 branches/bug1734/src/zope/proxy/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/zope/proxy/SETUP.cfg
 delete mode 100644 branches/bug1734/src/zope/proxy/__init__.py
 delete mode 100644 branches/bug1734/src/zope/proxy/_zope_proxy_proxy.c
 delete mode 100644 branches/bug1734/src/zope/proxy/interfaces.py
 delete mode 100644 branches/bug1734/src/zope/proxy/proxy.h
 delete mode 100644 branches/bug1734/src/zope/proxy/tests/__init__.py
 delete mode 100644 branches/bug1734/src/zope/proxy/tests/test_proxy.py
 delete mode 100644 branches/bug1734/src/zope/testing/DEPENDENCIES.cfg
 delete mode 100644 branches/bug1734/src/zope/testing/__init__.py
 delete mode 100644 branches/bug1734/src/zope/testing/cleanup.py
 delete mode 100644 branches/bug1734/src/zope/testing/doctest.py
 delete mode 100644 branches/bug1734/src/zope/testing/doctestunit.py
 delete mode 100644 branches/bug1734/src/zope/testing/formparser.py
 delete mode 100644 branches/bug1734/src/zope/testing/formparser.txt
 delete mode 100644 branches/bug1734/src/zope/testing/loggingsupport.py
 delete mode 100644 branches/bug1734/src/zope/testing/loghandler.py
 delete mode 100644 branches/bug1734/src/zope/testing/module.py
 delete mode 100644 branches/bug1734/src/zope/testing/tests.py
 delete mode 100644 branches/bug1734/test.py

diff --git a/branches/bug1734/COPYING b/branches/bug1734/COPYING
deleted file mode 100644
index cdbcf60f..00000000
--- a/branches/bug1734/COPYING
+++ /dev/null
@@ -1,5 +0,0 @@
-See:
-
- - the copyright notice in: COPYRIGHT.txt
-
- - The Zope Public License in LICENSE.txt
diff --git a/branches/bug1734/COPYRIGHT.txt b/branches/bug1734/COPYRIGHT.txt
deleted file mode 100644
index 32b0177c..00000000
--- a/branches/bug1734/COPYRIGHT.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) 2004 Zope Corporation and Contributors.
-All Rights Reserved.
-
-This software is subject to the provisions of the Zope Public License,
-Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-FOR A PARTICULAR PURPOSE.
diff --git a/branches/bug1734/LICENSE.txt b/branches/bug1734/LICENSE.txt
deleted file mode 100644
index 0f06d960..00000000
--- a/branches/bug1734/LICENSE.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Zope Public License (ZPL) Version 2.1
--------------------------------------
-
-A copyright notice accompanies this license document that
-identifies the copyright holders.
-
-This license has been certified as open source. It has also
-been designated as GPL compatible by the Free Software
-Foundation (FSF).
-
-Redistribution and use in source and binary forms, with or
-without modification, are permitted provided that the
-following conditions are met:
-
-1. Redistributions in source code must retain the
-   accompanying copyright notice, this list of conditions,
-   and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the accompanying
-   copyright notice, this list of conditions, and the
-   following disclaimer in the documentation and/or other
-   materials provided with the distribution.
-
-3. Names of the copyright holders must not be used to
-   endorse or promote products derived from this software
-   without prior written permission from the copyright
-   holders.
-
-4. The right to distribute this software or to use it for
-   any purpose does not give you the right to use
-   Servicemarks (sm) or Trademarks (tm) of the copyright
-   holders. Use of them is covered by separate agreement
-   with the copyright holders.
-
-5. If any files are modified, you must cause the modified
-   files to carry prominent notices stating that you changed
-   the files and the date of any change.
-
-Disclaimer
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS''
-  AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
-  NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
-  AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
-  NO EVENT SHALL THE COPYRIGHT HOLDERS BE
-  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-  OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-  DAMAGE.
diff --git a/branches/bug1734/MANIFEST b/branches/bug1734/MANIFEST
deleted file mode 100644
index f08c57b1..00000000
--- a/branches/bug1734/MANIFEST
+++ /dev/null
@@ -1,380 +0,0 @@
-LICENSE.txt
-MANIFEST
-MANIFEST.in
-NEWS.txt
-README.txt
-log.ini
-setup.py
-test.py
-doc/ACKS
-doc/Makefile
-doc/README.txt
-doc/storage.pdf
-doc/storage.tex
-doc/zdctl.txt
-doc/zodb.pdf
-doc/ZConfig/Makefile
-doc/ZConfig/README.txt
-doc/ZConfig/schema.dtd
-doc/ZConfig/xmlmarkup.perl
-doc/ZConfig/xmlmarkup.sty
-doc/ZConfig/zconfig.pdf
-doc/ZConfig/zconfig.tex
-doc/ZEO/README.txt
-doc/ZEO/ZopeREADME.txt
-doc/ZEO/cache.txt
-doc/ZEO/howto.txt
-doc/ZEO/trace.txt
-doc/guide/README
-doc/guide/TODO
-doc/guide/admin.tex
-doc/guide/chatter.py
-doc/guide/gfdl.tex
-doc/guide/indexing.tex
-doc/guide/introduction.tex
-doc/guide/links.tex
-doc/guide/modules.tex
-doc/guide/prog-zodb.tex
-doc/guide/storages.tex
-doc/guide/transactions.tex
-doc/guide/zeo.tex
-doc/guide/zodb.tex
-src/BTrees/BTreeItemsTemplate.c
-src/BTrees/BTreeModuleTemplate.c
-src/BTrees/BTreeTemplate.c
-src/BTrees/BucketTemplate.c
-src/BTrees/IIBTree.py
-src/BTrees/IOBTree.py
-src/BTrees/Interfaces.py
-src/BTrees/Length.py
-src/BTrees/Maintainer.txt
-src/BTrees/MergeTemplate.c
-src/BTrees/OIBTree.py
-src/BTrees/OOBTree.py
-src/BTrees/SetOpTemplate.c
-src/BTrees/SetTemplate.c
-src/BTrees/TreeSetTemplate.c
-src/BTrees/_IIBTree.c
-src/BTrees/_IOBTree.c
-src/BTrees/_OIBTree.c
-src/BTrees/_OOBTree.c
-src/BTrees/__init__.py
-src/BTrees/_fsBTree.c
-src/BTrees/check.py
-src/BTrees/convert.py
-src/BTrees/intkeymacros.h
-src/BTrees/intvaluemacros.h
-src/BTrees/objectkeymacros.h
-src/BTrees/objectvaluemacros.h
-src/BTrees/sorters.c
-src/BTrees/tests/__init__.py
-src/BTrees/tests/testBTrees.py
-src/BTrees/tests/testBTreesUnicode.py
-src/BTrees/tests/testConflict.py
-src/BTrees/tests/testSetOps.py
-src/BTrees/tests/test_btreesubclass.py
-src/BTrees/tests/test_check.py
-src/BTrees/tests/test_compare.py
-src/Persistence/_Persistence.c
-src/Persistence/__init__.py
-src/Persistence/mapping.py
-src/Persistence/tests/__init__.py
-src/Persistence/tests/testPersistent.py
-src/Persistence/tests/test_ExtensionClass.py
-src/Persistence/tests/test_mapping.py
-src/ThreadedAsync/LoopCallback.py
-src/ThreadedAsync/__init__.py
-src/ZConfig/BRANCHES.txt
-src/ZConfig/__init__.py
-src/ZConfig/cfgparser.py
-src/ZConfig/cmdline.py
-src/ZConfig/datatypes.py
-src/ZConfig/info.py
-src/ZConfig/loader.py
-src/ZConfig/matcher.py
-src/ZConfig/schema.py
-src/ZConfig/substitution.py
-src/ZConfig/url.py
-src/ZConfig/components/__init__.py
-src/ZConfig/components/basic/__init__.py
-src/ZConfig/components/basic/component.xml
-src/ZConfig/components/basic/mapping.py
-src/ZConfig/components/basic/mapping.xml
-src/ZConfig/components/basic/tests/__init__.py
-src/ZConfig/components/basic/tests/test_mapping.py
-src/ZConfig/components/logger/__init__.py
-src/ZConfig/components/logger/abstract.xml
-src/ZConfig/components/logger/base-logger.xml
-src/ZConfig/components/logger/component.xml
-src/ZConfig/components/logger/datatypes.py
-src/ZConfig/components/logger/eventlog.xml
-src/ZConfig/components/logger/factory.py
-src/ZConfig/components/logger/handlers.py
-src/ZConfig/components/logger/handlers.xml
-src/ZConfig/components/logger/logger.py
-src/ZConfig/components/logger/logger.xml
-src/ZConfig/components/logger/loghandler.py
-src/ZConfig/components/logger/tests/__init__.py
-src/ZConfig/components/logger/tests/test_logger.py
-src/ZConfig/doc/Makefile
-src/ZConfig/doc/README.txt
-src/ZConfig/doc/schema.dtd
-src/ZConfig/doc/xmlmarkup.perl
-src/ZConfig/doc/xmlmarkup.sty
-src/ZConfig/doc/zconfig.pdf
-src/ZConfig/doc/zconfig.tex
-src/ZConfig/scripts/zconfig
-src/ZConfig/tests/__init__.py
-src/ZConfig/tests/runtests.bat
-src/ZConfig/tests/runtests.py
-src/ZConfig/tests/support.py
-src/ZConfig/tests/test_cfgimports.py
-src/ZConfig/tests/test_cmdline.py
-src/ZConfig/tests/test_config.py
-src/ZConfig/tests/test_datatypes.py
-src/ZConfig/tests/test_loader.py
-src/ZConfig/tests/test_schema.py
-src/ZConfig/tests/test_subst.py
-src/ZConfig/tests/input/base-datatype1.xml
-src/ZConfig/tests/input/base-datatype2.xml
-src/ZConfig/tests/input/base-keytype1.xml
-src/ZConfig/tests/input/base-keytype2.xml
-src/ZConfig/tests/input/base.xml
-src/ZConfig/tests/input/include.conf
-src/ZConfig/tests/input/inner.conf
-src/ZConfig/tests/input/library.xml
-src/ZConfig/tests/input/logger.xml
-src/ZConfig/tests/input/outer.conf
-src/ZConfig/tests/input/simple.conf
-src/ZConfig/tests/input/simple.xml
-src/ZConfig/tests/input/simplesections.conf
-src/ZConfig/tests/input/simplesections.xml
-src/ZConfig/tests/library/README.txt
-src/ZConfig/tests/library/__init__.py
-src/ZConfig/tests/library/thing/__init__.py
-src/ZConfig/tests/library/thing/component.xml
-src/ZConfig/tests/library/thing/extras/extras.xml
-src/ZConfig/tests/library/widget/__init__.py
-src/ZConfig/tests/library/widget/component.xml
-src/ZConfig/tests/library/widget/extra.xml
-src/ZEO/ClientStorage.py
-src/ZEO/ClientStub.py
-src/ZEO/CommitLog.py
-src/ZEO/DebugServer.py
-src/ZEO/Exceptions.py
-src/ZEO/README.txt
-src/ZEO/ServerStub.py
-src/ZEO/StorageServer.py
-src/ZEO/TransactionBuffer.py
-src/ZEO/__init__.py
-src/ZEO/cache.py
-src/ZEO/component.xml
-src/ZEO/mkzeoinst.py
-src/ZEO/monitor.py
-src/ZEO/runzeo.py
-src/ZEO/schema.xml
-src/ZEO/simul.py
-src/ZEO/stats.py
-src/ZEO/util.py
-src/ZEO/version.txt
-src/ZEO/zeoctl.py
-src/ZEO/zeoctl.xml
-src/ZEO/zeopasswd.py
-src/ZEO/auth/__init__.py
-src/ZEO/auth/auth_digest.py
-src/ZEO/auth/base.py
-src/ZEO/auth/hmac.py
-src/ZEO/tests/Cache.py
-src/ZEO/tests/CommitLockTests.py
-src/ZEO/tests/ConnectionTests.py
-src/ZEO/tests/InvalidationTests.py
-src/ZEO/tests/TestThread.py
-src/ZEO/tests/ThreadTests.py
-src/ZEO/tests/__init__.py
-src/ZEO/tests/auth_plaintext.py
-src/ZEO/tests/deadlock.py
-src/ZEO/tests/forker.py
-src/ZEO/tests/multi.py
-src/ZEO/tests/speed.py
-src/ZEO/tests/stress.py
-src/ZEO/tests/testAuth.py
-src/ZEO/tests/testConnection.py
-src/ZEO/tests/testMonitor.py
-src/ZEO/tests/testTransactionBuffer.py
-src/ZEO/tests/testZEO.py
-src/ZEO/tests/testZEOOptions.py
-src/ZEO/tests/test_cache.py
-src/ZEO/tests/zeoserver.py
-src/ZEO/zrpc/__init__.py
-src/ZEO/zrpc/_hmac.py
-src/ZEO/zrpc/client.py
-src/ZEO/zrpc/connection.py
-src/ZEO/zrpc/error.py
-src/ZEO/zrpc/log.py
-src/ZEO/zrpc/marshal.py
-src/ZEO/zrpc/server.py
-src/ZEO/zrpc/smac.py
-src/ZEO/zrpc/trigger.py
-src/ZODB/ActivityMonitor.py
-src/ZODB/BaseStorage.py
-src/ZODB/ConflictResolution.py
-src/ZODB/Connection.py
-src/ZODB/DB.py
-src/ZODB/DemoStorage.py
-src/ZODB/ExportImport.py
-src/ZODB/MappingStorage.py
-src/ZODB/Mount.py
-src/ZODB/POSException.py
-src/ZODB/TmpStore.py
-src/ZODB/UndoLogCompatible.py
-src/ZODB/ZApplication.py
-src/ZODB/__init__.py
-src/ZODB/broken.py
-src/ZODB/component.xml
-src/ZODB/config.py
-src/ZODB/config.xml
-src/ZODB/conversionhack.py
-src/ZODB/dbmStorage.py
-src/ZODB/fsIndex.py
-src/ZODB/fsrecover.py
-src/ZODB/fstools.py
-src/ZODB/interfaces.py
-src/ZODB/lock_file.py
-src/ZODB/serialize.py
-src/ZODB/storage.xml
-src/ZODB/subtransactions.txt
-src/ZODB/transact.py
-src/ZODB/utils.py
-src/ZODB/winlock.c
-src/ZODB/FileStorage/FileStorage.py
-src/ZODB/FileStorage/__init__.py
-src/ZODB/FileStorage/format.py
-src/ZODB/FileStorage/fsdump.py
-src/ZODB/FileStorage/fspack.py
-src/ZODB/tests/BasicStorage.py
-src/ZODB/tests/ConflictResolution.py
-src/ZODB/tests/Corruption.py
-src/ZODB/tests/HistoryStorage.py
-src/ZODB/tests/IteratorStorage.py
-src/ZODB/tests/LocalStorage.py
-src/ZODB/tests/MTStorage.py
-src/ZODB/tests/MinPO.py
-src/ZODB/tests/PackableStorage.py
-src/ZODB/tests/PersistentStorage.py
-src/ZODB/tests/ReadOnlyStorage.py
-src/ZODB/tests/RecoveryStorage.py
-src/ZODB/tests/RevisionStorage.py
-src/ZODB/tests/StorageTestBase.py
-src/ZODB/tests/Synchronization.py
-src/ZODB/tests/TransactionalUndoStorage.py
-src/ZODB/tests/TransactionalUndoVersionStorage.py
-src/ZODB/tests/VersionStorage.py
-src/ZODB/tests/__init__.py
-src/ZODB/tests/dangle.py
-src/ZODB/tests/sampledm.py
-src/ZODB/tests/speed.py
-src/ZODB/tests/testActivityMonitor.py
-src/ZODB/tests/testBroken.py
-src/ZODB/tests/testCache.py
-src/ZODB/tests/testConfig.py
-src/ZODB/tests/testConnection.py
-src/ZODB/tests/testDB.py
-src/ZODB/tests/testDemoStorage.py
-src/ZODB/tests/testFileStorage.py
-src/ZODB/tests/testMappingStorage.py
-src/ZODB/tests/testPersistentList.py
-src/ZODB/tests/testPersistentMapping.py
-src/ZODB/tests/testRecover.py
-src/ZODB/tests/testSerialize.py
-src/ZODB/tests/testSubTransaction.py
-src/ZODB/tests/testTimeStamp.py
-src/ZODB/tests/testUtils.py
-src/ZODB/tests/testZODB.py
-src/ZODB/tests/test_cache.py
-src/ZODB/tests/test_datamanageradapter.py
-src/ZODB/tests/test_storage.py
-src/ZODB/tests/testfsIndex.py
-src/ZODB/tests/testmvcc.py
-src/ZODB/tests/util.py
-src/ZODB/tests/warnhook.py
-src/ZopeUndo/Prefix.py
-src/ZopeUndo/__init__.py
-src/ZopeUndo/tests/__init__.py
-src/ZopeUndo/tests/testPrefix.py
-src/persistent/README.txt
-src/persistent/TimeStamp.c
-src/persistent/__init__.py
-src/persistent/cPersistence.c
-src/persistent/cPersistence.h
-src/persistent/cPickleCache.c
-src/persistent/dict.py
-src/persistent/interfaces.py
-src/persistent/list.py
-src/persistent/mapping.py
-src/persistent/ring.c
-src/persistent/ring.h
-src/persistent/wref.py
-src/persistent/tests/__init__.py
-src/persistent/tests/persistent.txt
-src/persistent/tests/persistenttestbase.py
-src/persistent/tests/testPersistent.py
-src/persistent/tests/test_PickleCache.py
-src/persistent/tests/test_list.py
-src/persistent/tests/test_overriding_attrs.py
-src/persistent/tests/test_persistent.py
-src/persistent/tests/test_pickle.py
-src/persistent/tests/test_wref.py
-src/scripts/README.txt
-src/scripts/analyze.py
-src/scripts/checkbtrees.py
-src/scripts/fsdump.py
-src/scripts/fsrefs.py
-src/scripts/fsstats.py
-src/scripts/fstail.py
-src/scripts/fstest.py
-src/scripts/migrate.py
-src/scripts/netspace.py
-src/scripts/parsezeolog.py
-src/scripts/repozo.py
-src/scripts/space.py
-src/scripts/timeout.py
-src/scripts/zeopack.py
-src/scripts/zeoqueue.py
-src/scripts/zeoreplay.py
-src/scripts/zeoserverlog.py
-src/scripts/zeoup.py
-src/scripts/zodbload.py
-src/scripts/tests/test-checker.fs
-src/scripts/tests/testfstest.py
-src/scripts/tests/testzeopack.py
-src/transaction/README.txt
-src/transaction/__init__.py
-src/transaction/_manager.py
-src/transaction/_transaction.py
-src/transaction/interfaces.py
-src/transaction/notes.txt
-src/transaction/util.py
-src/transaction/tests/__init__.py
-src/transaction/tests/abstestIDataManager.py
-src/transaction/tests/test_SampleDataManager.py
-src/transaction/tests/test_register_compat.py
-src/transaction/tests/test_transaction.py
-src/transaction/tests/test_util.py
-src/zLOG/EventLogger.py
-src/zLOG/__init__.py
-src/zLOG/tests/__init__.py
-src/zLOG/tests/test_logging.py
-src/zLOG/tests/testzLog.py
-src/zdaemon/__init__.py
-src/zdaemon/component.xml
-src/zdaemon/sample.conf
-src/zdaemon/schema.xml
-src/zdaemon/zdctl.py
-src/zdaemon/zdoptions.py
-src/zdaemon/zdrun.py
-src/zdaemon/tests/__init__.py
-src/zdaemon/tests/donothing.sh
-src/zdaemon/tests/nokill.py
-src/zdaemon/tests/parent.py
-src/zdaemon/tests/testzdoptions.py
-src/zdaemon/tests/testzdrun.py
diff --git a/branches/bug1734/MANIFEST.in b/branches/bug1734/MANIFEST.in
deleted file mode 100644
index 3b5de232..00000000
--- a/branches/bug1734/MANIFEST.in
+++ /dev/null
@@ -1,9 +0,0 @@
-include MANIFEST MANIFEST.in
-include *.txt
-include test.py log.ini
-recursive-include src *.h *.c *.xml *.txt *.sh *.conf *.bat
-include src/ZConfig/scripts/zconfig
-graft doc
-graft src/scripts
-graft src/ZConfig/doc
-global-exclude .cvsignore
diff --git a/branches/bug1734/NEWS.txt b/branches/bug1734/NEWS.txt
deleted file mode 100644
index 8b615763..00000000
--- a/branches/bug1734/NEWS.txt
+++ /dev/null
@@ -1,2202 +0,0 @@
-What's new in ZODB3 3.4?
-========================
-Release date: DD-MMM-2004
-
-transaction
------------
-
-- ``get_transaction()`` is officially deprecated now, and will be removed
-  in ZODB 3.6.  Use the ``transaction`` package instead.   For example,
-  instead of::
-
-      import ZODB
-      ...
-      get_transaction().commit()
-
-  do::
-
-      import transaction
-      ...
-      transaction.commit()
-
-DB
---
-
-- There is no longer a hard limit on the number of connections that
-  ``DB.open()`` will create.  In other words, ``DB.open()`` never blocks
-  anymore waiting for an earlier connection to close, and ``DB.open()``
-  always returns a connection now (while it wasn't documented, it was
-  possible for ``DB.open()`` to return ``None`` before).
-
-  ``pool_size`` continues to default to 7, but its meaning has changed:
-  if more than ``pool_size`` connections are obtained from ``DB.open()``
-  and not closed, a warning is logged; if more than twice ``pool_size``, a
-  critical problem is logged.  ``pool_size`` should be set to the maximum
-  number of connections from the ``DB`` instance you expect to have open
-  simultaneously (a short usage sketch follows this list).
-
-  In addition, if a connection obtained from ``DB.open()`` becomes
-  unreachable without having been explicitly closed, when Python's garbage
-  collection reclaims that connection it no longer counts against the
-  ``pool_size`` thresholds for logging messages.
-
-  The following optional arguments to ``DB.open()`` are deprecated:
-  ``transaction``, ``waitflag``, ``force`` and ``temporary``.  If one
-  is specified, its value is ignored, and ``DeprecationWarning`` is
-  raised.  In ZODB 3.6, these optional arguments will be removed.
-
-- Lightweight support for "multi-databases" is implemented.  These are
-  collections of named DB objects and associated open Connections, such
-  that the Connection for any DB in the collection can be obtained from
-  a Connection from any other DB in the collection.  See the new test
-  file ZODB/tests/multidb.txt for a tutorial doctest.  Thanks to Christian
-  Theune for his work on this during the PyCon 2005 ZODB sprint.
-
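-For illustration only (a sketch, not part of the original entry; the
-storage path is made up)::
-
-    from ZODB import DB
-    from ZODB.FileStorage import FileStorage
-
-    db = DB(FileStorage('Data.fs'), pool_size=7)
-    cn = db.open()    # never blocks; always returns a connection
-    # Opening more than pool_size connections without closing any logs
-    # a warning; more than twice pool_size logs a critical message.
-    cn.close()
-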
-BTrees
-------
-
-- A new family of BTree types, in the ``IFBTree`` module, map
-  signed integers (32 bits) to C floats (also 32 bits).  The
-  intended use is to help construct search indices, where, e.g.,
-  integer word or document identifiers map to scores of some
-  kind.  This is easier than trying to work with scaled integer
-  scores in an ``IIBTree``, and Zope3 has moved to ``IFBTrees``
-  for these purposes in its search code.
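-
-  For illustration (a sketch; the key/value pairs are invented)::
-
-      from BTrees.IFBTree import IFBTree
-
-      scores = IFBTree()
-      scores[12] = 0.75    # e.g. document id -> relevance score
-      scores[57] = 0.25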
-
-FileStorage
------------
-
-- Added a record iteration protocol to FileStorage.  You can use the
-  record iterator to iterate over all current revisions of data
-  pickles in the storage.
-
-  In order to support calling via ZEO, we don't implement this as an
-  actual iterator.  An example of using the record iterator protocol
-  is as follows::
-
-      storage = FileStorage('anexisting.fs')
-      next_oid = None
-      while True:
-          oid, tid, data, next_oid = storage.record_iternext(next_oid)
-          # do something with oid, tid and data
-          if next_oid is None:
-              break
-
-  The behavior of the iteration protocol is now to iterate over all
-  current records in the database in ascending oid order, although
-  this is not a promise to do so in the future.
-
-
-Tools
------
-
-New tool fsoids.py, for heavy debugging of FileStorages; shows all
-uses of specified oids in the entire database (e.g., suppose oid 0x345620
-is missing -- did it ever exist?  if so, when?  who referenced it?  when
-was the last transaction that modified an object that referenced it?
-which objects did it reference?  what kind of object was it?).
-ZODB/tests/testfsoids.py is a tutorial doctest.
-
-
-fsIndex
--------
-
-Efficient, general implementations of ``minKey()`` and ``maxKey()`` methods
-were added.  ``fsIndex`` is a special hybrid kind of BTree used to implement
-FileStorage indices.  Thanks to Chris McDonough for code and tests.
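-
-For illustration, a sketch of the new methods (entries invented)::
-
-    from ZODB.fsIndex import fsIndex
-    from ZODB.utils import p64
-
-    index = fsIndex()
-    index[p64(1)] = 1024      # oid -> file position
-    index[p64(500)] = 2048
-    index.minKey()            # the 8-byte form of oid 1
-    index.maxKey()            # the 8-byte form of oid 500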
-
-
-What's new in ZODB3 3.3.1a2?
-============================
-Release date: DD-MMM-2005
-
-ZEO
----
-
-Repaired subtle race conditions in establishing ZEO connections, both client-
-and server-side.  These account for intermittent cases where ZEO failed
-to make a connection (or reconnection), accompanied by a log message showing
-an error caught in ``asyncore`` and having a traceback ending with:
-
-    ``UnpicklingError: invalid load key, 'Z'.``
-
-or:
-
-    ``ZRPCError: bad handshake '(K\x00K\x00U\x0fgetAuthProtocol)t.'``
-
-or:
-
-    ``error: (9, 'Bad file descriptor')``
-
-or an ``AttributeError``.
-
-These were exacerbated when running the test suite, because of an unintended
-busy loop in the test scaffolding, which could starve the thread trying to
-make a connection.  The ZEO reconnection tests may run much faster now,
-depending on platform, and should suffer far fewer (if any) intermittent
-"timed out waiting for storage to connect" failures.
-
-
-FileStorage
------------
-
-- The ``.store()`` and ``.restore()`` methods didn't update the storage's
-  belief about the largest oid in use when passed an oid larger than the
-  largest oid the storage already knew about.  Because ``.restore()`` in
-  particular is used by ``copyTransactionsFrom()``, and by the first stage
-  of ZRS recovery, a large database could be created that believed the only
-  oid in use was oid 0 (the special oid reserved for the root object).  In
-  rare cases, it could go on from there assigning duplicate oids to new
-  objects, starting over from oid 1 again.  This has been repaired.  A
-  new ``set_max_oid()`` method was added to the ``BaseStorage`` class so
-  that derived storages can update the largest oid in use in a threadsafe
-  way.
-
-- A FileStorage's index file tried to maintain the index's largest oid as a
-  separate piece of data, incrementally updated over the storage's lifetime.
-  This scheme was more complicated than necessary, so was also more brittle
-  and slower than necessary.  It indirectly participated in a rare but
-  critical bug:  when a FileStorage was created via
-  ``copyTransactionsFrom()``, the "maximum oid" saved in the index file was
-  always 0.  Use that FileStorage, and it could then create "new" oids
-  starting over at 0 again, despite that those oids were already in use by
-  old objects in the database.  Packing a FileStorage has no reason to
-  try to update the maximum oid in the index file either, so this kind of
-  damage could (and did) persist even across packing.
-
-  The index file's maximum-oid data is ignored now, but is still written
-  out so that ``.index`` files can be read by older versions of ZODB.
-  Finding the true maximum oid is done now by exploiting that the main
-  index is really a kind of BTree (long ago, this wasn't true), and finding
-  the largest key in a BTree is inexpensive.
-
-- A FileStorage's index file could be updated on disk even if the storage
-  was opened in read-only mode.  That bug has been repaired.
-
-- An efficient ``maxKey()`` implementation was added to class ``fsIndex``.
-
-
-Pickle (in-memory Connection) Cache
------------------------------------
-
-You probably never saw this exception:
-
-    ``ValueError: Can not re-register object under a different oid``
-
-It's been changed to say what it meant:
-
-    ``ValueError: A different object already has the same oid``
-
-This happens if an attempt is made to add distinct objects to the cache
-that have the same oid (object identifier).  ZODB should never do this,
-but it's possible for application code to force such an attempt.
-
-PersistentMapping and PersistentList
-------------------------------------
-
-Backward compatibility code has been added so that the sanest of the
-ZODB 3.2 dotted paths for ``PersistentMapping`` and ``PersistentList``
-resolve.  These are still preferred:
-
-- ``from persistent.list import PersistentList``
-- ``from persistent.mapping import PersistentMapping``
-
-but these work again too:
-
-- ``from ZODB.PersistentList import PersistentList``
-- ``from ZODB.PersistentMapping import PersistentMapping``
-
-BTrees
-------
-
-The BTrees interface file neglected to document the optional
-``excludemin`` and ``excludemax`` arguments to the ``keys()``, ``values()``
-and ``items()`` methods.  Appropriate changes were merged in from the
-ZODB4 BTrees interface file.
-
-fsIndex
--------
-
-An efficient ``maxKey()`` method was implemented for the ``fsIndex`` class.
-This makes it possible to determine the largest oid in a ``FileStorage``
-index efficiently, directly, and reliably, replacing a more delicate scheme
-that tried to keep track of this by saving an oid high water mark in the
-index file and incrementally updating it.
-
-
-What's new in ZODB3 3.3.1a1?
-============================
-Release date: 11-Jan-2005
-
-ZEO client cache
-----------------
-
-- Collector 1536:  The ``cache-size`` configuration option for ZEO clients
-  was being ignored.  Worse, the client cache size was only one megabyte,
-  much smaller than the advertised default of 20MB.  Note that the default
-  is carried over from a time when gigabyte disks were expensive and rare;
-  20MB is also too small on most modern machines.
-
-- Fixed a nasty bug in cache verification.  A persistent ZEO cache uses a
-  disk file, and, when active, has some in-memory data structures too to
-  speed operation.  Invalidations processed as part of startup cache
-  verification were reflected in the in-memory data structures, but not
-  correctly in the disk file.  So if an object revision was invalidated as
-  part of verification, the object wasn't loaded again before the connection
-  was closed, and the object revision remained in the cache file until the
-  connection was closed, then the next time the cache file was opened it
-  could believe that the stale object revision in the file was actually
-  current.
-
-- Fixed a bug wherein an object removed from the client cache didn't
-  properly mark the file slice it occupied as being available for reuse.
-
-ZEO
----
-
-Collector 1503:  excessive logging.  It was possible for a ZEO client to
-log "waiting for cache verification to finish" messages at a very high
-rate, producing gigabytes of such messages in short order.
-``ClientStorage._wait_sync()`` was changed to log no more than one
-such message per 5 minutes.
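-
-The throttling idea, as a standalone sketch (not the actual ZEO code)::
-
-    import time
-
-    _last_logged = 0.0
-
-    def log_waiting(log, interval=300):
-        # Log at most once per `interval` seconds (300s = 5 minutes).
-        global _last_logged
-        now = time.time()
-        if now - _last_logged >= interval:
-            log("waiting for cache verification to finish")
-            _last_logged = now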
-
-persistent
-----------
-
-Collector #1350:  ZODB has a default one-thread-per-connection model, and
-two threads should never do operations on a single connection
-simultaneously.  However, ZODB can't detect violations, and this happened
-in an early stage of Zope 2.8 development.  The low-level ``ghostify()``
-and ``unghostify()`` routines in ``cPersistence.c`` were changed to give
-some help in detecting this when it happens.  In a debug build, both abort
-the process if thread interference is detected.  This is extreme, but
-impossible to overlook.  In a release build, ``unghostify()`` raises
-``SystemError`` if thread damage is detected; ``ghostify()`` ignores the
-problem in a release build (``ghostify()`` is supposed to be so simple that
-it "can't fail").
-
-ConflictError
--------------
-
-New in 3.3, a ``ConflictError`` exception may attempt to insert the path to
-the object's class in its message.  However, a ZEO server may not have
-access to application class implementations, and then the attempt by the
-server to raise ``ConflictError`` could raise ``ImportError`` instead while
-trying to determine the object's class path.  This was confusing.  The code
-has been changed to obtain the class path from the object's pickle, without
-trying to import application modules or classes.
-
-FileStorage
------------
-
-Collector 1581:  When an attempt to pack a corrupted ``Data.fs`` file was
-made, it was possible for the pack routine to die with a reference to an
-undefined global while it was trying to raise ``CorruptedError``.  It
-raises ``CorruptedError``, as it always intended, in these cases now.
-
-Install
--------
-
-The C header file ``ring.h`` is now installed.
-
-Tools
------
-
-- ``BTrees.check.display()`` now displays the oids (if any) of the
-  BTree's or TreeSet's constituent objects.
-
-
-What's new in ZODB3 3.3?
-========================
-Release date: 06-Oct-2004
-
-ZEO
----
-
-The encoding of RPC calls between server and client was being done
-with protocol 0 ("text mode") pickles, which could require sending
-four times as many bytes as necessary.  Protocol 1 pickles are used
-now.  Thanks to Andreas Jung for the diagnosis and cure.
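-
-To see the size difference yourself (an illustration, not the actual
-ZEO message format)::
-
-    import pickle
-
-    msg = (0, 1, 'loadEx', ('\x00' * 8,))
-    len(pickle.dumps(msg, 0))    # protocol 0: text mode, larger
-    len(pickle.dumps(msg, 1))    # protocol 1: binary, smaller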
-
-ZODB/component.xml
-------------------
-
-``cache-size`` parameters were changed from type ``integer`` to
-type ``byte-size``.  This allows you to specify, for example,
-"``cache-size 20MB``" to get a 20 megabyte cache.
-
-transaction
------------
-
-The deprecation warning for ``Transaction.begin()`` was changed to
-point to the caller, instead of to ``Transaction.begin()`` itself.
-
-Connection
-----------
-
-Restored Connection's private ``_opened`` attribute.  This was still
-referenced by ``DB.connectionDebugInfo()``, and Zope 2 calls the latter.
-
-FileStorage
------------
-
-Collector #1517: History tab for ZPT does not work. ``FileStorage.history()``
-was reading the user, description, and extension fields out of the object
-pickle, due to starting the read at the wrong location.  This looked like
-a cut-and-paste repetition of the same bug in ``FileStorage.FileIterator``
-noted in the news for 3.3c1.
-
-What's new in ZODB3 3.3 release candidate 1?
-============================================
-Release date: 14-Sep-2004
-
-Connection
-----------
-
-ZODB intends to raise ``ConnectionStateError`` if an attempt is made to
-close a connection while modifications are pending (the connection is
-involved in a transaction that hasn't been ``abort()``'ed or
-``commit()``'ed).  It was missing the case where the only pending
-modifications were made in subtransactions.  This has been fixed.  If an
-attempt to close a connection with pending subtransactions is made now::
-
-    ConnectionStateError: Cannot close a connection with a pending subtransaction
-
-is raised.
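-
-To avoid this, resolve pending work before closing (a sketch; ``db`` is
-assumed to be an open ``DB``)::
-
-    import transaction
-
-    cn = db.open()
-    # ... modify persistent objects, perhaps in subtransactions ...
-    transaction.abort()    # or transaction.commit()
-    cn.close()             # safe: nothing is pending now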
-
-transaction
------------
-
-- Transactions have new, backward-incompatible behavior in one respect:
-  if a ``Transaction.commit()``, ``Transaction.commit(False)``, or
-  ``Transaction.commit(True)`` raised an exception, prior behavior was that
-  the transaction effectively aborted, and a new transaction began.
-  A primary bad consequence was that, if in a sequence of subtransaction
-  commits, one of the commits failed but the exception was suppressed,
-  all changes up to and including the failing commit were lost, but
-  later subtransaction commits in the sequence got no indication that
-  something had gone wrong, nor did the final (top level) commit.  This
-  could easily lead to inconsistent data being committed, from the
-  application's point of view.
-
-  The new behavior is that a failing commit "sticks" until explicitly
-  cleared.  Now if an exception is raised by a ``commit()`` call (whether
-  subtransaction or top level) on a Transaction object ``T``:
-
-    - Pending changes are aborted, exactly as they were for a failing
-      commit before.
-
-    - But ``T`` remains the current transaction object (if ``tm`` is ``T``'s
-      transaction manager, ``tm.get()`` continues to return ``T``).
-
-    - All subsequent attempts to do ``T.commit()``, ``T.join()``, or
-      ``T.register()`` raise the new ``TransactionFailedError`` exception.
-      Note that if you try to modify a persistent object, that object's
-      resource manager (usually a ``Connection`` object) will attempt to
-      ``join()`` the failed transaction, and ``TransactionFailedError``
-      will be raised right away.
-
-  So after a transaction or subtransaction commit fails, that must be
-  explicitly cleared now, either by invoking ``abort()`` on the transaction
-  object, or by invoking ``begin()`` on its transaction manager (a short
-  sketch follows this list).
-
-- Some explanations of new transaction features in the 3.3a3 news
-  were incorrect, and this news file has been retroactively edited to
-  repair that.  See news for 3.3a3 below.
-
-- If ReadConflictError was raised by an attempt to load an object with a
-  ``_p_independent()`` method that returned false, attempting to commit the
-  transaction failed to (re)raise ReadConflictError for that object.  Note
-  that ZODB intends to prevent committing a transaction in which a
-  ReadConflictError occurred; this was an obscure case it missed.
-
-- Growing pains:  ZODB 3.2 had a bug wherein ``Transaction.begin()`` didn't
-  abort the current transaction if the only pending changes were in a
-  subtransaction.  In ZODB 3.3, it's intended that a transaction manager be
-  used to effect ``begin()`` (instead of invoking ``Transaction.begin()``),
-  and calling ``begin()`` on a transaction manager didn't have this old
-  bug.  However, ``Transaction.begin()`` still exists in 3.3, and it had a
-  worse bug:  it never aborted the transaction (not even if changes were
-  pending outside of subtransactions). ``Transaction.begin()`` has been
-  changed to abort the transaction. ``Transaction.begin()`` is also
-  deprecated.  Don't use it.  Use ``begin()`` on the relevant transaction
-  manager instead.  For example,
-
-      >>> import transaction
-      >>> txn = transaction.begin()  # start a txn using the default TM
-
-  if using the default ``ThreadTransactionManager`` (see news for 3.3a3
-  below). In 3.3, it's intended that a single ``Transaction`` object is
-  used for exactly one transaction.  So, unlike in 3.2, where
-  ``Transaction`` objects were sometimes reused across transactions and
-  sometimes weren't, calling ``Transaction.begin()`` in 3.3 creates a
-  brand new transaction object.  That's why this use is deprecated.  Code
-  of the form:
-
-      >>> txn = transaction.get()
-      >>> ...
-      >>> txn.begin()
-      >>> ...
-      >>> txn.commit()
-
-  can't work as intended in 3.3, because ``txn`` is no longer the current
-  ``Transaction`` object the instant ``txn.begin()`` returns.
-
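-The "sticky failure" behavior, in outline (a sketch, not a doctest)::
-
-    import transaction
-
-    txn = transaction.get()
-    try:
-        txn.commit()       # suppose this commit raises
-    except Exception:
-        pass               # the exception is (unwisely) suppressed
-    # txn is still the current transaction, but it is now unusable:
-    # txn.commit(), txn.join() and txn.register() raise
-    # TransactionFailedError until the failure is cleared.
-    txn.abort()            # explicitly clear the failed transaction
-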
-BTrees
-------
-
-The BTrees __init__.py file is now just a comment.  It had been trying
-to set up support for (long gone) "int sets", and to import an old
-version of Zope's Interface package, which doesn't even ship with ZODB.
-The latter in particular created problems, at least clashing with
-PythonCAD's Interface package.
-
-POSException
-------------
-
-Collector #1488 (TemporaryStorage -- going backward in time).  This
-confusion was really due to that the detail on a ConflictError exception
-didn't make sense.  It called the current revision "was", and the old
-revision "now".  The detail is much more informative now.  For example,
-if the exception said::
-
-    ConflictError: database conflict error (oid 0xcb22,
-    serial was 0x03441422948b4399, now 0x034414228c3728d5)
-
-before, it now says::
-
-    ConflictError: database conflict error (oid 0xcb22,
-    serial this txn started with 0x034414228c3728d5 2002-04-14 20:50:32.863000,
-    serial currently committed 0x03441422948b4399 2002-04-14 20:50:34.815000)
-
-ConflictError
--------------
-
-The undocumented ``get_old_serial()`` and ``get_new_serial()`` methods
-were swapped (the first returned the new serial, and the second returned
-the old serial).
-
-Tools
------
-
-``FileStorage.FileIterator`` was confused about how to read a transaction's
-user and description fields, which caused several tools to display
-binary gibberish for these values.
-
-``ZODB.utils.oid_repr()`` changed to add a leading "0x", and to strip
-leading zeroes.  This is used, e.g., in the detail of a ``POSKeyError``
-exception, to identify the missing oid.  Before, the output was ambiguous.
-For example, oid 17 was displayed as 0000000000000011.  As a Python
-integer, that's octal 9.  Or was it meant to be decimal 11?  Or was it
-meant to be hex? Now it displays as 0x11.
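-
-For example (``p64`` is the oid-packing helper in ``ZODB.utils``):
-
-    >>> from ZODB.utils import oid_repr, p64
-    >>> oid_repr(p64(17))
-    '0x11'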
-
-fsrefs.py:
-
-    When run with ``-v``, produced tracebacks for objects whose creation was
-    merely undone.  This was confusing.  Tracebacks are now produced only
-    if there's "a real" problem loading an oid.
-
-    If the current revision of object O refers to an object P whose
-    creation has been undone, this is now identified as a distinct case.
-
-    Captured and ignored most attempts to stop it via Ctrl+C.  Repaired.
-
-    Now makes two passes, so that an accurate report can be given of all
-    invalid references.
-
-``analyze.py`` produced spurious "len of unsized object" messages when
-finding a data record for an object uncreation or version abort.  These
-no longer appear.
-
-``fsdump.py``'s ``get_pickle_metadata()`` function (which is used by several
-tools) was confused about what to do when the ZODB pickle started with
-a pickle ``GLOBAL`` opcode.  It actually loaded the class then, which it
-intends never to do, leading to stray messages on stdout when the class
-wasn't available, and leading to a strange return value even when it was
-available (the repr of the type object was returned as "the module name",
-and an empty string was returned as "the class name").  This has been
-repaired.
-
-
-What's new in ZODB3 3.3 beta 2
-==============================
-Release date: 13-Aug-2004
-
-Transaction Managers
---------------------
-
-Zope3-dev Collector #139: Memory leak involving buckets and connections
-
-The transaction manager internals effectively made every Connection
-object immortal, except for those explicitly closed.  Since typical
-practice is not to close connections explicitly (and closing a DB
-happens not to close the connections to it -- although that may
-change), this caused massive memory leaks when many connections were
-opened.  The transaction manager internals were reworked to use weak
-references instead, so that connection memory (and other registered
-synch objects) now get cleaned up when nothing other than the
-transaction manager knows about them.
-
-Storages
---------
-
-Collector #1327: FileStorage init confused by time travel
-
-If the system clock "went backwards" a long time between the times a
-FileStorage was closed and reopened, new transaction ids could be
-smaller than transaction ids already in the storage, violating a
-key invariant.  Now transaction ids are guaranteed to be increasing
-even when this happens.  If time appears to have run backwards at all
-when a FileStorage is opened, a new message saying so is logged at
-warning level; if time appears to have run backwards at least 30
-minutes, the message is logged at critical level (and you should
-investigate to find and repair the true cause).
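-
-The invariant, sketched (this is the idea, not the actual FileStorage
-code)::
-
-    from ZODB.utils import p64, u64
-
-    def choose_tid(last_tid, clock_based_tid):
-        # Never hand out a tid <= the largest tid already in the
-        # storage, even if the system clock ran backwards.
-        if clock_based_tid <= last_tid:
-            return p64(u64(last_tid) + 1)
-        return clock_based_tid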
-
-Tools
------
-
-repozo.py:  Thanks to a suggestion from Toby Dickenson, backups
-(whether incremental or full) are first written to a temp file now,
-which is fsync'ed at the end, and only after that succeeds is the
-file renamed to YYYY-MM-DD-HH-MM-SS.ext form.  In case of a system
-crash during a repozo backup, this at least makes it much less
-likely that a backup file with incomplete or incorrect data will be
-left behind.
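-
-The write-then-rename pattern, in outline (names invented)::
-
-    import os
-
-    def safe_write(data, tmp_name, final_name):
-        f = open(tmp_name, 'wb')
-        f.write(data)
-        f.flush()
-        os.fsync(f.fileno())    # force the bytes to disk first
-        f.close()
-        os.rename(tmp_name, final_name)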
-
-fsrefs.py:  Fleshed out the module docstring, and repaired a bug
-wherein spurious error msgs could be produced after reporting a
-problem with an unloadable object.
-
-Test suite
-----------
-
-Collector #1397: testTimeStamp fails on FreeBSD
-
-    The BSD distributions are unique in that their mktime()
-    implementation usually ignores the input tm_isdst value.  Test
-    checkFullTimeStamp() was sensitive to this platform quirk.
-
-Reworked the way some of the ZEO tests use threads, so that unittest is
-more likely to notice the real cause of a failure (which usually occurs in
-a thread), and less likely to latch on to spurious problems resulting from
-the real failure.
-
-
-What's new in ZODB3 3.3 beta 1
-==============================
-Release date: 07-Jun-2004
-
-3.3b1 is the first ZODB release built using the new zpkg tools:
-
-    http://zope.org/Members/fdrake/zpkgtools/
-
-This appears to have worked very well.  The structure of the tarball
-release differs from previous releases because of it, and the set of
-installed files includes some that were not installed in previous
-releases.  That shouldn't create problems, so let us know if it does!
-We'll fine-tune this for the next release.
-
-BTrees
-------
-
-Fixed a bug in indexing BTreeItems objects with negative indexes.  This
-caused reverse iteration to return each item twice.  Thanks to Casey
-Duncan for the fix.
-
-ZODB
-----
-
-Methods removed from the database (ZODB.DB.DB) class:  cacheStatistics(),
-cacheMeanAge(), cacheMeanDeac(), and cacheMeanDeal().  These were
-undocumented, untested, and unused.  The first always returned an empty
-tuple, and the rest always returned None.
-
-When trying to do recovery to a time earlier than that of the most recent
-full backup, repozo.py failed to find the appropriate files, erroneously
-claiming "No files in repository before <specified time>".  This has
-been repaired.
-
-Collector #1330:  repozo.py -R can create corrupt .fs.
-When looking for the backup files needed to recreate a Data.fs file,
-repozo could (unintentionally) include its meta .dat files in the list,
-or random files of any kind created by the user in the backup directory.
-These would then get copied verbatim into the reconstructed file, filling
-parts with junk.  Repaired by filtering the file list to include only
-files with the data extensions repozo.py creates (.fs, .fsz, .deltafs,
-and .deltafsz).  Thanks to James Henderson for the diagnosis.
-
-fsrecover.py couldn't work, because it referenced attributes that no
-longer existed after the MVCC changes.  Repaired that, and added new
-tests to ensure it continues working.
-
-Collector #1309:  The reference counts reported by DB.cacheExtremeDetails()
-for ghosts were one too small.  Thanks to Dieter Maurer for the diagnosis.
-
-Collector #1208:  Infinite loop in cPickleCache.
-If a persistent object had a __del__ method (probably not a good idea
-regardless, but we don't prevent it) that referenced an attribute of
-self, the code to deactivate objects in the cache could get into an
-infinite loop:  ghostifying the object could lead to calling its __del__
-method, the latter would load the object into cache again to
-satisfy the attribute reference, the cache would again decide that
-the object should be ghostified, and so on.  The infinite loop no longer
-occurs, but note that objects of this kind still aren't sensible (they're
-effectively immortal).  Thanks to Toby Dickenson for suggesting a nice
-cure.
-
-
-What's new in ZODB3 3.3 alpha 3
-===============================
-Release date: 16-Apr-2004
-
-transaction
------------
-
-There is a new transaction package, which provides new interfaces for
-application code and for the interaction between transactions and
-resource managers.
-
-The top-level transaction package has functions ``commit()``, ``abort()``,
-``get()``, and ``begin()``.  They should be used instead of the magic
-``get_transaction()`` builtin, which will be deprecated.  For example:
-
-    >>> get_transaction().commit()
-
-should now be written as
-
-    >>> import transaction
-    >>> transaction.commit()
-
-The new API provides explicit transaction manager objects.  A transaction
-manager (TM) is responsible for associating resource managers with a
-"current" transaction.  The default TM, implemented by class
-``ThreadTransactionManager``, assigns each thread its own current
-transaction.  This default TM is available as ``transaction.manager``.  The
-``TransactionManager`` class assigns all threads to the same transaction,
-and is an explicit replacement for the ``Connection.setLocalTransaction()``
-method.
-
-A transaction manager instance can be passed as the txn_mgr argument to
-``DB.open()``.  If you do, the connection will use the specified
-transaction manager instead of the default TM.  The current transaction is
-obtained by calling ``get()`` on a TM. For example:
-
-    >>> tm = transaction.TransactionManager()
-    >>> cn = db.open(txn_mgr=tm)
-    [...]
-    >>> tm.get().commit()
-
-The ``setLocalTransaction()`` and ``getTransaction()`` methods of
-Connection are deprecated.  Use an explicit TM passed via ``txn_mgr=`` to
-``DB.open()`` instead.  The ``setLocalTransaction()`` method still works,
-but it returns a TM instead of a Transaction.
-
-A TM creates Transaction objects, which are used for exactly one
-transaction.  Transaction objects still have ``commit()``, ``abort()``,
-``note()``, ``setUser()``, and ``setExtendedInfo()`` methods.
-
-Resource managers, e.g. Connection or RDB adapter, should use a
-Transaction's ``join()`` method instead of its ``register()`` method.  An
-object that calls ``join()`` manages its own resources.  An object that
-calls ``register()`` expects the TM to manage the objects.
-
-Data managers written against the ZODB 4 transaction API are now
-supported in ZODB 3.
-
-persistent
-----------
-
-A database can now contain persistent weak references.  An object that
-is only reachable from persistent weak references will be removed by
-pack().
-
-The persistence API now distinguishes between deactivation and
-invalidation.  This change is intended to support objects that can't
-be ghosts, like persistent classes.  Deactivation occurs when a user
-calls _p_deactivate() or when the cache evicts objects because it is
-full.  Invalidation occurs when a transaction updates the object.  An
-object that can't be a ghost must load new state when it is
-invalidated, but can ignore deactivation.
-
-Persistent objects can implement a __getnewargs__() method that will
-be used to provide arguments that should be passed to __new__() when
-instances (including ghosts) are created.  An object that implements
-__getnewargs__() must be loaded from storage even to create a ghost.
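-
-A sketch of the idea (the class is invented for illustration)::
-
-    from persistent import Persistent
-
-    class Point(Persistent):
-        def __init__(self, x, y):
-            self.x, self.y = x, y
-        def __getnewargs__(self):
-            # These arguments are passed to __new__() whenever an
-            # instance (including a ghost) is created.
-            return self.x, self.y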
-
-There is new support for writing hooks like __getattr__ and
-__getattribute__.  The new hooks require that user code call special
-persistence methods like _p_getattr() inside their hook.  See the ZODB
-programming guide for details.
-
-The format of serialized persistent references has changed; that is,
-the on-disk format for references has changed.  The old format is
-still supported, but earlier versions of ZODB will not be able to read
-the new format.
-
-ZODB
-----
-
-Closing a ZODB Connection while it is registered with a transaction,
-i.e. has pending modifications, will raise a ConnectionStateError.
-Trying to load objects from or store objects to a closed connection
-will also raise a ConnectionStateError.
-
-ZODB connections are synchronized on commit, even when they didn't
-modify objects.  This feature assumes that the thread that opened the
-connection is also the thread that uses it.  If not, this feature will
-cause problems.  It can be disabled by passing synch=False to open().
-
-New broken object support.
-
-New add() method on Connection.  User code should not assign the
-_p_jar attribute of a new persistent object directly; a deprecation
-warning is issued in this case.
-
-Added a get() method to Connection as a preferred synonym for
-__getitem__().
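-
-In outline (assuming an open connection ``cn`` and a ``Persistent``
-subclass ``Thing``)::
-
-    obj = Thing()
-    cn.add(obj)                   # assigns obj an oid and a _p_jar
-    again = cn.get(obj._p_oid)    # equivalent to cn[obj._p_oid]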
-
-Several methods and/or specific optional arguments of methods have
-been deprecated.  The cache_deactivate_after argument used by DB() and
-Connection() is deprecated.  The DB methods getCacheDeactivateAfter(),
-getVersionCacheDeactivateAfter(), setCacheDeactivateAfter(), and
-setVersionCacheDeactivateAfter() are also deprecated.
-
-The old-style undo() method was removed from the storage API, and
-transactionalUndo() was renamed to undo().
-
-The BDBStorages are no longer distributed with ZODB.
-
-Fixed a serious bug in the new pack implementation.  If pack was
-called on the storage and passed a time earlier than a previous pack
-time, data could be lost.  In other words, if there are any two pack
-calls, where the time argument passed to the second call was earlier
-than the first call, data loss could occur.  The bug was fixed by
-causing the second call to raise a StorageError before performing any
-work.
-
-Fixed a rare bug in pack:  if a pack started during a small window of
-time near the end of a concurrent transaction's commit, it was possible
-for the pack attempt to raise a spurious
-
-     CorruptedError: ... transaction with checkpoint flag set
-
-exception.  This did no damage to the database, or to the transaction
-in progress, but no pack was performed then.
-
-By popular demand, FileStorage.pack() no longer propagates a
-
-    FileStorageError:  The database has already been packed to a
-    later time or no changes have been made since the last pack
-
-exception.  Instead that message is logged (at INFO level), and
-the pack attempt simply returns then (no pack is performed).
-
-ZEO
----
-
-Fixed a bug that prevented the -m / --monitor argument from working.
-
-zdaemon
--------
-
-Added a -m / --mask option that controls the umask of the subprocess.
-
-zLOG
-----
-
-The zLOG backend has been removed.  zLOG is now just a facade over the
-standard Python logging package.  Environment variables like
-STUPID_LOG_FILE are no longer honored.  To configure logging, you need
-to follow the directions in the logging package documentation.  The
-process is currently more complicated than configuring zLOG was.  See
-test.py for an example.
-
-ZConfig
--------
-
-This release of ZODB contains ZConfig 2.1.
-
-More documentation has been written.
-
-Make sure keys specified as attributes of the <default> element are
-converted by the appropriate key type, and are re-checked for derived
-sections.
-
-Refactored the ZConfig.components.logger schema components so that a
-schema can import just one of the "eventlog" or "logger" sections if
-desired.  This can be helpful to avoid naming conflicts.
-
-Added a reopen() method to the logger factories.
-
-Always use an absolute pathname when opening a FileHandler.
-
-
-Miscellaneous
--------------
-
-The layout of the ZODB source release has changed.  All the source
-code is contained in a src subdirectory.  The primary motivation for
-this change was to avoid confusion caused by installing ZODB and then
-testing it interactively from the source directory; the interpreter
-would find the uncompiled ZODB package in the source directory and
-report an import error.
-
-A reference-counting bug was fixed, in the logic calling a modified
-persistent object's data manager's register() method.  The primary symptom
-was rare assertion failures in Python's cyclic garbage collection.
-
-The Connection class's onCommitAction() method was removed.
-
-Some of the doc strings in ZODB are now written for processing by
-epydoc.
-
-Several new test suites were written using doctest instead of the
-standard unittest TestCase framework.
-
-MappingStorage now implements getTid().
-
-ThreadedAsync: Provide a way to shut down the servers using an exit
-status.
-
-The mkzeoinstance script looks for a ZODB installation, not a Zope
-installation.  The received wisdom is that running a ZEO server
-without access to the appserver code avoids many mysterious problems.
-
-
-What's new in ZODB3 3.3 alpha 2
-===============================
-Release date: 06-Jan-2004
-
-This release contains a major overhaul of the persistence machinery,
-including some user-visible changes.  The Persistent base class is now
-a new-style class instead of an ExtensionClass.  The change enables
-the use of features like properties with persistent object classes.
-The Persistent base class is now contained in the persistent package.
-
-The Persistence package is included for backwards compatibility.  The
-Persistence package is used by Zope to provide special
-ExtensionClass-compatibility features like a non-C3 MRO and an __of__
-method.  ExtensionClass is not included with this release of ZODB3.
-If you use the Persistence package, it will print a warning and import
-Persistent from persistent.
-
-In short, the new persistent package is recommended for non-Zope
-applications.  The following dotted class names are now preferred over
-earlier names:
-
-- persistent.Persistent
-- persistent.list.PersistentList
-- persistent.mapping.PersistentMapping
-- persistent.TimeStamp
-
-The in-memory, per-connection object cache (pickle cache) was changed
-to participate in garbage collection.  This should reduce the number
-of memory leaks, although we are still tracking a few problems.
-
-Multi-version concurrency control
----------------------------------
-
-ZODB now supports multi-version concurrency control (MVCC) for
-storages that support multiple revisions.  FileStorage and
-BDBFullStorage both support MVCC.  In short, MVCC means that read
-conflicts should almost never occur.  When an object is modified in
-one transaction, other concurrent transactions read old revisions of
-the object to preserve consistency.  In earlier versions of ZODB, any
-access of the modified object would raise a ReadConflictError.
-
-The ZODB internals changed significantly to accommodate MVCC.  There
-are relatively few user visible changes, aside from the lack of read
-conflicts.  It is possible to disable the MVCC feature using the mvcc
-keyword argument to the DB open() method, e.g. ``db.open(mvcc=False)``.
-
-ZEO
----
-
-Changed the ZEO server and control process to work with a single
-configuration file; this is now the default way to configure these
-processes.  (It's still possible to use separate configuration files.)
-The ZEO configuration file can now include a "runner" section used by
-the control process and ignored by the ZEO server process itself.  If
-present, the control process can use the same configuration file.
-
-Fixed a performance problem in the logging code for the ZEO protocol.
-The logging code could call repr() on arbitrarily long lists, even
-though it only logged the first 60 bytes; worse, it called repr() even
-if logging was currently disabled.  Fixed to call repr() on individual
-elements until the limit is reached.
-
-Fixed a bug in zrpc (when using authentication) where the MAC header
-wasn't being read for large messages, generating errors while unpickling
-commands sent over the wire. Also fixed the zeopasswd.py script, added
-testcases and provided a more complete commandline interface.
-
-Fixed a misuse of the _map variable in zrpc Connection objects, which
-are also asyncore.dispatcher objects.  This allows ZEO to work with
-CVS Python (2.4). _map is used to indicate whether the dispatcher
-uses the default socket_map or a custom socket_map.  A recent change
-to asyncore caused it to use _map in its add_channel() and
-del_channel() methods, presumably as a bug fix (it may get ported
-to 2.3).  That change made our dubious use of _map a problem, because
-we also put the Connections in the global socket_map.  The new
-asyncore won't remove a Connection from the global socket map, because
-it has a custom _map.
-
-The prefix used for log messages from runzeo.py was changed from
-RUNSVR to RUNZEO.
-
-Miscellaneous
--------------
-
-ReadConflictError objects now have an ignore() method.  Normally, a
-transaction that causes a read conflict can't be committed.  If the
-exception is caught and its ignore() method called, the transaction
-can be committed.  Application code may need this in advanced
-applications.
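-
-In outline (Python 2 syntax of the era; ``obj`` is assumed to be a
-persistent object)::
-
-    from ZODB.POSException import ReadConflictError
-
-    try:
-        value = obj.attribute
-    except ReadConflictError, e:
-        e.ignore()    # permit this transaction to commit anyway
-        value = None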
-
-
-What's new in ZODB3 3.3 alpha 1
-===============================
-Release date: 17-Jul-2003
-
-New features of Persistence
----------------------------
-
-The Persistent base class is a regular Python type implemented in C.
-It should be possible to create new-style classes that inherit from
-Persistent, and, thus, use all the new Python features introduced in
-Python 2.2 and 2.3.
-
-The __changed__() method on Persistent objects is no longer supported.
-
-New features in BTrees
-----------------------
-
-BTree, Bucket, TreeSet and Set objects are now iterable objects, playing
-nicely with the iteration protocol introduced in Python 2.2, and can
-be used in any context that accepts an iterable object.  As for Python
-dicts, the iterator constructed for BTrees and Buckets iterates
-over the keys.
-
->>> from BTrees.OOBTree import OOBTree
->>> b = OOBTree({"one": 1, "two": 2, "three": 3, "four": 4})
->>> for key in b: # iterates over the keys
-...    print key
-four
-one
-three
-two
->>> list(enumerate(b))
-[(0, 'four'), (1, 'one'), (2, 'three'), (3, 'two')]
->>> i = iter(b)
->>> i.next()
-'four'
->>> i.next()
-'one'
->>> i.next()
-'three'
->>> i.next()
-'two'
->>>
-
-As for Python dicts in 2.2, BTree and Bucket objects have new
-.iterkeys(), .iteritems(), and .itervalues() methods.  TreeSet and Set
-objects have a new .iterkeys() method.  Unlike Python dicts,
-these new methods accept optional min and max arguments to effect
-range searches.  While Bucket.keys() produces a list, Bucket.iterkeys()
-produces an iterator, and similarly for Bucket values() versus
-itervalues(), Bucket items() versus iteritems(), and Set keys() versus
-iterkeys().  The iter{keys,values,items} methods of BTrees and the
-iterkeys() method of TreeSets also produce iterators, while their
-keys() (etc) methods continue to produce BTreeItems objects (a form of
-"lazy" iterator that predates Python 2.2's iteration protocol).
-
->>> sum(b.itervalues())
-10
->>> zip(b.itervalues(), b.iterkeys())
-[(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')]
->>>
-
-BTree, Bucket, TreeSet and Set objects also implement the __contains__
-method new in Python 2.2, which means that testing for key membership
-can be done directly now via the "in" and "not in" operators:
-
->>> "won" in b
-False
->>> "won" not in b
-True
->>> "one" in b
-True
->>>
-
-All old and new range-search methods now accept keyword arguments,
-and new optional excludemin and excludemax keyword arguments.  The
-new keyword arguments allow doing a range search that's exclusive
-at one or both ends (doesn't include min, and/or doesn't include
-max).
-
->>> list(b.keys())
-['four', 'one', 'three', 'two']
->>> list(b.keys(max='three'))
-['four', 'one', 'three']
->>> list(b.keys(max='three', excludemax=True))
-['four', 'one']
->>>
-
-Other improvements
-------------------
-
-The exceptions generated by write conflicts now contain the name of
-the conflicted object's class.  This feature requires support from the
-storage.  All the standard storages support it.
-
-What's new in ZODB3 3.2
-========================
-Release date: 08-Oct-2003
-
-Nothing has changed since release candidate 1.
-
-What's new in ZODB3 3.2 release candidate 1
-===========================================
-Release date: 01-Oct-2003
-
-Added a summary to the Doc directory.  There are several new documents
-in the 3.2 release, including "Using zdctl and zdrun to manage server
-processes" and "Running a ZEO Server HOWTO."
-
-Fixed ZEO's protocol negotiation mechanism so that a ZODB 3.1 client
-can talk to a ZODB 3.2 server.
-
-Fixed a memory leak in the ZEO server.  The server was leaking a few
-KB of memory per connection.
-
-Fixed a memory leak in the ZODB object cache (cPickleCache).  The
-cache did not release two references to its Connection, causing a
-large cycle of objects to leak when a database was closed.
-
-Fixed a bug in the ZEO code that caused it to leak socket objects on
-Windows.  Specifically, fix the trigger mechanism so that both sockets
-created for a trigger are closed.
-
-Fixed a bug in the ZEO storage server that caused it to leave temp
-files behind.  The CommitLog class contains a temp file, but it was
-not closing the file.
-
-Changed the order of setuid() and setgid() calls in zdrun, so that
-setgid() is called first.
-
-Added a timeout to the ZEO test suite that prevents hangs.  The test
-suite creates ZEO servers with randomly assigned ports.  If the port
-happens to be in use, the test suite would hang because the ZEO client
-would never stop trying to connect.  The fix will cause the test to
-fail after a minute, but should prevent the test runner from hanging.
-
-The logging package was updated to include the latest version of the
-logging package from Python CVS.  Note that this package is only
-installed for Python 2.2.  In later versions of Python, it is
-available in the Python standard library.
-
-The ZEO1 directory was removed from the source distribution.  ZEO1 is
-not supported, and we never intended to include it in the release.
-
-What's new in ZODB3 3.2 beta 3
-==============================
-Release date: 23-Sep-2003
-
-Note: The changes listed for this release include changes also made in
-ZODB 3.1.x releases and ported to the 3.2 release.
-
-This version of ZODB 3.2 is not compatible with Python 2.1.  Earlier
-versions were explicitly designed to be compatible with Zope 2.6.
-That plan has been dropped, because Zope 2.7 is already in beta
-release.
-
-Several of the classes in ZEO and ZODB now inherit from object, making
-them new-style classes.  The primary motivation for the change was to
-make it easier to debug memory leaks.  We don't expect any behavior to
-change as a result.
-
-A new feature to allow removal of connection pools for versions was
-ported from Zope 2.6.  This feature is needed by Zope to avoid denial
-of service attacks that allow a client to create an arbitrary number
-of version pools.
-
-Fixed several critical ZEO bugs.
-
-- If several client transactions were blocked waiting for the storage
-  and one of the blocked clients disconnected, the server would
-  attempt to restart one of the other waiting clients.  Since the
-  disconnected client did not have the storage lock, this could lead
-  to deadlock.  It could also cause the assertion "self._client is
-  None" to fail.
-
-- If a storage server fails or times out between the vote and the
-  finish, the ZEO cache could get populated with objects that didn't
-  make it to the storage server.
-
-- If a client loses its connection to the server near the end of a
-  transaction, it is now guaranteed to get a ClientDisconnected error
-  even if it reconnects before the transaction finishes.  This is
-  necessary because the server will always abort the transaction.
-  In some cases, the client would never see an error for the aborted
-  transaction.
-
-- In tpc_finish(), reordered the calls so that the server's tpc_finish()
-  is called (and must succeed) before we update the ZEO client cache.
-
-- The storage name is now prepended to the sort key, to ensure a
-  unique global sort order if storages are named uniquely.  This
-  can prevent deadlock in some unusual cases.
-
-Fixed several serious flaws in the implementation of the ZEO
-authentication protocol.
-
-- The smac layer would accept a message without a MAC even after the
-  session key was established.
-
-- The client never initialized its session key, so it never checked
-  incoming messages or created MACs for outgoing messages.
-
-- The smac layer used a single HMAC instance for sending and receiving
-  messages.  This approach could only work if client and server were
-  guaranteed to process all messages in the same total order, which
-  could only happen in simple scenarios like unit tests.
-
-Fixed a bug in ExtensionClass when comparing ExtensionClass instances.
-The code could raise RuntimeWarning under Python 2.3, and produce
-incorrect results on 64-bit platforms.
-
-Fixed bug in BDBStorage that could lead to DBRunRecoveryErrors when a
-transaction was aborted after performing operations like commit
-version or undo that create new references to existing pickles.
-
-Fixed a bug in Connection.py that caused it to fail with an
-AttributeError if close() was called after the database was closed.
-
-The test suite leaves fewer log files behind, although it still leaves
-a lot of junk.  The test.py script puts each test's temp files in a
-separate directory, so it is easier to see which tests are causing
-problems.  Unfortunately, it is still too tedious to figure out why the
-identified tests are leaving files behind.
-
-This release contains the latest and greatest version of the
-BDBStorage.  This storage has still not seen testing in a production
-environment, but it represents the current best design and most recent
-code culled from various branches where development has occurred.
-
-The Tools directory contains a number of small improvements, a few new
-tools, and a README.txt that catalogs the tools.  Many of the tools are
-installed by setup.py; those scripts will now have a #! line set
-automatically on Unix.
-
-Fixed bugs in Tools/repozo.py, including a timing-dependent one that
-could cause the next invocation of repozo to do a full backup when
-an incremental backup would have sufficed.
-
-A pair of new scripts from Jim Fulton can be used to synthesize
-workloads and measure ZEO performance:  see zodbload.py and
-zeoserverlog.py in the Tools directory.  Note that these require
-Zope.
-
-Tools/checkbtrees.py was strengthened in two ways:
-
-- In addition to running the _check() method on each BTree B found,
-  BTrees.check.check(B) is also run.  The check() function was written
-  after checkbtrees.py, and identifies kinds of damage B._check()
-  cannot find.
-
-- Cycles in the object graph no longer lead to unbounded output.
-  Note that preventing this requires remembering the oid of each
-  persistent object found, which increases the memory needed by the
-  script.
-
-What's new in ZODB3 3.2 beta 2
-==============================
-Release date: 16-Jun-2003
-
-Fixed critical race conditions in ZEO's cache consistency code that
-could cause invalidations to be lost or stale data to be written to
-the cache.  These bugs can lead to data loss or data corruption.
-These bugs are relatively unlikely to be provoked in sites with few
-conflicts, but the possibility of failure existed any time an object
-was loaded and stored concurrently.
-
-Fixed a bug in conflict resolution that failed to ghostify an object
-if it was involved in a conflict.  (This code may be redundant, but it
-has been fixed regardless.)
-
-The ZEO server was fixed so that it does not perform any I/O until all
-of a transaction's invalidations are queued.  If it performed I/O in the
-middle of sending invalidations, it was possible to overlap a
-load from a client with the invalidation being sent to it.
-
-The ZEO cache now handles invalidations atomically.  This is the same
-sort of bug that is described in the 3.1.2b1 section below, but it
-affects the ZEO cache.
-
-Fixed several serious bugs in fsrecover that caused it to fail
-catastrophically in certain cases because it thought it had found a
-checkpoint (status "c") record when it was in the middle of the file.
-
-Two new features snuck into this beta release.
-
-The ZODB.transact module provides a helper function that converts a
-regular function or method into a transactional one.
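-
-A minimal sketch, assuming the helper is named transact and takes the
-function to wrap as its first argument:
-
-    from ZODB.transact import transact
-
-    def add_item(root, key, value):
-        root[key] = value
-
-    add_item = transact(add_item)  # each call now runs in its own transaction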
-
-The ZEO client cache now supports Adaptable Persistence (APE).  The
-cache used to expect that all OIDs were eight bytes long.
-
-What's new in ZODB3 3.2 beta 1
-==============================
-Release date: 30-May-2003
-
-ZODB
-----
-
-Invalidations are now processed atomically.  Each transaction will see
-all the changes caused by an earlier transaction or none of them.
-Before this patch, it was possible for a transaction to see invalid
-data because it saw only a subset of the invalidations.  This is the
-most likely cause of reported BTrees corruption, where keys were
-stored in the wrong bucket.  When a BTree bucket splits, the bucket
-and the bucket's parent are both modified.  If a transaction sees the
-invalidation for the bucket but not the parent, the BTree in memory
-will be internally inconsistent and keys can be put in the wrong
-bucket.  The atomic invalidation fix prevents this problem.
-
-A number of minor reference count bugs in the object cache (the
-cPickleCache.c file) were fixed.
-
-It was possible for a transaction that failed in tpc_finish() to lose
-the traceback that caused the failure.  The transaction code was fixed
-to report the original error as well as any errors that occur while
-trying to recover from the original error.
-
-The "other" argument to copyTransactionsFrom() only needs to have an
-.iterator() method.  For convenience, FileStorage's and BDBFullStorage's
-iterators were changed to have this method, which just returns self.
-
-Mount points are now visible from mounted objects.
-
-Fixed memory leak involving database connections and caches.  When a
-connection or database was closed, the cache and database leaked,
-because of a circular reference involving the cache.  Fixed the cache
-to explicitly clear out its contents when its connection is closed.
-
-The ZODB cache has fewer methods.  It used to expose methods that
-could mutate the dictionary, which allowed users to violate internal
-invariants.
-
-ZConfig
--------
-
-It is now possible to configure ZODB databases and storages and ZEO
-servers using ZConfig.
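-
-A sketch of what this enables, assuming the databaseFromString helper
-in the ZODB.config module and the filestorage section type:
-
-    import ZODB.config
-
-    db = ZODB.config.databaseFromString("""
-    <zodb>
-      <filestorage>
-        path Data.fs
-      </filestorage>
-    </zodb>
-    """)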
-
-ZEO & zdaemon
--------------
-
-ZEO now supports authenticated client connections.  The default
-authentication protocol uses a hash-based challenge-response protocol
-to prove identity and establish a session key for message
-authentication.  The architecture is pluggable to allow third parties
-to develop better authentication protocols.
-
-There is a new HOWTO for running a ZEO server.  The draft in this
-release is incomplete, but provides more guidance than previous
-releases.  See the file Doc/ZEO/howto.txt.
-
-
-The ZEO storage server's transaction timeout feature was refactored
-and made slightly more robust.
-
-A new ZEO utility script, ZEO/mkzeoinst.py, was added.  This creates a
-standard directory structure and writes a configuration file with
-mostly default values, and a bootstrap script that can be used to
-manage and monitor the server using zdctl.py (see below).
-
-Much work was done to improve zdaemon's zdctl.py and zdrun.py scripts.
-(In the alpha 1 release, zdrun.py was called zdaemon.py, but
-installing it in <prefix>/bin caused much breakage due to the name
-conflict with the zdaemon package.)  Together with the new
-mkzeoinst.py script, this makes controlling a ZEO server a breeze.
-
-A ZEO client will not read from its cache during cache verification.
-This fix was necessary to prevent the client from reading inconsistent
-data.
-
-The isReadOnly() method of a ZEO client was fixed to return false
-when the client is connected to a read-only fallback server.
-
-The sync() method of ClientStorage and the pending() method of a zrpc
-connection now do both input and output.
-
-The short_repr() function used to generate log messages was fixed so
-that it does not blow up creating a repr of very long tuples.
-
-Storages
---------
-
-FileStorage has a new pack() implementation that fixes several
-reported problems that could lead to data loss.
-
-Two small bugs were fixed in DemoStorage.  undoLog() did not handle
-its arguments correctly and pack() could accidentally delete objects
-created in versions.
-
-Fixed trivial bug in fsrecover that prevented it from working at all.
-
-FileStorage will use fsync() on Windows starting with Python 2.2.3.
-
-FileStorage's commit version was fixed.  It used to stop after the
-first object, leaving all the other objects in the version.
-
-BTrees
-------
-
-Trying to store an object of a non-integer type into an IIBTree
-or OIBTree could leave the bucket in a variety of insane states.  For
-example, trying
-
-    b[obj] = "I'm a string, not an integer"
-
-where b is an OIBTree, could trigger the problem.  This manifested as
-a refcount leak in the test suite, but could have been much worse (the
-most likely real-life symptom is that a seemingly arbitrary existing
-key would "go missing").
-
-When deleting the first child of a BTree node with more than one
-child, a reference to the second child leaked.  This could cause
-the entire bucket chain to leak (not be collected as garbage
-despite not being referenced anymore).
-
-Other minor BTree leak scenarios were also fixed.
-
-Tools
------
-
-New tool zeoqueue.py for parsing ZEO log files, looking for blocked
-transactions.
-
-New tool repozo.py (originally by Anthony Baxter) for performing
-incremental backups of Data.fs files.
-
-The fsrecover.py script now does a better job of recovering from
-errors that occur in the middle of a transaction record.  Fixed several
-bugs that caused partial or total failures in earlier versions.
-
-
-What's new in ZODB3 3.2 alpha 1
-===============================
-Release date: 17-Jan-2003
-
-Most of the changes in this release are performance and stability
-improvements to ZEO.  A major packaging change is that there won't be
-a separate ZEO release.  The new ZConfig is a noteworthy addition (see
-below).
-
-ZODB
-----
-
-An experimental new transaction API was added.  The Connection class
-has a new method, setLocalTransaction().  ZODB applications can call
-this method to bind transactions to connections rather than threads.
-This is especially useful for GUI applications, which often have only
-one thread but multiple independent activities within that thread
-(generally one per window).  Thanks to Christian Reis for championing
-this feature.
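-
-A sketch of the new API, assuming setLocalTransaction() returns the
-connection-local transaction object:
-
-    import ZODB
-    from ZODB.FileStorage import FileStorage
-
-    db = ZODB.DB(FileStorage('Data.fs'))
-    conn = db.open()
-    txn = conn.setLocalTransaction()  # bind transactions to conn, not thread
-    conn.root()['key'] = 'value'
-    txn.commit()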
-
-Applications that take advantage of this feature should not use the
-get_transaction() function.  Until now, ZODB itself sometimes assumed
-get_transaction() was the only way to get the transaction.  Minor
-corrections have been added.  The ZODB test suite, on the other hand,
-can continue to use get_transaction(), since it is free to assume that
-transactions are bound to threads.
-
-ZEO
----
-
-There is a new recommended script for starting a storage server.  We
-recommend using ZEO/runzeo.py instead of ZEO/start.py.  The start.py
-script is still available in this release, but it will no longer be
-maintained and will eventually be removed.
-
-There is a new zdaemon implementation.  This version is a separate
-script that runs an arbitrary daemon.  To run the ZEO server as a
-daemon, you would run "zdrun.py runzeo.py".  There is also a simple
-shell, zdctl.py, that can be used to manage a daemon.  Try
-"zdctl.py -p runzeo.py".
-
-There is a new version of the ZEO protocol in this release and a first
-stab at protocol negotiation.  (It's a first stab because the protocol
-checking support in ZODB 3.1 was too primitive to support anything
-better.)  A ZODB 3.2 ZEO client can talk to an old server, but a ZODB
-3.2 server can't talk to an old client.  It's safe to upgrade all the
-clients first and upgrade the server last.  The ZEO client cache
-format changed, so you'll need to delete persistent caches before
-restarting clients.
-
-The ZEO cache verification protocol was revised to require many fewer
-messages in cases where a client or server restarts quickly.
-
-The performance of full cache verification has improved dramatically;
-Jim's measurements showed speedups somewhere in the 2x-5x range.  The
-implementation was fixed to use the very fast getSerial() method on
-the storage instead of the comparatively slow load().
-
-The ZEO server has an optional timeout feature that will abort a
-connection that does not commit within a certain amount of time.  The
-timeout works by closing the socket the client is using, causing both
-client and server to abort the transaction and continue.  This is a
-drastic step, but can be useful to prevent a hung client or other bug
-from blocking a server indefinitely.
-
-A bug was fixed in the ZEO protocol that allowed clients to read stale
-cache data while cache verification was being performed.  The fixed
-version prevents the client from using the storage until after
-verification completes.
-
-The ZEO server has an experimental monitoring interface that reports
-usage statistics for the storage server including number of connected
-clients and number of transactions active and committed.  It can be
-enabled by passing the -m flag to runsvr.py.
-
-The ZEO ClientStorage no longer supports the environment variables
-CLIENT_HOME, INSTANCE_HOME, or ZEO_CLIENT.
-
-The ZEO1 package is still included with this release, but there is no
-longer an option to install it.
-
-BTrees
-------
-
-The BTrees package now has a check module that inspects a BTree to
-check internal invariants.  Bugs in older versions of the code could
-leave a BTree in an inconsistent state.  Calling BTrees.check.check()
-on a BTree object should verify its consistency.  (See the NEWS
-section for 3.1 beta 1 below for the old BTrees bugs.)
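-
-For example (a sketch):
-
->>> from BTrees.OOBTree import OOBTree
->>> from BTrees.check import check
->>> b = OOBTree({"one": 1, "two": 2})
->>> check(b)    # raises AssertionError if the tree is damaged
->>>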
-
-Fixed a rare conflict resolution problem in the BTrees that could
-cause a segfault when the conflict resolution resulted in an
-empty bucket.
-
-Installation
-------------
-
-The distutils setup now installs several Python scripts: the
-runzeo.py and zdrun.py scripts mentioned above, and several fsXXX.py
-scripts from the Tools directory.
-
-The test.py script does not run all the ZEO tests by default, because
-the ZEO tests take a long time to run.  Use --all to run all the
-tests.  Otherwise a subset of the tests, mostly using MappingStorage,
-are run.
-
-Storages
---------
-
-There are two new storages based on Sleepycat's BerkeleyDB in the
-BDBStorage package.  Barry will have to write more here, because I
-don't know how different they are from the old bsddb3Storage
-storages.  See Doc/BDBStorage.txt for more information.
-
-It now takes less time to open an existing FileStorage.  The
-FileStorage uses a BTree-based index that is faster to pickle and
-unpickle.  It also saves the index periodically so that subsequent
-opens will go fast even if the storage was not closed cleanly.
-
-Misc
-----
-
-The new ZConfig package, which will be used by Zope and ZODB, is
-included.  ZConfig provides a configuration syntax, similar to
-Apache's syntax.  The package can be used to configure the ZEO server
-and ZODB databases.  See the module ZODB.config for functions to open
-the database from configuration.  See ZConfig/doc for more info.
-
-The zLOG package now uses the logging package by Vinay Sajip, which
-will be included in Python 2.3.
-
-The Sync extension was removed from ExtensionClass, because it was not
-used by ZODB.
-
-What's new in ZODB3 3.1.4?
-==========================
-Release date: 11-Sep-2003
-
-A new feature to allow removal of connection pools for versions was
-ported from Zope 2.6.  This feature is needed by Zope to avoid denial
-of service attacks that allow a client to create an arbitrary number
-of version pools.
-
-A pair of new scripts from Jim Fulton can be used to synthesize
-workloads and measure ZEO performance:  see zodbload.py and
-zeoserverlog.py in the Tools directory.  Note that these require
-Zope.
-
-Tools/checkbtrees.py was strengthened in two ways:
-
-- In addition to running the _check() method on each BTree B found,
-  BTrees.check.check(B) is also run.  The check() function was written
-  after checkbtrees.py, and identifies kinds of damage B._check()
-  cannot find.
-
-- Cycles in the object graph no longer lead to unbounded output.
-  Note that preventing this requires remembering the oid of each
-  persistent object found, which increases the memory needed by the
-  script.
-
-What's new in ZODB3 3.1.3?
-==========================
-Release date: 18-Aug-2003
-
-Fixed several critical ZEO bugs.
-
-- If a storage server fails or times out between the vote and the
-  finish, the ZEO cache could get populated with objects that didn't
-  make it to the storage server.
-
-- If a client loses its connection to the server near the end of a
-  transaction, it is now guaranteed to get a ClientDisconnected error
-  even if it reconnects before the transaction finishes.  This is
-  necessary because the server will always abort the transaction.
-  In some cases, the client would never see an error for the aborted
-  transaction.
-
-- In tpc_finish(), reordered the calls so that the server's tpc_finish()
-  is called (and must succeed) before we update the ZEO client cache.
-
-- The storage name is now prepended to the sort key, to ensure a
-  unique global sort order if storages are named uniquely.  This
-  can prevent deadlock in some unusual cases.
-
-A variety of fixes and improvements to Berkeley storage (aka BDBStorage)
-were back-ported from ZODB 4.  This release now contains the most
-current version of the Berkeley storage code.  Many tests have been
-back-ported, but not all.
-
-Modified the Windows tests to wait longer at the end of ZEO tests for
-the server to shut down.  Before Python 2.3, there was no waitpid() on
-Windows, and thus no way to know whether the server had shut down.  The
-change makes the Windows ZEO tests much less likely to fail or hang,
-at the cost of increasing the time needed to run the tests.
-
-Fixed a bug in ExtensionClass when comparing ExtensionClass instances.
-The code could raise RuntimeWarning under Python 2.3, and produce
-incorrect results on 64-bit platforms.
-
-Fixed bugs in Tools/repozo.py, including a timing-dependent one that
-could cause the next invocation of repozo to do a full backup when
-an incremental backup would have sufficed.
-
-Added Tools/README.txt that explains what each of the scripts in the
-Tools directory does.
-
-There were many small changes and improvements to the test suite.
-
-What's new in ZODB3 3.1.2 final?
-================================
-
-Fixed bug in FileStorage pack that caused it to fail if it encountered
-an old undo record (status "u").
-
-Fixed several bugs in FileStorage pack that could cause OverflowErrors
-for storages > 2 GB.
-
-Fixed memory leak in TimeStamp.laterThan() that only occurred when it
-had to create a new TimeStamp.
-
-Fixed two BTree bugs that had already been fixed on the head a while ago:
-
-   - bug in fsBTree that would cause byValue searches to end early.
-     (fsBTrees are never used this way, but it was still a bug.)
-
-   - bug that led to a segfault if a BTree was mutated via deletion
-     while it was being iterated over.
-
-What's new in ZODB3 3.1.2 beta 2?
-=================================
-
-Fixed critical race conditions in ZEO's cache consistency code that
-could cause invalidations to be lost or stale data to be written to
-the cache.  These bugs can lead to data loss or data corruption.
-These bugs are relatively unlikely to be provoked in sites with few
-conflicts, but the possibility of failure existed any time an object
-was loaded and stored concurrently.
-
-Fixed a bug in conflict resolution that failed to ghostify an object
-if it was involved in a conflict.  (This code may be redundant, but it
-has been fixed regardless.)
-
-The ZEO server was fixed so that it does not perform any I/O until all
-of a transaction's invalidations are queued.  If it performed I/O in the
-middle of sending invalidations, it was possible to overlap a
-load from a client with the invalidation being sent to it.
-
-The ZEO cache now handles invalidations atomically.  This is the same
-sort of bug that is described in the 3.1.2b1 section below, but it
-affects the ZEO cache.
-
-Fixed several serious bugs in fsrecover that caused it to fail
-catastrophically in certain cases because it thought it had found a
-checkpoint (status "c") record when it was in the middle of the file.
-
-What's new in ZODB3 3.1.2 beta 1?
-=================================
-
-ZODB
-----
-
-Invalidations are now processed atomically.  Each transaction will see
-all the changes caused by an earlier transaction or none of them.
-Before this patch, it was possible for a transaction to see invalid
-data because it saw only a subset of the invalidations.  This is the
-most likely cause of reported BTrees corruption, where keys were
-stored in the wrong bucket.  When a BTree bucket splits, the bucket
-and the bucket's parent are both modified.  If a transaction sees the
-invalidation for the bucket but not the parent, the BTree in memory
-will be internally inconsistent and keys can be put in the wrong
-bucket.  The atomic invalidation fix prevents this problem.
-
-A number of minor reference count bugs in the object cache (the
-cPickleCache.c file) were fixed.
-
-It was possible for a transaction that failed in tpc_finish() to lose
-the traceback that caused the failure.  The transaction code was fixed
-to report the original error as well as any errors that occur while
-trying to recover from the original error.
-
-ZEO
----
-
-A ZEO client will not read from its cache during cache verification.
-This fix was necessary to prevent the client from reading inconsistent
-data.
-
-The isReadOnly() method of a ZEO client was fixed to return false
-when the client is connected to a read-only fallback server.
-
-The sync() method of ClientStorage and the pending() method of a zrpc
-connection now do both input and output.
-
-The short_repr() function used to generate log messages was fixed so
-that it does not blow up creating a repr of very long tuples.
-
-Storages
---------
-
-FileStorage has a new pack() implementation that fixes several
-reported problems that could lead to data loss.
-
-Two small bugs were fixed in DemoStorage.  undoLog() did not handle
-its arguments correctly and pack() could accidentally delete objects
-created in versions.
-
-Fixed trivial bug in fsrecover that prevented it from working at all.
-
-FileStorage will use fsync() on Windows starting with Python 2.2.3.
-
-FileStorage's commit version was fixed.  It used to stop after the
-first object, leaving all the other objects in the version.
-
-BTrees
-------
-
-Trying to store an object of a non-integer type into an IIBTree
-or OIBTree could leave the bucket in a variety of insane states.  For
-example, trying
-
-    b[obj] = "I'm a string, not an integer"
-
-where b is an OIBTree, could trigger the problem.  This manifested as
-a refcount leak in the test suite, but could have been much worse (the
-most likely real-life symptom is that a seemingly arbitrary existing
-key would "go missing").
-
-When deleting the first child of a BTree node with more than one
-child, a reference to the second child leaked.  This could cause
-the entire bucket chain to leak (not be collected as garbage
-despite not being referenced anymore).
-
-Other minor BTree leak scenarios were also fixed.
-
-Other
------
-
-Comparing a Missing.Value object to a C type that provides its own
-comparison operation could lead to a segfault when the Missing.Value
-was on the right-hand side of the comparison operator.  The Missing
-class was fixed so that its coercion and comparison operations are
-safe.
-
-Tools
------
-
-Four tools are now installed by setup.py: fsdump.py, fstest.py,
-repozo.py, and zeopack.py.
-
-What's new in ZODB3 3.1.1 final?
-================================
-Release date: 11-Feb-2003
-
-Tools
------
-
-Updated repozo.py tool
-
-What's new in ZODB3 3.1.1 beta 2?
-=================================
-Release date: 03-Feb-2003
-
-The Transaction "hosed" feature is disabled in this release.  If a
-transaction fails during tpc_finish(), it is not possible, in
-general, to know whether the storage is in a consistent state.  For
-example, a ZEO server may commit the data and then fail before sending
-confirmation of the commit to the client.  If multiple storages are
-involved in a transaction, the problem is exacerbated: One storage may
-commit the data while another fails to commit.  In previous versions
-of ZODB, the database would set a global "hosed" flag that prevented
-any other transaction from committing until an administrator could
-check the status of the various failed storages and ensure that the
-database is in a consistent state.  This approach favors data
-consistency over availability.  The new approach is to log a panic but
-continue.  In practice, availability seems to be more important than
-consistency.  The failure mode is exceedingly rare in either case.
-
-The BTrees-based fsIndex for FileStorage is enabled.  This version of
-the index is faster to load and store via pickle and uses less memory
-to store keys.  We had intended to enable this feature in an earlier
-release, but failed to actually do it; thus, it's getting enabled as a
-bug fix now.
-
-Two rare bugs were fixed in BTrees conflict resolution.  The most
-probable symptom of the bug would have been a segfault.  The bugs
-were found via synthetic stress tests rather than bug reports.
-
-A value-based consistency checker for BTrees was added.  See the
-module BTrees.check for the checker and other utilities for working
-with BTrees.
-
-A new script called repozo.py was added.  This script, originally
-written by Anthony Baxter, provides an incremental backup scheme for
-FileStorage based storages.
-
-zeopack.py has been fixed to use a read-only connection.
-
-Various small autopack-related race conditions have been fixed in the
-Berkeley storage implementations.  There have been some table changes
-to the Berkeley storages so any storage you created in 3.1.1b1 may not
-work.  Part of these changes was to add a storage version number to
-the schema so these types of incompatible changes can be avoided in
-the future.
-
-Removed the chance of bogus warnings in the FileStorage iterator.
-
-ZEO
----
-
-The ZEO version number was bumped to 2.0.2 on account of the following
-minor feature additions.
-
-The performance of full cache verification has improved dramatically;
-Jim's measurements showed speedups somewhere in the 2x-5x range.  The
-implementation was fixed to use the very fast getSerial() method on
-the storage instead of the comparatively slow load().
-
-The ZEO server has an optional timeout feature that will abort a
-connection that does not commit within a certain amount of time.  The
-timeout works by closing the socket the client is using, causing both
-client and server to abort the transaction and continue.  This is a
-drastic step, but can be useful to prevent a hung client or other bug
-from blocking a server indefinitely.
-
-If a client was disconnected during a transaction, the tpc_abort()
-call did not properly reset the internal state about the transaction.
-The bug caused the next transaction to fail in its tpc_finish().
-Also, any ClientDisconnected exceptions raised during tpc_abort() are
-ignored.
-
-ZEO logging has been improved by adding more logging for important
-events, and changing the logging level for existing messages to a more
-appropriate level (usually lower).
-
-What's new in ZODB3 3.1.1 beta 1?
-=================================
-Release date: 10-Dec-2002
-
-It was possible for earlier versions of ZODB to deadlock when using
-multiple storages.  If multiple transactions committed concurrently
-and both transactions involved two or more shared storages, deadlock
-was possible.  This problem has been fixed by introducing a sortKey()
-method to the transaction and storage APIs that is used to define an
-ordering on transaction participants.  This solution will prevent
-deadlocks provided that all transaction participants that use locks
-define a valid sortKey() method.  A warning is raised if a participant
-does not define sortKey().  For backwards compatibility, BaseStorage
-provides a sortKey() that uses __name__.
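-
-A sketch of what a conforming participant might provide (the storage
-class and key here are hypothetical):
-
-    from ZODB import BaseStorage
-
-    class MyStorage(BaseStorage.BaseStorage):
-        def sortKey(self):
-            # must be unique across all storages in the transaction
-            return 'mystorage-1'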
-
-Added code to ThreadedAsync/LoopCallback.py to work around a bug in
-asyncore.py: a handled signal can cause unwanted reads to happen.
-
-A bug in FileStorage related to object uncreation was fixed.  If a
-transaction that created an object was undone, FileStorage could
-write a bogus data record header that could lead to strange errors if
-the object was loaded.  An attempt to load an uncreated object now
-raises KeyError, as expected.
-
-The restore() implementation in FileStorage wrote incorrect
-backpointers for a few corner cases involving versions and undo.  It
-also failed if the backpointer pointed to a record that was before the
-pack time.  These specific bugs have been fixed and new test cases
-were added to cover them.
-
-A bug was fixed in conflict resolution that raised a NameError when a
-class involved in a conflict could not be loaded.  The bug did not
-affect correctness, but prevented ZODB from caching the fact that the
-class was unloadable.  A related bug that caused spurious
-AttributeErrors when a class could not be loaded was also fixed.
-
-The script Tools/zeopack.py was fixed to work with ZEO 2.  It was
-untested and had two silly bugs.
-
-Some C extensions included standard header files before including
-Python.h, which is not allowed.  They now include Python.h first,
-which eliminates compiler warnings in certain configurations.
-
-The BerkeleyDB based storages have been merged from the trunk,
-providing a much more robust version of the storages.  They are not
-backwards compatible with the old storages, but the decision was made
-to update them in this micro release because the old storages did not
-work for all practical purposes.  For details, see Doc/BDBStorage.txt.
-
-What's new in ZODB3 3.1 final?
-===============================
-Release date: 28-Oct-2002
-
-If an error occurs during conflict resolution, the store will silently
-catch the error, log it, and continue as if the conflict was
-unresolvable.  ZODB used to behave this way, and the change to catch
-only ConflictError was causing problems in deployed systems.  There
-are a lot of legitimate errors that should be caught, but it's too
-close to the final release to make the substantial changes needed to
-correct this.
-
-What's new in ZODB3 3.1 beta 3?
-===============================
-Release date: 21-Oct-2002
-
-A small extension was made to the iterator protocol.  The Record
-objects, which are returned by the per-transaction iterators, contain
-a new `data_txn` attribute.  It is None, unless the data contained in
-the record is a logical copy of an earlier transaction's data.  For
-example, when transactional undo modifies an object, it creates a
-logical copy of the earlier transaction's data.  Note that this
-provides a stronger statement about consistency than whether the data
-in two records is the same; it's possible for two different updates to
-an object to coincidentally have the same data.
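-
-A sketch of how an iterator client might use the new attribute
-(assuming the usual record attributes such as oid):
-
-    for txn in storage.iterator():
-        for record in txn:
-            if record.data_txn is not None:
-                # the record's data is a logical copy of the data
-                # committed by transaction record.data_txn
-                print record.oid, 'copies', record.data_txn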
-
-The restore() method was extended to take the data_txn attribute
-mentioned above as an argument.  FileStorage uses the new argument to
-write a backpointer if possible.
-
-A few bugs were fixed.
-
-The setattr slot of the cPersistence C API was being initialized to
-NULL.  The proper initialization was restored, preventing crashes in
-some applications with C extensions that used persistence.
-
-The return value of TimeStamp's __cmp__ method was clipped to return
-only 1, 0, -1.
-
-The restore() method was fixed to write a valid backpointer if the
-update being restored is in a version.
-
-Several bugs and improvements were made to zdaemon, which can be used
-to run the ZEO server.  The parent now forwards signals to the child
-as intended.  Pidfile handling was improved and the trailing newline
-was omitted.
-
-What's new in ZODB3 3.1 beta 2?
-===============================
-Release date: 4-Oct-2002
-
-A few bugs have been fixed, some that were found with the help of
-Neal Norwitz's PyChecker.
-
-The zeoup.py tool has been fixed to allow connecting to a read-only
-storage, when the --nowrite option is given.
-
-Casey Duncan fixed a few bugs in the recent changes to undoLog().
-
-The fstest.py script no longer checks that each object modified in a
-transaction has a serial number that matches the transaction id.
-This invariant is no longer maintained; several new features in the
-3.1 release depend on relaxing it.
-
-The ZopeUndo package was added.  If ZODB3 is being used to run a ZEO
-server that will be used with Zope, it is usually best if the server
-and the Zope client don't share any software.  The Zope undo
-framework, however, requires that a Prefix object be passed between
-client and server.  To support this use, ZopeUndo was created to hold
-the Prefix object.
-
-Many bugs were fixed in ZEO, and a couple of features added.  See
-`ZEO-NEWS.txt` for details.
-
-The ZODB guide included in the Doc directory has been updated.  It is
-still incomplete, but most of the references to old ZODB packages have
-been removed.  There is a new section that briefly explains how to use
-BTrees.
-
-The zeoup.py tool connects using a read-only connection when --nowrite
-is specified.  This feature is useful for checking on read-only ZEO
-servers.
-
-What's new in ZODB3 3.1 beta 1?
-===============================
-Release date: 12-Sep-2002
-
-We've changed the name and version number of the project, but it's
-still the same old ZODB.  There have been a lot of changes since the
-last release.
-
-New ZODB cache
---------------
-
-Toby Dickenson implemented a new Connection cache for ZODB.  The cache
-is responsible for pointer swizzling (translating between oids and
-Python objects) and for keeping recently used objects in memory.  The
-new cache is a big improvement over the old cache.  It strictly honors
-its size limit, where size is specified in number of objects, and it
-evicts objects in least recently used (LRU) order.
-
-Users should take care when setting the cache size, which has a
-default value of 400 objects.  The old version of the cache often held
-many more objects than its specified size.  An application may not
-perform as well with a small cache size, because the cache no longer
-exceeds the limit.
-
-Storages
---------
-
-The index used by FileStorage was reimplemented using a custom BTrees
-object.  The index maps oids to file offsets, and is kept in memory at
-all times.  The new index uses about 1/4 the memory of the old,
-dictionary-based index.  See the module ZODB.fsIndex for details.
-
-A security flaw was corrected in transactionalUndo().  The transaction
-ids returned by undoLog() and used for transactionalUndo() contained a
-file offset.  An attacker could construct a pickle with a bogus
-transaction record in its binary data, deduce the position of the
-pickle in the file from the undo log, then submit an undo with a bogus
-file position that caused the pickle to get written as a regular data
-record.  The implementation was fixed so that file offsets are not
-included in the transaction ids.
-
-Several storages now have an explicit read-only mode.  For example,
-passing the keyword argument read_only=1 to FileStorage will make it
-read-only.  If a write operation is performed on a read-only storage,
-a ReadOnlyError will be raised.
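-
-For example (a sketch):
-
-    from ZODB.FileStorage import FileStorage
-
-    fs = FileStorage('Data.fs', read_only=1)  # writes raise ReadOnlyError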
-
-The storage API was extended with new methods that support the Zope
-Replication Service (ZRS), a proprietary Zope Corp product.  We expect
-these methods to be generally useful.  The methods are:
-
-    - restore(oid, serialno, data, version, transaction)
-
-      Perform a store without doing consistency checks.  A client can
-      use this method to provide a new current revision of an object.
-      The ``serialno`` argument is the new serialno to use for the
-      object, not the serialno of the previous revision.
-
-    - lastTransaction()
-
-      Returns the transaction id of the last committed transaction.
-
-    - lastSerial(oid)
-
-      Return the current serialno for ``oid`` or None.
-
-    - iterator(start=None, stop=None)
-
-      The iterator method isn't new, but the optional ``start`` and
-      ``stop`` arguments are.  These arguments can be used to specify
-      the range of the iterator -- an inclusive range [start, stop].
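-
-A brief sketch of the new methods (storage and oid are assumed to be
-bound already):
-
-    tid = storage.lastTransaction()   # id of the last committed transaction
-    serial = storage.lastSerial(oid)  # current serialno for oid, or None
-    for txn in storage.iterator(tid, tid):
-        pass                          # visits just the inclusive range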
-
-FileStorage is now more cautious about creating a new file when it
-believes a file does not exist.  This change is a workaround for a bug
-in Python versions up to and including 2.1.3.  If the interpreter was
-built without large file support but the platform had it,
-os.path.exists() would return false for large files.  The fix is to
-try to open the file first, and decide whether to create a new file
-based on errno.
-
-The undoLog() and undoInfo() methods of FileStorage can run
-concurrently with other methods.  The internal storage lock is
-released periodically to give other threads a chance to run.  This
-should increase responsiveness of ZEO clients when used with ZEO 2.
-
-New serial numbers are assigned consistently for abortVersion() and
-commitVersion().  When a version is committed, the non-version data
-gets a new serial number.  When a version is aborted, the serial
-number for non-version data does not change.  This means that the
-abortVersion() transaction record has the unique property that its
-transaction id is not the serial number of the data records.
-
-
-Berkeley Storages
------------------
-
-Berkeley storage constructors now take an optional `config` argument,
-which is an instance whose attributes can be used to configure such
-BerkeleyDB policies as an automatic checkpointing interval, lock table
-sizing, and the log directory.  See bsddb3Storage/BerkeleyBase.py for
-details.
-
-A getSize() method has been added to all Berkeley storages.
-
-Berkeley storages open their environments with the DB_THREAD flag.
-
-Some performance optimizations have been implemented in Full storage,
-including the addition of a helper C extension when used with Python
-2.2.  More performance improvements will be added for the ZODB 3.1
-final release.
-
-A new experimental Autopack storage was added which keeps only a
-certain amount of old revision information.  The concepts in this
-storage will be folded into Full and Autopack will likely go away in
-ZODB 3.1 final.  ZODB 3.1 final will also have much improved Minimal
-and Full storages, which eliminate Berkeley lock exhaustion problems,
-reduce memory use, and improve performance.
-
-It is recommended that you use BerkeleyDB 4.0.14 and PyBSDDB 3.4.0
-with the Berkeley storages.  See bsddb3Storage/README.txt for details.
-
-
-BTrees
-------
-
-BTrees no longer ignore exceptions raised when two keys are compared.
-
-Tim Peters fixed several endcase bugs in the BTrees code.  Most
-importantly, after a mix of inserts and deletes in a BTree or TreeSet, it
-was possible (but unlikely) for the internal state of the object to become
-inconsistent.  Symptoms then varied; most often this manifested as a
-mysterious failure to find a key that you knew was present, or that
-tree.keys() would yield an object that disagreed with the tree about
-how many keys there were.
-
-If you suspect such a problem, BTrees and TreeSets now support a ._check()
-method, which does a thorough job of examining the internal tree pointers
-for consistency.  It raises AssertionError if it finds any problems, else
-returns None.  If ._check() raises an exception, the object is damaged,
-and rebuilding the object is the best solution.  All known ways for a
-BTree or TreeSet object to become internally inconsistent have been
-repaired.
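-
-For example (a sketch):
-
->>> from BTrees.IIBTree import IIBTree
->>> t = IIBTree()
->>> t.update({1: 10, 2: 20})
->>> t._check()    # returns None when consistent, raises AssertionError if not
->>>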
-
-Other fixes include:
-
-- Many fixes for range search endcases, including the "range search bug:"
-  If the smallest key S in a bucket in a BTree was deleted, doing a range
-  search on the BTree with S on the high end could claim that the range
-  was empty even when it wasn't.
-
-- Zope Collector #419:  repaired off-by-1 errors and IndexErrors when
-  slicing BTree-based data structures.  For example,
-  an_IIBTree.items()[0:0] had length 1 (should be empty) if the tree
-  wasn't empty.
-
-- The BTree module functions weightedIntersection() and weightedUnion()
-  now treat negative weights as documented.  It's hard to explain what
-  their effects were before this fix, as the sign bits were getting
-  confused with an internal distinction between whether the result
-  should be a set or a mapping.
-
-ZEO
-----
-
-For news about ZEO2, see the file ZEO-NEWS.txt.
-
-This version of ZODB ships with two different versions of ZEO.  It
-includes ZEO 2.0 beta 1, the recommended new version.  (ZEO 2 will
-reach final release before ZODB3.)  The ZEO 2.0 protocol is not
-compatible with ZEO 1.0, so we have also included ZEO 1.0 to support
-people already using ZEO 1.0.
-
-Other features
---------------
-
-When a ConflictError is raised, the exception object now has a
-sensible structure, thanks to a patch from Greg Ward.  The exception
-now uses the following standard attributes: oid, class_name, message,
-serials.  See the ZODB.POSException.ConflictError doc string for
-details.
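-
-A sketch of how an application might inspect the new attributes:
-
-    from ZODB.POSException import ConflictError
-    try:
-        get_transaction().commit()
-    except ConflictError, err:
-        print err.oid, err.class_name, err.serials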
-
-It is now easier to customize the registration of persistent objects
-with a transaction.  The low-level persistence mechanism in
-cPersistence.c registers with the object's jar instead of with the
-current transaction.  The jar (Connection) then registers with the
-transaction.  This redirection would allow specialized Connections to
-change the default policy on how the transaction manager is selected
-without hacking the Transaction module.
-
-Empty transactions can be committed without interacting with the
-storage.  It is possible for registration to occur unintentionally and
-for a persistent object to compensate by marking itself as unchanged.
-When this happens, it's possible to commit a transaction with no
-modified objects.  The change allows such transactions to finish even
-on a read-only storage.
-
-Two new tools were added to the Tools directory.  The ``analyze.py``
-script, based on a tool by Matt Kromer, prints a summary of space
-usage in a FileStorage Data.fs.  The ``checkbtrees.py`` script scans a
-FileStorage Data.fs.  When it finds a BTrees object, it loads the
-object and calls the ``_check`` method.  It prints warning messages
-for any corrupt BTrees objects found.
-
-Documentation
--------------
-
-The user's guide included with this release is still woefully out of date.
-
-Other bugs fixed
-----------------
-
-If an exception occurs inside an _p_deactivate() method, a traceback
-is printed on stderr.  Previous versions of ZODB silently cleared the
-exception.
-
-ExtensionClass and ZODB now work correctly with a Python debug build.
-
-All C code has been fixed to use a consistent set of functions from
-the Python memory API.  This allows ZODB to be used in conjunction
-with pymalloc, the default allocator in Python 2.3.
-
-zdaemon, which can be used to run a ZEO server, more clearly reports
-the exit status of its child processes.
-
-The ZEO server will reinitialize zLOG when it receives a SIGHUP.  This
-allows log file rotation without restarting the server.
-
-What's new in StandaloneZODB 1.0 final?
-=======================================
-Release date: 08-Feb-2002
-
-All copyright notices have been updated to reflect the fact that the
-ZPL 2.0 covers this release.
-
-Added a cleanroom PersistentList.py implementation, which multiply
-inherits from UserDict and Persistent.
-
-Some improvements in setup.py and test.py for sites that don't have
-the Berkeley libraries installed.
-
-A new program, zeoup.py, was added which simply verifies that a ZEO
-server is reachable.  Also, a new program, zeopack.py, was added which
-connects to a ZEO server and packs it.
-
-
-What's new in StandaloneZODB 1.0 c1?
-====================================
-Release Date: 25-Jan-2002
-
-This was the first public release of the StandaloneZODB from Zope
-Corporation.   Everything's new! :)
diff --git a/branches/bug1734/README.txt b/branches/bug1734/README.txt
deleted file mode 100644
index a779f11a..00000000
--- a/branches/bug1734/README.txt
+++ /dev/null
@@ -1,195 +0,0 @@
-ZODB 3.4
-========
-
-Introduction
-------------
-
-The ZODB package provides a set of tools for using the Zope Object
-Database (ZODB) in Python programs separately from Zope.  The tools
-you get are identical to the ones provided in Zope, because they come
-from the same source repository.  They have been packaged for use in
-non-Zope stand-alone Python applications.
-
-The components you get with the ZODB release are as follows:
-
-- Core ZODB, including the persistence machinery
-- Standard storages such as FileStorage
-- The persistent BTrees modules
-- ZEO
-- ZConfig -- a Zope configuration language
-- documentation
-
-Our primary development platforms are Linux and Windows 2000.  The
-test suite should pass without error on all of these platforms,
-although it can take a long time on Windows -- longer if you use
-ZoneAlarm.  Many particularly slow tests are skipped unless you pass
---all as an argument to test.py.
-
-Compatibility
--------------
-
-ZODB 3.4 requires Python 2.3.4 or later.  For best results, we recommend
-Python 2.3.5.
-
-The Zope 2.8 and X3 releases should be compatible with this version of ZODB.
-Note that Zope 2.7 and higher includes ZEO, so this package should only be
-needed to run a ZEO server.
-
-The ZEO server in ZODB 3.4 is currently incompatible with earlier
-versions of ZODB.  If you want to test the software, you must be
-running this release for both client and server.  A backwards
-compatibility mechanism will be provided in a later release.
-
-Prerequisites
--------------
-
-You must have Python installed.  If you've installed Python from RPM,
-be sure that you've installed the development RPMs too, since ZODB
-builds Python extensions.  If you have the source release of ZODB,
-you will need a C compiler.
-
-Installation
-------------
-
-ZODB is released as a distutils package.  To build it, run the setup
-script::
-
-    % python setup.py build
-
-To test the build, run the test script::
-
-    % python test.py
-
-For more verbose test output, append one or two '-v' arguments to this
-command.
-
-If all the tests succeeded, you can install ZODB using the setup
-script::
-
-    % python setup.py install
-
-This should now make all of ZODB accessible to your Python programs.
-
-Testing
--------
-
-ZODB comes with a large test suite that can be run from the source
-directory before ZODB is installed.  The simplest way to run the tests
-is::
-
-    % python test.py -v
-
-This command will run all the tests, printing a single dot for each
-test.  When it finishes, it will print a test summary.  The exact
-number of tests can vary depending on platform and available
-third-party libraries.::
-
-    Ran 1182 tests in 241.269s
-
-    OK
-
-The test script has many more options.  Use the ``-h`` or ``--help``
-options to see a full list of options.  The default test suite omits
-several tests that depend on third-party software or that take a long
-time to run.  To run all the available tests use the ``--all`` option.
-Running all the tests takes much longer.::
-
-    Ran 1561 tests in 1461.557s
-
-    OK
-
-
-History
--------
-
-The version numbering scheme for ZODB is complicated.  Starting with
-the ZODB 3.1 release, we tried to make it simpler.  Versions prior to
-3.1 had different names and different numbers.  This section describes
-the gory details.
-
-Historically, ZODB was distributed as a part of the Zope application
-server.  Jim Fulton's paper at the Python conference in 2000 described
-a version of ZODB he called ZODB 3, based on an earlier persistent
-object system called BoboPOS.  The earliest versions of ZODB 3 were
-released with Zope 2.0.
-
-Andrew Kuchling extracted ZODB from Zope 2.4.1 and packaged it for
-use by standalone Python programs.  He called this version
-"StandaloneZODB".  Andrew's guide to using ZODB is included in the Doc
-directory.  This version of ZODB was hosted at
-http://sf.net/projects/zodb.  It supported Python 1.5.2, and might
-still be of interest to users of this very old Python version.
-
-Zope Corporation released a version of ZODB called "StandaloneZODB
-1.0" in Feb. 2002.  This release was based on Andrew's packaging, but
-built from the same CVS repository as Zope.  It is roughly equivalent
-to the ZODB in Zope 2.5.
-
-Why not call the current release StandaloneZODB?  The name
-StandaloneZODB is a bit of a mouthful.  The standalone part of the
-name suggests that the Zope version is the real version and that this
-is an afterthought, which isn't the case.  So we're calling this
-release "ZODB".
-
-To make matters worse, we worked on a ZODB4 package for a while and
-made a couple of alpha releases.  We've now abandoned that effort,
-because we didn't have the resources to pursue it while also
-maintaining ZODB(3).
-
-License
--------
-
-ZODB is distributed under the Zope Public License, an OSI-approved
-open source license.  Please see the LICENSE.txt file for terms and
-conditions.
-
-The ZODB/ZEO Programming Guide included in the documentation is a
-modified version of Andrew Kuchling's original guide, provided under
-the terms of the GNU Free Documentation License.
-
-
-More information
-----------------
-
-We maintain a Wiki page about all things ZODB, including status on
-future directions for ZODB.  Please see
-
-    http://www.zope.org/Wikis/ZODB
-
-and feel free to contribute your comments.  There is a Mailman mailing
-list in place to discuss all issues related to ZODB.  You can send
-questions to
-
-    zodb-dev@zope.org
-
-or subscribe at
-
-    http://lists.zope.org/mailman/listinfo/zodb-dev
-
-and view its archives at
-
-    http://lists.zope.org/pipermail/zodb-dev
-
-Note that Zope Corp mailing lists have a subscriber-only posting policy.
-
-Andrew's ZODB Programmer's Guide is made available in several
-forms, including DVI and HTML.  To view it online, point your
-browser at the file Doc/guide/zodb/index.html.
-
-
-Bugs and Patches
-----------------
-
-Bug reports and patches should be added to the Zope Collector, with
-topic "Database":
-
-    http://collector.zope.org/Zope
-
-
-..
-   Local Variables:
-   mode: indented-text
-   indent-tabs-mode: nil
-   sentence-end-double-space: t
-   fill-column: 70
-   End:
diff --git a/branches/bug1734/doc/ACKS b/branches/bug1734/doc/ACKS
deleted file mode 100644
index a7fff6b0..00000000
--- a/branches/bug1734/doc/ACKS
+++ /dev/null
@@ -1,42 +0,0 @@
-Acknowledgments
----------------
-
-This file lists people who have contributed code, bug fixes, ideas, and
-other support for ZODB.  Alas, it is probably not complete.
-
-Steve Alexander
-Anthony Baxter
-John Belmonte
-Johan Dahlin
-Toby Dickenson
-Fred L. Drake, Jr.
-Casey Duncan
-Jon Dyte
-Martijn Faassen
-Jim Fulton
-Marius Gedminas
-Florent Guillaume
-Shane Hathaway
-Nicholas Henke
-Jeremy Hylton
-Matt Kromer
-Andrew Kuchling
-Andreas Jung
-Brian Lloyd
-Ken Manheimer
-Dieter Maurer
-Chris McDonough
-Gintautas Miliauskas
-Tim Peters
-Chris Petrilli
-Christian Reis
-Guido van Rossum
-Neil Schemenauer
-Tres Seaver
-Sidnei da Silva
-Evan Simpson
-Kapil Thangavelu
-Jens Vagelpohl
-Greg Ward
-Barry Warsaw
-Chris Withers
diff --git a/branches/bug1734/doc/Makefile b/branches/bug1734/doc/Makefile
deleted file mode 100644
index dba69a37..00000000
--- a/branches/bug1734/doc/Makefile
+++ /dev/null
@@ -1,36 +0,0 @@
-MKHOWTO=mkhowto
-
-MKHTML=$(MKHOWTO) --html --iconserver=. --split=4 --dvips-safe
-
-ZODBTEX = guide/gfdl.tex guide/introduction.tex guide/modules.tex \
-	  guide/prog-zodb.tex guide/storages.tex guide/transactions.tex \
-	  guide/zeo.tex guide/zodb.tex 
-
-default: pdf
-all:	 pdf ps html
-
-pdf:	storage.pdf zodb.pdf
-ps:	storage.ps zodb.ps
-
-html:	storage/storage.html zodb/zodb.html
-
-storage.pdf: storage.tex
-	$(MKHOWTO) --pdf $<
-
-storage.ps: storage.tex
-	$(MKHOWTO) --ps $<
-
-storage/storage.html: storage.tex
-	$(MKHTML) storage.tex
-
-zodb.pdf: $(ZODBTEX)
-	$(MKHOWTO) --pdf guide/zodb.tex
-
-zodb.ps: $(ZODBTEX)
-	$(MKHOWTO) --ps guide/zodb.tex
-
-zodb/zodb.html: $(ZODBTEX)
-	$(MKHTML) guide/zodb.tex
-
-clobber:
-	rm -rf storage.pdf storage.ps storage/ zodb.pdf zodb.ps zodb/
diff --git a/branches/bug1734/doc/README.txt b/branches/bug1734/doc/README.txt
deleted file mode 100644
index e5f17934..00000000
--- a/branches/bug1734/doc/README.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-ZODB Documentation
-==================
-
-Simple text files
------------------
-
-This is a brief summary of the text documentation included with ZODB.
-Most of the text actually uses the reStructuredText format.  The
-summary lists the title and path of each document.
-
-BerkeleyDB Storages for ZODB
-Doc/BDBStorage.txt
-
-Using zdctl and zdrun to manage server processes
-Doc/zdctl.txt
-
-ZEO Client Cache
-Doc/ZEO/cache.txt
-
-Running a ZEO Server HOWTO
-Doc/ZEO/howto.txt
-
-ZEO Client Cache Tracing
-Doc/ZEO/trace.txt
-
-Formatted documents
--------------------
-
-There are two documents written using the Python documentation tools.
-
-  ZODB/ZEO Programming Guide
-    PDF:  zodb.pdf
-    HTML: zodb/zodb.html
-
-  ZODB Storage API
-    PDF:  storage.pdf
-    HTML: storage/storage.html
-
-The documents located here can be formatted using the mkhowto script,
-which is part of the Python documentation tools.  The recommended way
-to use this script is to create a symlink to it from some handy bin/
-directory; the script is located at Doc/tools/mkhowto in the Python
-source distribution, and will locate the various support files it
-needs appropriately.
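-
-For example, assuming the Python source tree is unpacked at
-/usr/local/src/Python-2.3 (an arbitrary path used only for
-illustration)::
-
-    % ln -s /usr/local/src/Python-2.3/Doc/tools/mkhowto ~/bin/mkhowto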
-
-The Makefile contains the commands to produce both the PDF and HTML
-versions of the documents.
diff --git a/branches/bug1734/doc/ZConfig/Makefile b/branches/bug1734/doc/ZConfig/Makefile
deleted file mode 100644
index 671d436b..00000000
--- a/branches/bug1734/doc/ZConfig/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-# Rules to convert the documentation to a single PDF file.
-#
-# PostScript, HTML, and plain text output are also supported, though
-# PDF is the default.
-#
-# See the README.txt file for information on the mkhowto program used
-# to generate the formatted versions of the documentation.
-
-.PHONY:	default all html pdf ps text
-
-default:  pdf
-all:	  html pdf ps text
-
-html:   zconfig/zconfig.html
-pdf:	zconfig.pdf
-ps:	zconfig.ps
-text:   zconfig.txt
-
-zconfig/zconfig.html:  zconfig.tex schema.dtd xmlmarkup.perl
-	mkhowto --html $<
-
-zconfig.pdf:  zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --pdf $<
-
-zconfig.ps:  zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --postscript $<
-
-zconfig.txt: zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --text $<
-
-clean:
-	rm -f zconfig.l2h zconfig.l2h~
-
-clobber:  clean
-	rm -f zconfig.pdf zconfig.ps zconfig.txt
-	rm -rf zconfig
diff --git a/branches/bug1734/doc/ZConfig/README.txt b/branches/bug1734/doc/ZConfig/README.txt
deleted file mode 100644
index 99d2aed0..00000000
--- a/branches/bug1734/doc/ZConfig/README.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-The zconfig.tex document in this directory contains the reference
-documentation for the ZConfig package.  This documentation is written
-using the Python LaTeX styles.
-
-To format the documentation, get a copy of the Python documentation
-tools (the Doc/ directory from the Python sources), and create a
-symlink to the tools/mkhowto script from some convenient bin/
-directory.  You will need to have a fairly complete set of
-documentation tools installed on your platform; see
-
-    http://www.python.org/doc/current/doc/doc.html
-
-for more information on the tools.
-
-This documentation requires the latest version of the Python
-documentation tools from CVS.
diff --git a/branches/bug1734/doc/ZConfig/schema.dtd b/branches/bug1734/doc/ZConfig/schema.dtd
deleted file mode 100644
index 37d2ac3f..00000000
--- a/branches/bug1734/doc/ZConfig/schema.dtd
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--
-  *************************************************************************
-  Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE.
-  *************************************************************************
-
-  Please note that not all documents that conform to this DTD are
-  legal ZConfig schema.  The ZConfig reference manual describes many
-  constraints that are important to understanding ZConfig schema.
-  -->
-
-<!-- DTD for ZConfig schema documents. -->
-
-<!ELEMENT schema (description?, metadefault?, example?,
-                  import*,
-                  (sectiontype | abstracttype)*,
-                  (section | key | multisection | multikey)*)>
-<!ATTLIST schema
-          extends    NMTOKEN  #IMPLIED
-          prefix     NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          keytype    NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED>
-
-<!ELEMENT component (description?, (sectiontype | abstracttype)*)>
-<!ATTLIST component
-          prefix     NMTOKEN  #IMPLIED>
-
-<!ELEMENT import EMPTY>
-<!ATTLIST import
-          file       CDATA    #IMPLIED
-          package    NMTOKEN  #IMPLIED
-          src        CDATA    #IMPLIED>
-
-<!ELEMENT description (#PCDATA)*>
-<!ATTLIST description
-          format     NMTOKEN  #IMPLIED>
-
-<!ELEMENT metadefault (#PCDATA)*>
-<!ELEMENT example     (#PCDATA)*>
-
-<!ELEMENT sectiontype (description?, 
-                       (section | key | multisection | multikey)*)>
-<!ATTLIST sectiontype
-          name       NMTOKEN  #REQUIRED
-          prefix     NMTOKEN  #IMPLIED
-          keytype    NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          implements NMTOKEN  #IMPLIED
-          extends    NMTOKEN  #IMPLIED>
-
-<!ELEMENT abstracttype (description?)>
-<!ATTLIST abstracttype
-          name       NMTOKEN  #REQUIRED
-          prefix     NMTOKEN  #IMPLIED>
-
-<!ELEMENT default    (#PCDATA)*>
-<!ATTLIST default
-          key        CDATA    #IMPLIED>
-
-<!ELEMENT key (description?, metadefault?, example?, default*)>
-<!ATTLIST key
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no"
-          default    CDATA    #IMPLIED>
-
-<!ELEMENT multikey (description?, metadefault?, example?, default*)>
-<!ATTLIST multikey
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
-
-<!ELEMENT section (description?)>
-<!ATTLIST section
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          type       NMTOKEN  #REQUIRED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
-
-<!ELEMENT multisection (description?)>
-<!ATTLIST multisection
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          type       NMTOKEN  #REQUIRED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
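-
-<!-- Illustrative sketch only (names and values here are examples): a
-     minimal document conforming to this DTD, though still subject to
-     the extra constraints described in the ZConfig reference manual:
-
-       <schema>
-         <description>Example server options</description>
-         <key name="port" datatype="integer" default="8080"/>
-       </schema>
-  -->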
diff --git a/branches/bug1734/doc/ZConfig/xmlmarkup.perl b/branches/bug1734/doc/ZConfig/xmlmarkup.perl
deleted file mode 100644
index 769a17ef..00000000
--- a/branches/bug1734/doc/ZConfig/xmlmarkup.perl
+++ /dev/null
@@ -1,59 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-# LaTeX2HTML support for the xmlmarkup package.  Doesn't do indexing.
-
-package main;
-
-
-sub do_cmd_element{
-    local($_) = @_;
-    my $name = next_argument();
-    return "<tt class='element'>$name</tt>" . $_;
-}
-
-sub do_cmd_attribute{
-    local($_) = @_;
-    my $name = next_argument();
-    return "<tt class='attribute'>$name</tt>" . $_;
-}
-
-sub do_env_attributedesc{
-    local($_) = @_;
-    my $name = next_argument();
-    my $valuetype = next_argument();
-    return ("\n<dl class='macrodesc'>"
-            . "\n<dt><b><tt class='macro'>$name</tt></b>"
-            . "&nbsp;&nbsp;&nbsp;($valuetype)"
-            . "\n<dd>"
-            . $_
-            . "</dl>");
-}
-
-sub do_env_elementdesc{
-    local($_) = @_;
-    my $name = next_argument();
-    my $contentmodel = next_argument();
-    return ("\n<dl class='elementdesc'>"
-            . "\n<dt class='start-tag'><tt>&lt;"
-            . "<b class='element'>$name</b>&gt;</tt>"
-            . "\n<dd class='content-model'>$contentmodel"
-            . "\n<dt class='endtag'><tt>&lt;/"
-            . "<b class='element'>$name</b>&gt;</tt>"
-            . "\n<dd class='description'>"
-            . $_
-            . "</dl>");
-}
-
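-# Illustrative note on what the handlers above produce: LaTeX source
-# such as \element{schema} is rendered as <tt class='element'>schema</tt>,
-# and \attribute{name} as <tt class='attribute'>name</tt>.
-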
-1;				# Must end with this, because Perl is bogus.
diff --git a/branches/bug1734/doc/ZConfig/xmlmarkup.sty b/branches/bug1734/doc/ZConfig/xmlmarkup.sty
deleted file mode 100644
index 6650f319..00000000
--- a/branches/bug1734/doc/ZConfig/xmlmarkup.sty
+++ /dev/null
@@ -1,38 +0,0 @@
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%
-% Copyright (c) 2003 Zope Corporation and Contributors.
-% All Rights Reserved.
-%
-% This software is subject to the provisions of the Zope Public License,
-% Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-% THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-% WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-% WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-% FOR A PARTICULAR PURPOSE.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Define some simple markup for the LaTeX command documentation:
-
-\ProvidesPackage{xmlmarkup}
-\RequirePackage{python}      % fulllineitems environment
-
-\newcommand{\element}[1]{\code{#1}}
-\newcommand{\attribute}[1]{\code{#1}}
-
-% \begin{elementdesc}{type}{content-model}
-\newenvironment{elementdesc}[2]{
-  \begin{fulllineitems}
-    \item[\code{\textless{\bfseries #1}\textgreater}]
-    \code{#2}
-    \item[\code{\textless/{\bfseries #1}\textgreater}]
-    \index{#1 element@\py@idxcode{#1} element}
-    \index{elements!#1@\py@idxcode{#1}}
-}{\end{fulllineitems}}
-
-% \begin{attributedesc}{name}{content-type}
-\newenvironment{attributedesc}[2]{
-  \begin{fulllineitems}
-    \item[\code{\bfseries#1}{\quad(#2)}]
-    \index{#1@\py@idxcode{#1}}
-}{\end{fulllineitems}}
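-
-% Illustrative usage sketch (the element and attribute names here are
-% examples only):
-%
-%   \begin{elementdesc}{section}{(description?)}
-%     Documents the section element.
-%   \end{elementdesc}
-%
-%   \begin{attributedesc}{name}{NMTOKEN}
-%     Documents the name attribute.
-%   \end{attributedesc}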
diff --git a/branches/bug1734/doc/ZConfig/zconfig.pdf b/branches/bug1734/doc/ZConfig/zconfig.pdf
deleted file mode 100644
index 3d0c0849f9d02bf5d8c1f9cfd4b4fa0155d19ae8..0000000000000000000000000000000000000000
Binary files a/branches/bug1734/doc/ZConfig/zconfig.pdf and /dev/null differ
zibxR&C)vO>dU`q#gnZYYZ}(_g?9Wp?e5+Y(v^IFx17c?Uh~hU8NB3+}(8@FkQhYyw
zQq2jO`mr&?_44g;abDqS-fK2ieLt60z99^rJkMd@fWpBQ5&L3t(VAB?#Kojn;j_gF
zaRM5ULOF&e#(dG!_4m9-vnX0mQ-!<J$33Fp`@A9nm>G%!bguATmVT>Hj8PHIcC&QK
zTqaWKazi^g_lZ!^MAsxz*I=<{?eXxs;A<zuEVHf`=I7Z9=}8jXV(0ebNxdX6q6j42
z3{ia&EwK3D%Av1=G{VB$#|O+~WKG@lA&EQi^e}q6-NZkzyf4l`u2jVa@FlW~^(lFr
z9?_<!zp!b5?P^eLv!0}^(?1mMBw}QHfT@LH@h>8`KOFF{0@x~3_-{ldfstXQL;<E3
za`qQv(1aVgcnt!7N6n;oH-6!$zkecScJyW9y)t=B!wvLBK9jWTOTY%prjg-hWInK|
zUK4l?FtuU+?wqrX+h)JKtZYq~<$=WzJpE9C=^6{(aav6-U_wVumgtZR+6a+31s_p-
zaZI(pj^qg0*B!nQRcKX(d~o(T`x=Zj^mf-1#vhXxdSDno=$cW{&6Yu#Q~&21#$$gk
zb`r$8*0~8PVqtt(>uu@7QPB=R-7~2IN3n)f+c^YU%>ku`AM27fG#p1x#4U8Sm#F*D
zed|xb>aj(|yv9L1ViB<OqI@kgs)ZiWcmVo7ingjAXo5W(YS4AL-LH!6UGpNCIBkLj
zsv!fAM)Xhmf~fWmH??R9x0V|#4l>?LPXZ+YBADMQ%<zEJ!Gu)jgCc;0tnY^y0ba0m
zF=5xbX@t;c3G0H+G;%$`0Z8yv6~84bNmhpLS5$GF5b8|m!DWIXEAZ<Jo*$vhUSLV$
zvNz=<h1>t&i5#3Ak`yIC%PLB-moDCTxma2hX7GSQ5o+RZK~biF^O40_N?~GgdJt@k
z5JQ#`mk<>(1+n0oS*XcgS?r~ru_9ZCkn(vh!NWHL4R8Y9(_~=VL3jqy@PJaX$Y7|X
zHRDpFG{c$Xh0n8m>Zr_~?BXas(_-y;lRcfG8b&HcfT{%9%1`f3;g8Jm#GT>y<+^pq
z&)K51_uPJa<c-ymA6}I_ua+&h>*BX8e9?uQdd}$q<e{s1-`+(WQFvmv4~VBTt?b%^
z4(KduxQ1=2I4tx}#<s)05xJoV8{GqZ8y$(AG_ee)e4TYQ-qWDo>=MgAK>qNNa@xgU
zpS)n>QTa3%bkLyoJ3pG6cX}Mdk&Tw{r$@LHkqBA5?H`YM1z=)yN|>n9CGxpL!)mqD
zUKUAZmSRdGP%GMkOY0!&?<bn6UNxa=mI>HH;+}DhA#b_vb6!#;3bGr84^Ehnj0+0(
z?>*9%Dxl#2#1veYkI~JRS=(8}LZrSlX22qk@s7yXJCDD_DufYTSAC}=8z(LVho$>A
z%GR%>GWsL8JgCxa5pB5MM~O95c;9@CXnl+7YD3txkHm#h5uTM?VOvsbtu?gLpvJ>^
z;6ZVdMHPB*wQ5dtulA|ax<#qa*6xk0Uy-lRjC)jnP;paXwYdvNCLyA$>k4kr`IfQI
z+J;Mt+{je~XVQt}qHA?aXTybc6l7z6W2XtsK73><h<3YsG<ZLukv@S+9z~MNhZNAs
zc?!4&_Y~A^YixfOx&<AhJEFx;tVB3zFPW`UvPMD5`uMpZu!UZ14+hB<6sZH1Lm-S@
z5xrp$p7r%vU{_L!x!PMOmJOZs7}i8DN;&%m&2q-B29h8-Nt-fZ7mS9K{ktZ+y(Ok$
zhJKBprnf-~AM6n`B4~i>yv2Lt^L3F+>t^4o^+$QESnAjYhad#BAOCCu+6bTin2ukV
zcfdI99S3k!&MPiPqotUCj-tjHwE6h{y_Zu!@Pe)|<Zx05nabpwS*$p|^CdNf`}MAg
zHi*tR&A9nbF!ZST-hZbZ#{b(f`g;Zc7Zu~LSw{b$oqr7LF?@7ae`~vH{gOlYx7Yq)
zk-spm_p!+9qfPrUu1C-Ep<1vLurRXGvV6b}3j;GPD?7(;<9Y;i$_nCt`#@pp;9$c^
zM`vhlU{7n~>R@VZMQd$mLT6#7Pxn6?y)3LvOw6oIXiXg~Ehv9E8S}3w_#2!3ez*RL
z=<*-pSN=g)zarJIr1gtO^&ew;f049)J?$T)#m4rRbL%TGRx4*eU-9T57Qj1F)w+1H
z?I=+dD{tqZ+fpgZIqWT^yL8w1pW6&=IvblTB$L#679$MqE-yE4;xs#KXKM~@FeX*n
zyt$;}qtRWU%X;6m8&S;+>1<rWOnY8WG}Fzy?e<sBCbxV7h)^h6R!7Np3@$HKk}Pty
zXJ@T1eT{;Pw)~!ybE}RX#<2c{lj_n@ktzFj8t>j{0Xe<g=%BUUA(Q4ZT}7i*YqoN8
zvlA>bUfy8yRs_SP+<E@&v1KDX`+E!je29m>?g=Dcg4DVNpO0XsnzaXlr_Ko+9Sxf=
zj7Z$eXk-ErtdLN;NwL!rr-q0u>_bC4)zSH!?~*_(NS_GBgk%7|@F*;IZy#Gk)@=Vh
znO(P}`a8pFS?m02@@b?)np@GMzC!`OyHYFBPG+S~Ey*nB^}gJ8=KSns)%coPw-*6W
zwR40SJ11V3;M;zSwq?bXg+X*BJ;KpPYKkLRE43qrHUqYp2YuM%npl1b?8s`ebfo@6
z3W5ca@S>yeb3J``i5PN6wq<Kapb|zsItuw_qzDvddu&Zen@!yeyysL#K-Nlx-2^-^
zwry@ozNtXUdFoJuv<^~X<C&VJJ<J#6nG@@39`-Qquclu*Tw;e@8i1Rl+KoU{_>U?{
zw1CitN^^yomoxYGVxag*h?(1&2p`la#4w>Ds<oCgO8vGzDITcxM!3(JvI;s1O~L>r
zvcJZMHaeupP8L<1_&%=y<L45m65oRZl#e1<tpJL4ZE?~i%nE{VY5Pg2Ndq^29i?ma
zzw~DmO%&i!F>&*rQw~H(b7Azl=E#6>SoX~!Bpq`VnyjdmdR3I%2ehUdl`NbEzX$0a
zI(`bf=+p|%nN@%1Vli1ZQxsR>xqJOKt0&{uTUGvAPY$-YoENTe)s>UHA(#LO0kDS{
z!zc_LGluf%t}DI(s1))|xqVa#D)qBw)rb=6NI-eFk4|DEKD16@19T%BMoXb(r1YCg
zybm6_q~siI9%y0!CyChi_87oGhQahCo=l@fMG?1e06u-K%H#3^Y>-+{bXBO33bR2W
zPXH6<P<SL>MEQ5dfbKLbM)Es2wM>8sR>Nu$USq=mO{K?A7@rX+CY+1o0J;X`<xxrI
z(hyAN!4eD6<Mn5{U+jpn9Kfuzw=&mDh%j=sG-#9wYx|GSxV&K+40c&!BS-=mbyvN8
z1@<|r#y0t^9q!foV(%4;9vuf5D=N%`#XA)X7j;%3z?8zUy)&TLqKv&P(>Iu3OxVW@
z))jK)WpHHD;Bcmd!D*89ah99G5kwG~$gY=4_|@5XaFdr;><3Q*hz)=;xb-*KEZqwi
zEmkw!a$WLC2iFQaEECgi!w7Iu$`;bTtejS=R)|Ni*M6N6!9O~8I5hWFaVR5iV?QpX
z*SpukF?<TRw&PaeY_y+&bs-djP{<=DN>j4ZO{Q_nF#=WP`u;*FaT~u9WmU7k%_U<?
zF!x#ujvbL#+u)NAqaS_98ggtWwh&=8CxJQ!^A<kn<WBU-+ObbXd5vpZ*7z}D*$e6t
zMe#e(KB}omIQb{$mU_udw}(w9PK6a3{iRkZ^Byqd!k<kyIgJIkm(<xWQjsXC9jMwl
ziIYojKgCmNlh75ouPUXMlvYiz$ymRXWvlBxj@1S|yEVQ<ZPwy}tvt-hTBu`g2oM43
za<k+qXUky4y;(V`;L<gYN~b_!PC{n$WxBkbo>pZ~y}Uvxa_!uM0H)ui#7V;zTohfn
zaOUgv-A8vB76oW%()tvkjiI{^3%ZOpl@6N@1`H^%eez<<TtrkkB7-72T;x<mAK5P~
zq(xKfnl&U#;Zr?Wo*V~J)KOh-VEig^q<s$#932kWP)3kx5waUR(L~tN_S}ptN<VFF
za+J(s&9AyU7mkETdTU*WZGhspzbfAY>5=zN(BvE9gR88oJ5<!X>A+TMAmPITcuzli
zu_T8|?+YXD&Unkq7NCaD{v63DNdVuDX+83>dwA4Y1>Sb-MZv?gnz>HT)HcwnMi}A#
z%7+afPp${KrI{#7h;*fXw}&h}4>{E$tt-Mkepib`fFUnVXZv$QE!b{Ii>{SBBou)<
zv3&=Z^GYP%&3M8@e#|i`DR8exs3ym#e>xQOs(j(=vZb%%tr-9Gl;R@0YHL<O4jf9U
zr*mFOS6Z_0ZQ%{DH+?$88)>iX?kLdqj!3>GvYJ_)&tylvi6RDK9e!GxB`;$>Us}Ig
z+gI-IPuE_A&*XUYJWxVwlQ+FPvpehV&gwL4`=^1_9#4f{AWMOkJX&~YfM^B2W&P>>
zmx^d>3TM&6+HP;e@)T{@1sCDMGQ2;mCvaO>_c5P3mJ^P1VJhUkfdgdG?f-;@ewAYW
zT=`-AU7Yzfwt(RSME*)g=MNU*<t6yI@(1leF?=A)KPxzlzj)F<)~B*DGt+X=v;Dhx
z^ZO<L0H$BMGJlR2|4?k`KK~sx{vuNR$}xYq%YI8r_>1CACdO*i>cG8cry##x0uT|c
zNl?DnBE=RO+!O)-x|=_DpKA)KD76!%!nT~#-6KyjrnayZ4mNaoP_W6wn?z0MmaK&^
zN+6XCn^{)yH18@(QL8ui+PZgDtu)$!S&J(>!hz7U<kf%&qqI(=toP>o%N4LD?BlU9
zIG<}Aq9829y%V`13aGNZDdeY>vwR5qZVKzImz>h^IANK~$re5<XIn6o3%=tEys1lB
z)o++bx68BXTlgy%`LE|P1vb0$gA}24pI%N3w<yk>l0W-(lW63T)8V{hof($T%fhu8
zzMy}uSQT6}Hkm9ehEwkDQg}ST+ZQ3km`k-cp-GW>R9-Eg>gCc#WZWq8t9FunZEMcP
zyc%eFFz-CW!vqGOb+l|G3Zs>bndjt4(Khv3EV8$hE30OBI~DHp-hzsCWumpD?dUlu
zTajha6iMl)=&M=VLl}hGbs=X8yUz?0N)&Io(y^z<4UkO$_k`{wf_llbb^d;BMI^XC
z0}#TeJU$!eg%jK?g}b1RKKTtUo_2M|4hzVKU$-I_S73@FDcX%XZ);YC%(v3~Eu>I%
zZe$&|l0E%;l@N6+@SF=_T3+b1lBz%n=|KK~#OX2FI6M(1FsyqD!CYK$Xi{U#w1Eh+
za)`vf)=0YnX1p&r&uV1F0gVS+-)gYiUloAJn;M8IWiNl*wiKA#OCFZ;hCWs%T%7h3
zv$#=8|AxS3F`$rxiB(JmZ*zRZyYg39@X|5E;u!~H0WFk+wR9vT#!$aO>YtTVDFb>L
zE^K;Qkln1{s{2H7<~d*CcqR2OS0oj6j{@Q7vRqV2%u=8rgFRD5Qm*(gdUx6?EMMKk
zx$Tz9hhLp9F|p@<M9Jb8D0zHB`wkMC+c$id#Ncf)4KI5K|2@nHzp{a?h;*|Z;_;og
zdW))Vl-TAgD-$g!h61Pad2@ZsLDA%3Df>+gbHh=oXheq{GY_tZ-)wL9(fdp@TSA>n
zaawMNnuM`e8{zdk)P^(vs?gIBTyvkRoN9FDMdd0p<Pf&=CEi8A*8w5lxMf}`qQG?w
zb8Hs|64lS?hdErb9@qP|`AXbhEH7dC=8?{;!b>Y(P9sb6p;Ahk6IJr%cy4$y<fXtp
z@DdSgAoA9lNpZ=cn-em}?-woOK%5Gs&Iw&_sub*tJN8b@?i~T$j#iMl`i$0;m>|0|
z<wRVE0cVS|sJA1awFhSWvd91)G3()_6ltD9#gJ;SB|A(jjkb!}Aq_aluETx53!ZOk
zfuh}0?$oI*d_%$Th=;dIj8rbRyS7^jp$Wia$iKz!AMdTHr3F5h25Vrduy8+y>V?B6
za<5+y%=5>}>kyfZdA(97R{Z+90t!?(;~VX>=${gZUTf|K+^c$7S~u3Mq6Ba16pCVq
zog%WI6Y<!`7N|$<_k2t~8rnAL=iN|bzsg^&;Wdy0=Dqp0kzTlW8@yxsZMss~u^HaJ
zpY%>=D#(b+9}1+wBS6BOI_<L|RSGjd-2x!ZEU7L-BJhTpJ{;XbWR@(~S{c-Z(_88E
z?h?9RuH`pq?JzG|SVxB{njT7mtw0vI?>sL2-Mbh$T9X529QYml?nN~YvHmMf4VAj0
zVGlx~LN%t?PZ%P#x<!5|Pj?Q-YgeM@hy?Yv1!B7~yKXU+r)V7B9iayUQs0<XiBwLF
zA%bDxC=Wf4HBWmE`}GzQKQ9o^+hAM_hWe1R=2#7oA-2}2gOa~V$&1lT00ww{zH4wd
zZVJAc0|5xb4fv)8LE!GM!}5X(=%0Lq=LR`L<14arNK69#Wgi(zNa%-{y7WzUann>E
z8@G%4;H3OQR*9--25AgslSDsNu^h$mozEr%;Z-ONV5ei5BsXc^>Jid~TqV435<*9W
zaOv)SUS6IkFagFAm&@*uHa#h9w(E!?SPVjt`XK-Ou#`JkyQ9)btsNR^DsI~7DE4|0
zV|*7k{;?fHqn{XEf;QtDcVWuYs=REuQiiX~-Z2)OY#{^gUSA?UM|C3g*C(#>g&!fP
zC?Lmc>_Rnh2jW+N=bnu03|wg6Dp}apir?W>F$cb4;bFjmu0qc8zQYU7`aSD=0C%XT
zO-4zP-n|!|cjmoux%6S#A7ic=khE?G17IXPaUl=wo?N5h)rwmJ9L4lU<uMcxODF?{
zx+UwkhA9jY%i0EN?bA9RG)m6L&TbFmtxC2R(y0(El?R80uYpdYxGzKZ^Qq=Yc_<&&
z{5a!FSUa<>J}l_b!&=yprSGbzdtCIelmGGwJ%U6^iTCh|?aC(OISouo=4Aw2MO7YY
z%RX2L57vc7YP2)0^Vz2!)Rc3w3OnAQ;|a`8qwk*moBZ?DcM;c;;J);8)<zI|EL@_X
z3`yN`v)KXq%p}`0ozXSDcFwvEJ=ijkrO*y;n^mkN>dict^`fBJZga<1ycP(y6u#M?
z9<94{B=I#~Zm3xFS0BBH&I4;Bqyc!5-RL-O4M=mQYQR@iYZb^tGHe$|HP0ZaYvRnu
zOVeTQF%oa@xGdPM!)bQGef>gXyyvlC^a6op>{2R@K@nQAT^J(8S^nzI&p}6y^J=ma
zgj>O1erPhTi4?{DXihJEx}dk%_bMLAq;|@KJ;V>iDrNjpGSMkA58AGbJikuvui)=0
z;IMMOoO3Z6$X4ehL&pxfGt}tK2U9VjA?A!{;<mJAi_n4_Mb^FRL9g5k4&|-<hWGqB
z_8o&v*;j*^=E<t7Oqx&0+0gQQ-|)xHF)Ef?C1{!sN!y`yT9vk!1BJQn_Zwfr17Bsx
zrS-=<-|fa$9j{1M(sF&iu8HCsi(AaaTDiUF0|)e?RIWv5{EK%cxiGX>%yR1RH>vz4
zTZ$=9M`b{Kuki-Iv>*7y_Yv8D0(HMN-2Pyt{{VIWqh=`>*;_l>85r3UFfjfeD)pD!
zdA|r;Ourj^2pE`t>w77DeEt)`>-V4e=QI7U`BWb)$V|^c%ly$iWMpEa<@m_A`uLib
ziS_@X*7dgo{}+d1`cT`LSvY=QNYDI%gv<<Vw9Fs!9y2}b$CCN~=42mA+`mb0f4gu0
z#c@9F5fd8+E&Ff68pFpp$43M+6Z?m&=|Ad$&d$!Xc1DJu46U7vXbr3_>Hek-YTDRY
zI~W-_5NJADnd)1+5NJACn;Gi!e|+)P$j*v@mY&tfQq$?T@3?E4+BsSn5jdL}I+)V3
zuozi-x_kadE%D!A|MyPt|J~>P{a5ji8}SS5|JsxN1^oY1K>V-4|HBLRkH4C~Yl+OP
ze-->I)YPq4KO=Z8R>>)YBmtO>?@Jdw2>Bln!i$7scn<kGkXF(xpp;xy6QR6aW-g}|
zP{`W?=hH{D8y~K4@JdACKE>}2;fl^6MxjTlu1-%r=AABPamffHx$VBz6Sl~1%3iX$
zHC*L0%`;(2brzq~9!4rRuO|l5Vtu)=T@^1JO+A%brI9sr&!aBfG;x*rPI_qw8VNh}
z<nj{?B_tZEi4ko^S}ihn%v^GAcE(-=Nkr<qVwvdux5KVEST%SR8Pwu8Uo@$~FXMZ4
ziZq}4Ew;%g?h@-O3jN!Zj)D54(X9vq9eS<>C1OE*W6P5UD5KacPs?2}uy$X29Dx^Z
zMZm_&j7?$X`ss;FEj8Hs?1$S_ji!ssrzk%~KI&Sqdc2d{f@Jr1F$hP^KnmjojMcg`
zTpOjs_XkIXASIhsg&cwp+S<P%q2akOSa)f(P$}iT4hF8Aq_H-f@W<zJ&vq;{YqO|4
z?{63X=+fBQ<rm&4iO#-FJABRz6<Jq0FGalj;?Wcn3!$|c(^krSjPE1D+~%%~W;z1=
z=`|L2h!U$^zHLdFG-yUK2`fID+q{~V8u|-!Fs`JjM2jkLypstH{i)EhipdPYgJ1~D
z2s;7g9n6l)Pa1b*Qqz*&38=`(6778`<Vg|pJnjibE`2m>$~G;&9E2X=df->54?B|~
zYtoiX{f;}u<344Xn_@%CDSL3Mq*x$u#Z9e;VVYgI?W_eo+Yya>Mrv9Ql<~bZI+)f~
zOU;!B1Q3S*5|h*0oO{}oK&V@$XLIHlTAV~cvEzMy*>I*U)LbJ(-Vor&!fBtp5GzH(
zrpz|?GHOF#wyMi=KCP>wh-0kQY@BFkh68JIk<dl{iyZ;<76oZ_>9Ko+9%PH-a9++#
z5I_34g^_dZYZqeWNWsuX<ZEgy(PZsiSeLk3&g#@~0z}3z_1zHWcfA?{>C;SYqf-kJ
zYOxr{hba}PW;x+))-ZiLFS!i5Wwnrd<-|`c){4@D%lfv{8qqIPk4SDAXZAvf<in~F
z!nGpPR>{fj^2d{wD#v$Dc2o8Bnn8juMRZal7gG&YBHh+n5Haj&2BkYGCXRWLI(v0A
zO);7f1*g6wLZ~DrhFg(}bRy{ObyW7)Jd-WrDf~YThprLnK1XSnv07V^ze+D0UD_{b
zm`0Z$Y2(UhiPZF`hiQJUSUIy%aF8#&39cPU8-MV2C^edi?mX3Q*?<*MFRe^jgKZt@
zQNf4j6gy)?8>v1J?Z(9Li)>i%0D-aHksW2Rl=BV}UoR$7R+cF9n)EI3+;p30dE%i6
z(4vV!JHVMWEM{evnQY|2iHW%k0Ps>HF+reSIjaFlu9GuM%NbOf2$rjs$JS<;S6~8@
zKtd$j5cj@d`D_I~>i_8y-OU3+jU4LrC`$5ist}?(zTQlu(6Z7x2!_W8ro(87E?J2x
z*n%{7bIDKCVaA7?WGppKjOk_Hiv&}|-G^W6E`W~bs!f>Y&Gqg*k4Z*!;%R&cQ5z}x
zk={8VAYmg4Xj>Gr>SlU%`VyCpJydTd=h`J?Kzhd81_=CpAC!Vb<V*u#nBzd?N`=u7
z0>$B*m#GDfAfM}m+7RS72Fpkn&+HnP8?A*e{OwlI&{2jT6%HydmoQN5U@aH+$cJe&
zl~=Fsr1avVq9)^kmQj#l6IRb>c+VBUKr(5D{@T32dAvl-M+#F+q}sqq`n@}UMp^?^
zO410BOg7|3X~Vp=3S#;b{jgL!FPS>N<tI!Yfn)JbSRT(4$EY-_n@_ObimS0iHnJyY
z(P+#m+6J8OxRLWe^g7z6Tq~spn_CE}Jh%aF?Z0G>RXc9CPS7zd8}$?4#9a7<Mrd(K
zwjHMY=&cWnIH?CWjaqY#zTn3^SVj=zB^XWjl{|79NRGwQ!8pWxY|QhIg=c>r5+*az
z1ryr-fwtzNgRL9X#)k*X-db_{#XGb9<mD0{L#0y*b=i3yEjm!%Lgx-=%3a^99(^8F
z_NDM4uKH&Mn)p$4w|MrCg%fHmjWT+Z^{!qJJ79bSwb{M21_c{-k|8X5lj$*A_}akX
z?+4~!qiI&xsu7F#3M<;X$;qD(jDZcB$tzy#dU$-Ov9;8LO4)O`sUd;9&9>lKxi?BA
zEI*P>zv}S0I|^kG>YnVFlkHM^3sb(!FnVR#yRB~0y?}h9eAgBj;e4C%9kqK-Zul_M
z`lzpYS!T0EV8wKoN2}%i{4Ti#ykh6BX7J<$AnPgWWxtz$!mt+?!XEWHev-fFgID;{
zY(;=G`14YGH~6|=W*0n1OtaA)a@BUtaLLxt@cs$iZV{B`-?{QbRr^PU{By4Sy@>vE
z<CN+Dw^+#Zul(@)0scZS^7l*r2|xV848Nen-vz=C*Vo@s;;)H?AF((8xFdgvg)D!a
z88R_nqgNZAA780qV1x__jD=6uKLIb~m?@%YHLTIsiN{PcT2jZ7ipJTuzTdy$<qKyz
znz7;75b?#~cRu<jl_9+EQ7M*51rbHmD!aBkzdRf)hl^D}$>&d>ILW44Ia~WJDl4Yl
ze<pRM#~FWFKYy&I%X$zA3{h--JX|c)7^Q!T_-Tp(mYdy+;p;4)7DC?$ao2#)sRj4Y
z@yfKW36lBjPy!&W>jnlR$|EI=SZpC%y}iczG167CJ;P!=h+US|nTGx-2Cq5q^ax|!
zh%d##9VkwPyTHA4EWpKiqw8%pl4Fj&r(YkdM-zw9p@1ST2$Rs_o5={@;38-&;K;dn
z078O@S<Uaz3$zK~Q)}sS46V>Z?T*L30HCnYa*4TcfnPrU91{f(7@rHV5hVU(_jBuW
zG`aAWOYNMdxulu3c>=o`u&nZHy@X%;v0N90kIlf#P>HdHd~u4+9HvY|izuP69!irV
z5hNf%K%-tUT(V4WCY%6>3b5+Qi@uwj6iNIyESK$kzBmE(wW0<l?i1-*jeMceZ{fhE
zt$||u*L^&h1m6YOS7kc8idbL~UY^(L4=ZQ<L8XwYxgiibapVn&lp}Vk+BQp2gC??g
zNOqwJ%Fg;42k6<AgY$Tl8_z|#O4aBu9Ok`^JZW|Ix9lljGDbu|v`3lyg=af}e=g^5
z)g1L_OOrp<-h*X8^+!nZYp%j_A865)A9Iii#VB3_=*_CMBZAMS$mGcwPHE-6KGtB3
zMK|4QwAQ$fP*;^6>^AWi*-oz)^D{A;92&UpV6I$J^f3vOy7fj_%)y76fSMw)^foZL
z58S_d&MWo4KDuCe=vxn;3^VbwnYQp^sgLblfp2(OxQ~`tIVHU@d0&>c93Xy<WCqEw
z(JMGTK{sf$x<n74oyg=kogmO-lI$H#h0Ir#Qsr}=F&pz6F#S$^;%t$b@aTaAnf8{N
z$j$9kZE@P5!L7a{@TCCaw68fosg`NWRfbYWNub`jk-KSa<9+W!EtYmXUeuQTHgop8
z91Y$*iEDPq7%;uW{gzLAf6eFU$4BmB8+(kFYhWTakx(YzJxG9g{jI-qWSh)-x>Oj2
ztV%jXP*v`$*B40$zVm&^&xCcS5I4pK6gLJtR+zc1GHl?z{39;0k_QkRr#E>AMk6u)
zV9BO%XoEF=DNDB?GaaMPvEw%o3tv%J&gAFaG&Gb@j`W`A-)t<>$xlfjUl_P$SfrCe
z)LKdlJ1_gyg>X7{_y8Zmhpbl9c*t`~6uwyMCx&s;9I*5xoLX!I@<+z3?8k0^I<_;l
z@-2(O(crI#y=HZNKXu2$URSkh6RIkhUJ#UNDVa#dvJ$4WGq2Xresg$T-}DB#t6B*3
zTO%oevFiLTmhK2+SKiXVvD`8eii(6EqD+&|W&)HcZQt2Xhb|3C2G!>Q9^d4Hp|+5m
z>B@iBQSo(yg<(c!r#7{@26*HLdlP;MVD}PN2=iBNouSw(>6;LY*Szd%in33JIefD{
z#txaCQ+QWHG?}i?0pg_;+7OA_-+G=5HK#cUYteZ%TCSI&mCHXo^54oho#G%QycQH$
z8F|hXC4@&(?<T#!rh7W7@h1-m_K#S(mnnhWjNNQ~t?rHsj$bI@uTKjOe7EBz)?xTq
zu_!jI7@LovphOl%LHeUn=9xSMXmJB8cq{N3X_$8h!!)=f&^dBD(82@Oao<}W*I?Kv
zV;ul}51^@^8Px*Tf(s?8U{_7oh!?wNXv?j$NANX6{_r{mg=kc>t9xloL;u(kQ6#B3
zMu|v3)P6>`BFFiD=qA5_y#O)W)jkr*6i5Dx@TYt8!qd{s<7BzdxpFc~muLywIUXct
zVsdI}<FAGIL(X~3`kVc}lA0iUlgcCl1dFM=mF!Q8eBN6eMyOBvS5|k!S7&m10M%GV
zQxET7SgBNr#xg=*Z*Us{-puy;s4L}lDV`bqgp2!@_ry0f%*P_kbPr2FEnXWsV2pRn
zdl?9UsHx{PbBE|p>W0MOs0O_-c7OKq2;RzbPB92JKxsR?=nC=R08*RM-zrf!&)&n#
zXKoA3!bJpRVx_HtG8DsTaz<22J$}K9MKU|ulW!h<3Yw5RR9{J_JUUz(@2#VQB#x~4
zS}<l7h>Ru}l!^)`ic(~=X=enon$a?)s)PLTUn@;9y`N|@pJ&Pm8lxa2ZwG2O{38U^
z;1g18R4l4PTmU3^mssH$GKRkkr_*2q+r@L)Gm~c1XyA6aUr)hq-Bni}z>-ykmN)^K
z$5qd@?@AVgImm+&&Q)-nnNF-{&mF{R{jrPSTSXrMatX`~rYm07w4)_WS}T+}4Zfbg
z=iRVHkQ0-Tlivik8lfR&&e1tXDO-WVk4WJq$4aL27e|wy{1}R6@es#FMCyhD?gO7%
zv_obQ*c6KH_iZ@cxkuNvVbjL_djs3y>3kn5dlP0qwr=^Af~Byl8xiW7a84FDLr7Ul
z^%==QC3v+;Ysp{2e0^u(;XxMZmTWSd1UKGB1Qtt~&4b3{(Q>;uW(8%&lJk0+$wQ~3
zR{=H`mM)<uy#$f7=G7`c1&Yb4?&)jP26;3wtape7tNN^=c~i!WY_Q}2d!ktlHItB$
zA%+Tx(x6)ESC<rp_hUIWj({>MDliw{p^%YJED(|jiW>$uxd1?)RZ*h_2mrM)h6E}0
zkT&O|I*$iss}u4@mfSzvdB#k-G$%osexg3mZ+47s;iVk{fwD^PAERFRu|tKrZ8{i#
zC&wP>x!((j5;pzrClkgb3>vd!Wp0w*ZrE9fj$ernxRLJ5s9^sDmC?LKF2Fr(2?EEy
zaxHv2$Rv;wYXzVR971qEy{jKmJc(6lm)dK`!elo#ZHx88WSB~XRb^rl?QCFn5_v!Y
zkEPv7NN6*MnrzE^^XO)%tidoezN9{)f@AyJ!c(dLXj6^JH{%db8NHNGF*}Bw?zj(n
zi5iS3BE}lU%b3nOC;7(NI0-|7&je9Una4RbD>;A!<Hf7&#9Cd0ymEfdo41^wy=GL;
zv`YZlZ+zrRg_eq)+N2^YIy+h21}DJfF6p6>`k@o~uk5@n&tu}grMr%SdNKvCaX%m|
zjL?KzDr)$x`D2F23;0&6CFbZJlWL5sopR)vX#+3}%S+XL-*!}i1PFR)D;Y<r`l@}X
zZ(4!`O&p6dYz+cQ@s;SfpDmgMeKQ5hlB21tdau+~aGsiW_I54G7oXOU3m_}2h^NjA
z7g+9qX}8Ea_v#>h72C|a(Jvr_>`3bA`+QM4+hx!E1(~L&cdz9ZZiEnFxV~dc=<Hh5
zY5F4D&ntPg%qE?VZ_Vv5`;rGw8gdyy+IigKCH#HQ#F4d|j)jab-miu)JfmL|<zYI3
z#_^1P{s~k6-&*bOkohl6{oiS||7gH7|LX+Bhj#dzW=p50{)OOw>A!RQdd%N!f&cQp
z-=`@*yo3KXbn%hR{|`d`MaugH#sALS|2%c^E0+HfY5Ct;1et%0&-oujXJTgkFVbQ(
zqE8zTy*71f=tSE1$HG*_JtbF=2?XT6k*9Mr5Bpn;wwfCb$L@XS_}Rg<7ZXRb;?7Y7
zi>9X71{ch<sVU}_-i6;e4m%ou$GQ(GY3SC<v*zLHeY<F#-!!gnwRiru;ncSI*0$0x
zPA5);C!@unWNF`s#;L9+tySBqbJ^)Hf6Q+FDIwE!T+uk7mTAti=GA@mtSs2Nv?ZTm
znVl?t%rGeFm2^7vH1}yq<U99xv1n)euG_sDUKm|b-&EU95;a+3xBN&ojUw~e;eL5o
zftkcO^I}OEF|uthjI0iey!&AMd~oe)sn+tKX*><Omt^Ng#VyV|of`0Ae36GCEId;r
z&SA4I<)Pdb&EksF8=jh1V^WDWDlBS?9lIl}a!YpqwElj_SNINe9eE>nn)Em_BB$1u
zaqp4yu8Re-4}bKd<OD<>hN>61;zgAEoK#_)n-Y(-M{!Wj8X%LhzKW&LDUY(_xkUQR
z<+cqsY*vHxv=CwvfPj>JVRKqd{{_RSO0R)1jL1Z-O_;PTMRI;v0w@DbP)-@EGmxP%
z#d@7(03ek~=*%{VNgQV_KYdoIyiY<*nR#^lK^c^co=raz=+Ed<e77wZDw;t{p;=BK
zFOQ$?)E40b^%fy@S{w=MIp#Q$dM~+^#57|32{#!K&yw>Nr?a$zRR?2x?{cp>7$ZW<
zFzp(pxdoxajW+ag;s!?~QaH*sd)ssHl@2GN7y7e}j=J<`RVI>n_VIpcG+TP&jR{hR
zZdB$?0k6j+p38~jNl92M>R&tSnN}j_ZHT_<s2s%f;}g4;0lw!SA^>f{I{^tdHenTE
ziQU%DJB?B<N&t=R+56&|d^T<8cCSo@`~uNKjNH)@e5($f1H`EN+>0BcET3Hr=}IE^
zW$n<0b72k$oE=43kp+J4j-B->US!yj4EO|Bsi_u}RO8A3Zm^*$Xr|%;;@okW<Ct)!
zs1@%choMgms6@7Uz##Mn*rpXUr=bXFzsTIB&-35`BnODaji>G_U$Z3_${?|YFIn$3
z4{R+vaPL`1HUDn0=cbd%`EG|V3H4l;Ng{cfyQDq{CK9{dZauFR9a)iVdyxl)Zi&Uc
zjo`k$%3xM9S!wkYan!PcfV8`s*?i=BQY|$M@@HAYMa)a;+OiY`?Usik<5MSmTR0G1
zPugKFcJyshN>1?Jmo}3a+ObgwE+6F?O}3mb^ogOxpwbyNd&EHe^Ob#hR#et3uGMMU
z1mB>#T_Basdb$|3>Vp;ih)t83;Ock{<ov(dQt)!xvD%yot1}=CO|LDrAe!nS59+%J
z2i80!SCv-_IjSxYEJGmTBN&=-Zl=a=OT#GIG$~qn&7#7~NEm@BwD{`>(m1?;D2u4J
z2+k@Ly*qF>;Ho**Ig0OWNf;Bf8hMv~sv~&NjpRw=YNUI*6Z8KbLnt+buru+pLFnv<
z(TRW#$w=-u)-LuU!0?<N<L(fq<GYrjrdOYCIK!ENr;}p=j|+``;sSKBWGNfF1<>ZJ
zm|rR6Nb%|#Tu9?Q^s|7-K9;_s7CP*Q`r40h?YA-Y7m_Mo|1D8)bPs_mhg``H=iHBp
zo8X}e_At#Y!kiP_Bl{@arAa;TBpeGNz&Fdqb@$bV1>EbIVHt%a!y6VEvK6wDTv{#=
zE``xD0O0hb=e|h69Xzro{yHuuc+28B=m)>qfoAYp@7(atyGBB>9S?5F)DUHsCXUy^
z2c_8`>2?Er+c<iowLH(?!g?<b$wB#NvN%t#Y$Kwh<N&+&xFL{#v|v8?u~qd;VpNEJ
z{u-j28zF;9^8+St)?vhyBtei^)DXtHP9JkJ+vF^uN`akCToD40iL1Apa>ftJW`;k?
zvl<lD@b;VJTC~rUDD|}_e<FxFS9GyW6=Fk$QPxLvIP91tF?CK|WO;xBHK+9{oC!#&
z1n28c1oQ(GM%hrX<28oYmt8+lYHxO8O!N#gV@;606ovMQ?>h%!X4{aGA}uLbpkyLa
z{(z0{WfT$+5Ni7o&2(%`b+139Sr2Gj_ZHw9tTsR5Q4wzUqQFWux!XQPhZJ~iM7>-h
zIDDFOYi+z%;&x+uR<rP?#vAEGjGkkDWf|8^^olK@RPr-QRb?IY3^Utw)MBqq9rw|_
zA6x#>a?~C-Pav4DP1`4izS=kNu4YvMUhlZcs@cC;OolZca7X@bExI4a2)YJ8%}_JY
zquTK@5-0@b=MM2mC`QH@r4`=i2@pLojb*#j%uKT+&;Apy(9`%D6bS?S7BS(lt4VWg
z1`>hEQZE3t66St3NV9=^AH^&AK}|`28UWbiYd)h_?A@BCzo{=)%HVNBKEb-2gfZC6
z>~ZLiYaY2D?bMC0>IRH8ir*Qmh#Llx!J<K>zcjA)=remM!CEq%r0Ay!W;dTqV#72j
zCD)I19pq<MGV^&@1noqt7EiQRhmE5+i5M|JjLNby_!Izn6FtjV+_v@KRQAVr$H59!
z0~qil!e0Q_kG=EG`1BaWX((2?N%95Jm^HjC&~>gV@A}P(qlw0iu{x`3FpFso3A46_
z92p}FXpXl+!HJ@c8$Ym$7AJSbldEuEFZvdb-+FQ<<-mSEx3nYeOwbx4u!G>$(qb{1
z{fy*@@bncgFH+3it-K-HDisp!-r!)Gb^8tk3n77QOF~*8j{}#L)JPDD(Tw?$CU^%`
zT0>;F+d)spkt+f)uEh)XM#1w*lmX=%GCl#TS0<{fY;9FN(ej{zFP~k#zq(c+7afhS
z1u+7rSZ!JJOM75El)au4tyF=CT|Z{|S?D{YeGs~7rjac_OA`XnnVZOLd_qn$naIGG
zO$p)0BG}Z=KLiQ;Hl?xiEIvsMC~ySVC#M+0y}8m>mZH{bb?D84Y4*Gx)dNoy3(~f?
zo;8XOD;=5LPsMO_^+>5}&ChZA6Gp(XFNi2c2$joTdN5?t_a9>IDhc}OY$(GkD+<q8
zQW>Rg(;3BNxg5-_>YEJ&mV>R9eydQ3e$&h#uV+e|^_>-NU>2y#>9+?3EO>(zS;vwr
zdD9ibs;a+@V&TX^@?;$2mi{3(N|lw*YQ#1=j&uMV4WU~I;C@hs$6$gpVUB(+`aQxn
z@)>QMc?+uYYJP0l2a)aa!kS&{%8or4g-ne)MvatI`}*DM_O@#H{DQ3c?Sf1XL1dk`
zo38PZ2b^je?j#-G>K&Dg2|=i!G49yHPPsfuwV!WYZz`IbFV8sB?i-j_clgkq$QLVI
z*kJ6~Tp-DpEaY9Jj?)P7_~<#qn()(=)Fet$0!OUi;BgP_LgCBF(^=ojaUDZj0=G0M
zy29t6Tx>V*8XRa~#j%3o?=kol2pDMb;~@6NLB_#*PKF{dJ-t7{t^GpV{m2QPPDoV-
zAjl%XHSr7kJT_2@nVz{ZvDaewQVBtvO>%d1wE8lTkx;v$o9l#~;N*2n=V7ME;SPR!
z93GT5Xg7NJ4aZ<OEhdhBdsmkqf%O-opptkB6J%g{EaDZ;u^7R6e^y6E46DvN_I;g&
zqa?SyWIA9+vcIyR4ZJwYLPp|=#CtcSWml}y0%7F37WXJG+6E}Nx(Tdx*?^q@#Ugs=
zl4Zii&B-2bc1;Anq!rlMY6?fF3nQst1jopJ*-MB1d_B`%1Q%JB=|)Y&cG1(ED`R3s
z_}~l(zzU|_4S~(0SFno~RAHZN<f=@@fjoQ<!fL@vlGPERRg+=^Y|nr;r`uZ?MYL$U
zi37AkLXzfp&j;;h&t7aHfzTvIrAL-Zp5WJ+GH$P_CSawHrU0%6cKzd~eEGF972IBe
zvm2hHI)0*C8l#mf*~O+4*{=r!KSkeu2w>O(DMf!ReB<jSj-RU&;X#~?Vhc1_5#(rd
zn^%o)iiAsbk74YWpAdQ)cSiW($%JHe)({&=^jx0qvJ_Wn?TRa>nYDE4EjQ3}MH?*5
zG@Wo~LAm2Kfz*CI$}%=XUT<EdoP6ypiX(jSzEDLAgs<AuU*4Tno#Gv~Ctx4Cms7C{
z!;DTuwhX$pWM*9sXD%^r&r|kMQLLap3GHZpk^_y0q~#v;LU?WB1k%2zNeBsG!lVs;
zRMr}}45x2F`N5cFniNonVN^{VZ?l!0v4NlimcZ@MrwTW@DYv89EgfGr*uZNN*jjS4
zYR?uQ603N3Jeuoo-{;PnIwJyrLXT*mvnVcT7J3yhS-vK7{#fX6N%T$DQm175E$cy?
zp}1ahHEx_R!oDEOh`EOcx8ULS3F*v0zW_|fJ%8spMMkKR8*3WV?_4$o=nErTuL$^T
z_ND{6pa65G=gLgBubU0aT*6DpqrDfgos*kP93y|fN7loQ#Fln2tf%_rTb@~FpP+ci
zoPUicWZ&i6+&kcxTTot!yh(khNi-le6X>INJbzp%Z<yQGzLTx2pSr>}!}@!i?&<yP
zmM=M2GE4WKuI1qso?S0+Is@%1ZeoeC_g!(bq%54^4KrM64`1(gu14Fr`imAWRHUa@
z6`w?;&)<M(H(^B{m7vl!(rnl^>~~D&Y3Rla;%K1Jx9`pvcE0*jGCY%fO{H*i352HO
z_dr7`2I;~i?Dk=z)AoTFF_d?dTdCHJ7<+IdyeZJWTARJrnY_`r`}c?Ee_F@z=MAaf
zAOHUO;hFib<q<Lb9#f$3%WND2^S=fQ{eFP|`{|kC_e=io4XIxqaep0@`^V$%uidHt
z+T8L_&;5UxTUbBF9RH%Ly<JuEBQq7D<Fbl=%HQatQPqx00vN{%nkMAy)X2jOiKdyV
zO+DR<c)Wh!dC5(oQJb+|M*)Q|1sjZS!aD&IY<JKrJ8>ht=fx>mK=*?_6ZO7-wJiLQ
zFL;>--4$t~av|TP{$(rGX%W@!`uK!r41DmJ9^Dp$i*&v`V*o(FxF^~@^7Fh2zn@ZH
z>g<x!x!}NtR`X1_QHo*K`V_$<RLa$fWkCfpd%BwBZYlVvj_HsKgQZk%wAUHB<@XSe
zOSr|Oi4*PC{hy^;AT`caT#Yr1SSsRKnvJ;d7i>l~+j_IPir<aUpi?K36vA*cKRa`_
zS19FtUH35)ICC%9{)x)}B9N!rbB&1mtOkaNJEl-b1W5!m10T4tf&^o7e^-nLN;GOW
zxVe8ZurzZacW<|;c7IiQ+O-FH43TBhJ+!i^Y!(B<eC3sgjfWV@egsKQhzuvXg99v6
zL4|#o)m;ItfmW*T`<Xhzo^{CNtrFO}S8RxeSV#fTLUQx5y>TP~ls`Z9F<8FQBzKTJ
zfPTbmA2rhj0xbc9df;YpB1X;a91D?lLrY668Pt|+M{L4|Yr6<(whP^r`Bp%RN~a?e
z&HHVM_4MWeg)xpOTo8jODc+M~EQn}`RF=_(9&~_Spp}wjyVHC=3|Ec@poPK5$~iN)
zm|#0HcehuMPN8iC1zW4NV4y*(N+oI@p>{^UCSrT2lGP&c?x`FVp`{BxE=QzLM{1f5
zSoek`m&{n7=8Be9uAzlOergbxQskN8B&?y5GUqyv3Y&Z*wYGUb$4+iSRN}k%nF8Rq
zF={R?08sZ0(Nx9pT<XS${+u`?8&=4~5|~^XUsEAAA>Kl_RVM^}NMZA@Az=8#kUu8L
zp^Sef3=m{cl>3SfL+Z8is51kr<3s?QytN3kB5tAn)Y8d2dP$o!Y6%F3Jbj1ilM8(U
zf2u&#Fk;J{EnNZHl>=c_Q|%VYX<2QBe<!C(={*OZm~&_M|L#LzjR0pM$i49bFs~SQ
zleib<4zT-#jF28u+d_gWe8Tp$mIO=Ot1{^_pAp+9q&03^we1CrN06{+)ep8)GqY}J
zy*78=sjnD<>gVkQ0aPf4N5NS#>X{<I0YjK>bE>ltkHHi4LZ7aQSg4tcb5ez^edQ_e
z7MG}l49?G=7UAOZEONR0v6NV6Jo4hgfz3p|DAvD<Yp7<v>{&bg`F7EyiMwBJ;l%AL
zY))4<W!ae*G_sMw{W+j#!J@h9vg&P9uvS*h<#X5jU{hjnp^?<Z?7msC7QZ;RPEJs;
z0QF1nQF>xgl)v>mzUG$hv!j`soQ1r!NJ}#%yEHr}+Cpb%CwkgRBN}K$aVOo?Ssf(1
z#`J3Xq@3a;vo|B68P$40ka70$I}+k^JCw1EY)Z?vN=ys$Vge0`MMGJ}z~@gu-ub%f
zy?|=a!Lx~M#r5A;+EO0AAh0tAtm3=LVKQx7m-AF|w67=<K$fb5tScrde!sfk=_U7W
z@C$$EVTRMo8o$`qIarFFx7xGnJK4F}X#`dR8rKI}=g%M=)Fw!K0P?22=~A4x(jR3E
z6~PL&(VrUb->fqlV4Xu}gau_iz2j&gMRQFP0H*$y>G^}OOV4pd$QRwGS=eOeXGp>z
zKFO`TRO5Zds6d8LTYnx}e$^+%2h*~Gh@5HYcl5kngI8?%O8J#xgclhCJ_&+LB!w}!
z5(gz1ppR+PHJ7jsg*2uiMlf>2pY%@{Ae4{Oe8=}dt*piQH2es7gecLRo*G!;vbdY8
z*`7QgM4><u8QfBE?T>XJ`9wf=cr5NAG;eYwUVCVB=jOr6@WuGNu&|QF6x%*-o*stH
zX#tE!cY_i(?z*<Ro#I_yLcVqG6exR~n|jr-&7CG9R2R&}rO0vydJ2wZ9iT})lb^Q-
z(*p!fYI>QOTR%B@&IR@!j(d%0z~vF7bTAEA;A&$P2YR}rxL9M<Ezh&v1q3qK`Tooc
zwpKg|+L&aQr!;UoRXWTsW&9Kkw#JoSk?!kA+Fq+GRTcWp+liRQA>_ifM}=Ne*D)=_
z@Yq~A*Z8NlgNZ*Wt{HOt8Bcy>00eYX0I&8t5ryTUdA2uw{-l;zp7;eAZwhf9Js!#7
zHC*@=mUx!bgh0YB>FcFEa*tz1*3kIo1Ab!ipOo`o{T~0Jod1vT{flq;_lfn73NLMB
zXr?Fl;ZfE2t-iB<q=d6^Fn$ci%jtcPg9E`I<@^`lGV|ZG?|&N1|NXT;D)rya@W1+$
z8CY0oKZf%^a@5&q**_x9893Nz89$7;tPHHQY^)6bkM`*P)2V)w-#;uSe?_)uVE)yh
z@+WWn$W)|dW@Y|&-Tt3)0DjT!fAP)yx_|0d4gVLC`MZYy*X%Sbf2P%cY$pE8l=4L~
z{$tf{$7EGXo)U`?GQpFCOoM^y!am7_+m4V0K067seiD%p@BhczTR_#dEbGE}f(3UC
z0Rll6?!lel?(Po39fG?CcM0xp!QI^h1cFO&`&Wdslf859-S7R6F&K<BS+lCUW_NvE
zGOOOcJ=vWIwTSTK4>dqh6n)}Fh#x9=;a`e_TKoLA)BKgCEE1G<%yOK6QYpSg%Oz7(
zty9FyvHgx=2IqYDW!L(<v$aN=7I__~*eAwuC%YdogO4}!YcJ@aneUiHnVkzi4#{$c
z+`*pN`%9!(hSb|~O4Vj|d6a*_f5;h#-05~}&(U2m7ihXtQcPTkR}Z#z?KD|Ju<yBZ
zkW~#k>KuBjE(wVmr4piA4r=VL<w8JS?=;&{nX72ezT;ktM);;c5^8n1xf{#L__Lon
z=vm=SH|5ni*N~4=?}E#7Z;r4b9sg@9Gdrzi|07a(<O1pfOeVn%?<u|bLYHK}I2^wL
zo000|Ktiz>LIbn7RdD02qkPH?634Sf&J{sDbmYr+WIRYZwz&HsUzsQOqxY3Oh37sS
zvu#hmHKBO*)j9e4JbPCze4vP+mTnn!DEn|#nIC7e=8N*AkA^Q3%#c1JX}@g#T!>^2
zSC}4%byb1GWvLc?BE|eVgN$`*XktQ&&rZ$yMcsT>B(;{G!LYlYbUIPtSejLW<uyJ8
z7qb;sT~JS?GB#vfkwe3wy}%g-EMc%ut~_VkGkewdW~l|unO$%WUytfy*SbrFEQ+OL
z<*XyTi?Rzl1IE4J3^tBjr>#!anBimb8Si{X@WvLIh6ZG$Im~%{vPz1)V_-2#1mYzC
zaEoN>*dkbDk!5d}O%Yz;>K8Kf64zdngmftP@j^z5mWkkQ7<Ws!b2$*8WfutKYww9p
zPWi`b6JG|#Zq1<aXeF2-@h2aiyODy9xC(jryN5lKj(C?vykf_GO|u<d=GpI?s&ukS
z637f9BNAo4Y`v0)O_ji1+RUz5g=$TxZ(}_t#)tOQxLtqNK~{DQE_^=vL<%>2b@%I0
zQ|SOF8pWL$X8&{@8MW5Pl6xc_QDsrmaf`*8a=C~gYd*)>WM_JEBox%Es5?UrGIX_^
zTO`2q7c~BgOBwh=>$sPrrH)iAy12@Gm!>m2X!`u^a8=tX_4F6CV2Nm)BedI*d7rKn
z6{Sue7?k=(eV=d`?&UY=&G<a&vduhjubN1q+R)GwNjHFul6*MbEm@k9+!B+~TA^3C
z3q;rkbpQ`_gj_mUlZ)#=T}3&x6AbSlL{`wXvx4(Qx&dQOgNrf2ygDLg)ItZJOy|*_
z;t6-WemiyKZ<}4WrK}d2%KfmVnhbT-G!03q*2-N}#x#ol#Et7|8+Y>OQuYQoAC~B4
zs3zu@xU+ibl_+|F_J(9BWvEIN?VwFNjmAZ)7PXbRyGS4mOI8AAPCRBH!E)IU=wwjz
zNoYI<<PopA3$07Yf~Rr)m6()3U>EFVRlOo#iEzO0N{z^}<Jv2~K;G`(frFQa1m;&#
zJh6`%`+}p7gm)Gjsb03%zaukN(R@ysY9pfEC81(McOeLI+a|y{J`k2iIKvnnxvO)0
z+!ZEe8!CBO_HG-VVIE@<N}wXyLb;OFDoxv3Yws1fxrv=)EE}H*A=vgE3{u{^P|SQP
zHbF&cCp!M8Vs%BfyCYt0BTrJOuWiCzDlI;)%GkVPbBHR>ddQ{V4l;Q^f~wHRG^u(X
zD91MFB|1ZCye}5w3z5u<)Z(r(KxrVN*0g;_D+B#@&up$9Y_R6d0~_2ci>cj}+y<df
z9$}0`Uf_V5OAK3=`Apb7epmcYAm*)En?+Vvl~^F5(WXr=M!Z@2IAdEJD1*=Spu9%$
z6>{ScVV;dcDsXIr;zt(FL<&KQm}Rjr5a2psJ1U)U=PN_v=R0_9xjJKVgij2Vk6Z}~
zd7*ys_JLX5U9!;MV0bET<EGsYo9y|Fv6=b<hD!2b>!c^h?Ku}1hV0A6PvEhG6%y@m
z8R<gr$SJRVKUs9Gc=5bk!|90N-?ShETSu9vF7ow!wlZlxCyuIVILGVR{}BXJj`Q$r
zUZV?fpZJO~_QPz@t=T8N)NWj=I4UHXUTP4$BeAC-vadm0!B?ro{j|DCxTztsxl;r_
zKL-hQrxr!!8;rOmz!ZX@rq;3Sf~%0`RmINf4p_A=P0E5kO6LGKiJ?t<_qHlWO4hhv
zUVNzcMT#$lk|8Gb1zSAcEtTxFh-a+&vX?MeX-E?uZI3|HP8!IpXzxrY^sMv7^-EFJ
zh_csBZI?}+A3Azh@t@S|u5^xV<tI$`J!zJz9HxZ_?a}I*^3{`C-2nBR?qn3K)WAsy
zSyP~m_k7ra{9rm*(=@uBeo#U{US#6}J{l^pM1)Xw_)MLV@ix)Xzf8oZoJQ0m!WQNe
z2?y`I>_v#c=BkA!BL|CpGEzshQ-yz7pQ_bG*gfoZ5>)?IoncZBYQ*h`xYrl%2>*gD
zQ-sn^Xwvv})=si(5}tCjKDfd#EhaX%ByroL<JX&X_n8Rlcb4j)Xji?J$+`{+P}c5v
zz|t2lHQ%mnG=jWnGXq=M^Vrw}FQ<uVQnKMm_Cik5Wd<Xg)T<Z!w4J}uF2b)iUeQ+u
zmS@=MeLOs$o-<L1)ohiYxwx=24VjBu)<qkega2-6au8*?G_h%bAv2*F6{fl6r8}}0
zO{kpWsHQBUw)ZC`0tQm7@ks+a9NX@W!dQ|MvgTZ{E>hYSWiW?A<p`217z8_*L|lYi
zu5}384<rb(m3EBbnp5m@%-D=ZMGST3di#$00-Q#Tgo-54Sr%j7G((B1A`eukkh*fa
z4)(D9>@0E89B(Hay`S5{DOH@6P3LNq5<|PqJcP(;k8yn|y=b_#>gpn5QRrA{?^?vo
zI4)=YjQxItV5iy(!tQ-3*D{OLu`!h?7G#Fnc-)0kOI#D`^CCG75fd^bV&%_QqiMzT
z1_jN&UdVD8tZ-i^*WYOY=3?ro)Fecx#F)xb@B<822k`DNiX{X)I_edld&KLUPAGsl
zuYT#$B^VQ5HTvKf6n4Lh9&MLnuK%)pkjV{2Mhh(LP24TRa?Y)e1=fye87gRWbQ(%A
zu~#qEOS&GmEnL;al^yg~p%&KJteXcbNPSvR9y1$F8wqv>D!~DsN(;|%b!|_iRtv)9
ziE@)}P2Hm+NY}Q`tQ0z#`%shFL`(8?{a&v$G1?099N#6t+L8LQvL?zvN6Gh;SV)A?
zq3IDyNsz+$eMEj%IVPq3H9Su-F<I$(OD8N~@Oo}KoQ5LM5%M{2j9Zul$g3fh&*JQ1
z%cwMRARBm|J7s&S9CUcb1y9#h=VrnibP{8O2_<eXwIx)Zr(EtuVHLc$D_xLA)}w!0
zydaGqfF&>hHu5R9jY#-2lcKjOtG`y<#atmpk_^XdH-4C&H`sT~%<{6bUsqh&z*5(s
zPOv($mSRnxAo1HmZC(R5787v{dnm8-rV;>yU1$~YT~KFB5)aN)uqa|KW3VK_YwlEj
zq*5SBAZ#qwj8b4aMjAM8g40v;z2}Hb&ALRmWze=PrM#Nb!J6<<p!GwgMKxR4Gpz@x
zs#=MZ!r<XLhy&Jx`&3P(J492dp(k;Q7>C!iBn>i#T;8*9u}aw~DMG!&+VmRiqdj%v
z+FFh0A(lKz1$|e@B>4qCIFO+l0#_zC00k@IbX21Gs{1*k$Vw0k0SDZ~aX<*xxwcv*
zHA^F1BzVngad3Rp(|9YiftM?0+-<Mzp13ntx{=Ye<8d6tDhrR%za@%cTA%X&=xPUw
zpm~23=28}E?<&q0E@V-eSRX)y)hXi2XpIY+sJz92zUluF_bFXCKnkylh7C(P=)(3s
z<tD@NI1+lAdBW;hC2t|d2CzY;tggnS)qxjzIuoi(;KzJ<?bo(}TQvGd_$y_HZ@+?n
z>YE4n?@`eb>0N6Ok)^gqwWogN35ULwEY0?aPx4@DV_v@1{3sE^j?G-pbvA^pKJ2ZR
z%VW8|)QBIizB(%Tc_J>h48#>6SL(rTMMdXtMG}0cdfYlkW1YFkYaK&FtiQSyci))x
z8F|;+Bn3cbi8_|Tb$hpdcfsK(^zb<q;r1>vs{HGY2#SCM&W_IN@dd3BcmgOF5_o5e
zDj7?&y``5ip6QnY9bB*CQAJAA?GJ?R@9V!j13NZ#xY)XcgLG-D%Pn_z3UNs87U|4Y
zulJ5G?@)JeTU+STNh@oUx)y)qcS+9UPl+Bt;f0MZYNwqZeJ2+NWj}_Og7AUXKgysU
z5)ap)#gf@PDI?27bL;ccyby#44DsO0C;Aic*EV1n0mj6WeJR@66<4;1v8OY9N`c11
zu?r29PdnaUH*0gjjuzJP9XKeNecieV@{-<zKr-$GMHdx`Z-1rv`o$-Pr-sPR?R$hK
ziQ4`2x``kh2JmxH93Grs+qM00Fa_fe++I9o+cG808aIK|luLk;F<8#1BE^A_m6;19
zxc;J|RMTDTwc%@hKPS|#2=Z!$9in>yI-a?n)b@0!BXqS?2~;);69Muz;H)+&%>&-D
zFzM*LF$s~(@$C(qJ{;QVLPw}59W^ua>*ZTFeBLe))L~||b5kWkSwqc^er3-G5Qu1&
z)_>aNjK9{i{j**E-E#Zg&Cd9L>t_Fl&HA1Hzwoj%{@p$Qw;KU0bkyH!2>&BV?fXz4
zjs9PZ-M^C5esjbBY6Jg2GJJpad_2BK!picm4c}rFG23+}<dz%7OEPn1rB6H{$*NI_
zawM_ydWKCdv+r3)QT46)B4I?jj_<Gc1;JedVyGk~B%Gci6V7g1xZc_G-7<N5+%k<(
z1`ZalAq8jG4?SaESy{Qg6C$@ji8FF?X|)AEtW8r`$un<R4p1tspfguBOPngP*j84~
zTWHGgKT9mdzfUq$ADG4=&zibxF)~(aES21hdna!%gg|iHBrx^XcnZxp{^t2Yrk8R5
zR&|)7!)0%Y%xl@&kr~Yc4=Ck*!om<8r6(82S>gmIHC&9!_pCuLF4yl<#;%JDc%dKq
zpC6u|#kE+mRO3BsUF!9v*WTEFXB*d9jw8Y=R5$B*_VPZSS)zRDGaBbbVqkK53D3X)
zH|sr*;sm3(y@~PC*v<$Mr^}V2Gg)xR{L0-q%IU6D2OASFtExqvwhGxxV&$NBiQ4&y
z;@11|w|wgRK|Pwi?)0J9!{Vh<azyabu9(P`otUpPhon$$ZX<^CkfG^`uyR$(y50$u
zJ<WNUc%EsV9hEI_re}Y~8q~R7Akvswvi+Qk-1ck3ENS6|agB#PZFNdPhty@am&GWz
z40#0@mr<p*u`XI0p%syf2=pg$Xzg^2ukL-ZexUKPFC_05^f8w6pW-r{t4zF*wCK}K
zY+`3z2KX4JgymdNDcQnmC|9XVb2^tUr(CUw`&q;+Y`V{zu)lv@h|tqO#rANIf4%17
zI#smpcm}_8kwxE!vxE`TgBHDNWO#pLY@ymEzOq!5<F!<nIrYM*&`Bdf!X{U~Y$3j$
zfBg%!IarCd*YPLPQQSqFw70X!!!#<++v*e5oZ}lt@<#ybXuUIa<#TW(?~h7P7FA7N
zb4=J9pUx+Asc9%&8-aGyQuw%~1|6-TbR)#fv&N#3aBkphS3^LOMw3`hJktxQR<<AM
z!Bs`a#j1@{YzbT1@r^4?(-LW{-*+Td%Dyt4#yPhhEQ0R<7_2deZ&yQ{`pKek*&T#c
zXrm?>Q}DceDs>Uw(2wECT|w*$&6}iPAaBQTfnp&ziM`Bm8Jjkma5ark%;P^KJNB6e
z8<`X{HFdhNUat&`B=`kad(eXvoL0W|r2PQbMm~|+rFWd8nO5O^px4NH!pfTGMSJG<
ztO9Lt7!}(3blnU}cl$eO)H7=85cNCJTkZS(UQoz}ekY@R0&X?ufhB?`Nn<{!G&*Mq
zicZmS?hy!uv(j?v%X`zfgp>n3BQmCWv&sa`*HtuREdlZ_&Q?;fwG&oghj<|R2u@RH
zr{-?NpxV1WX*-i>`eUpT>tI4IZCNb4jfl7fyIkC-CDa4XR61q+o`=wvlBmz+D#Txr
zQn)fdSu*k-48VT4J{kaPvNniyjirIuVrATPeWuiUaJ&vC=>4Qi*7x=-U{Va5AgIhs
zCUY6Z(8b3(VwP~Jx`%^;_QR$-$0vfYTt@_iYm%@{3`Dc&)Lu9WL<|KG%p3{Ab{03n
z_m_Sstf7H%&{Sq{Z8BF(yru~Zw;`hqG?NfDz33sS-up-)hmIq}J9)|uqIU<~;VXVh
z2+NIlvap9Hl8EnZ>;xnOI_!*@&j<Q~%??atUm;$IJ(QDS$Ym4G2kc^f$a6bw8-iTt
zTx)0p-vgO_wk=pgAffmCiIX4bY<#n-YVJBKsAnlm<2>V;mrB4J$KsS-U5W^mi%IS6
zC0;qyGLf?iE<qo`oi+|?Z(u8@_XNT1-SUWCVzrnE@Y(Tq7@7h8eB5eMt)X9oU7nE!
zd@R{J73V=hE!~(JZBv)Po=-W0yX1Bdas*jT@qeGXW#d(8VlXTdR~VaUCG*O+=~}9-
zWM^B~v6b79aT}muxrX-fgT5XTk{(<+fszx^Tc%ipdEY^n{f1meCD<e9o-d(auDUwK
zvEODn4b^IS3^?*vS}~>F@eXWuS#_aiKFor>gcR+ME{lE(^6U^D6R!*pzuR-|(5Oa7
zNYPTu5=Ju*WRrka-daiED{2*Xp)sgu1bf?N6Dp`<+lpG?s^G{gB)*JQm<T}?z{f9^
z(Boy!X!P&%na2@V-X`bg1#<II%fF$8S-01cR~(B>Unwooo)t;-jMjdkT0=$PpG@-f
zi7u(m4uC+2LAqBjGOZ^O+E|dJR!k%gifqhnE+_`p_S_f6K16{E*c4cmd*Lb?CRLVe
zLRLqtWMxh+^a`1a)WKU&^NmzHeEG#w{?l1hoI?sksON{t8(7BqP{3B*sU=Zm<U%b<
z&+H+sHi}N&Yo)O367LL*8SNW0`s1#j^S1_L+)<t@ovF_6W8i|F2o{%?Mo|s66$NUZ
zlX063Ty<t~h3v4Y+rs#DL#jcwewt5p^VoYMeh!NnRK@qkpOHwIFws?I^BApmtA@|V
zNslcv>?xPbpsb7Mdxdn6(D+`0QN^6g&RH~14Hfi<0SwtNW(!L~1e}0k#9pQz0o_-S
z8|qDF!4SSNCS&My<<v{U$VAwVt+4H`lriO_L7Fk5Y7sD>Vgngv{QdSICUZ+u(E<!m
z$Ad{%HYLQ68^;Z!q`=@VN)ApG$&hpq-s2O6Mk$5nRWQBPvj?xUE-Iu7OUYwUpqr45
z5!1$@obJ#n?an-dhC9k<N&S#U3k{3FEq}E$)51+R5g~9+Q8JB)h|Nh24V|Gd$2H>K
zqxL=_qf|^+GD#t#uhl21vl4E*>*KSx7Q2g*+=c|Bvw5cNoY!N~lad>vW)5PVRAAJ|
z8xIaDsV5f_vNZkyWI-eRmNHU7FP?fMy4igMfSRIe*sS^XP(ULjclY6*(dlZ6#;Zqj
z$?RFv@bVv$wOW*I*OZ|@yEnWRMV8NS^}G6*$P&H^&!~dx9)u2=t0;ntZ0@*6HlNJo
zt@4S7DquUFRT-BD69KG2!s|nCV-qW|n79Th81E8;y3735vupG)7$vjPw_M=-d|^~9
zUL(fE?$7!_!?i6xeihRic(HmZTh^s_l4M;_T5@Ov31W%?GL2-XIEW$`0v#0D;pA&A
zV3U1hH(_XFzCHNm4i*w0G%(yv%u9%Om^%i-;N<|fPX+XvoM<1gL118>Bl+ikB&|~<
z_xLU!Evqh84HnR-AbR~ty(fon<lm|GX;(eJ&LxtZD?_4<wsILIIfqo&!I|+=<Y+Xr
zVN30DQ5nF^pnY+G#AIvPlzK-Dm@q+JMUKajB!2tBJ~KPELkjiw`5qrhLQt-@ms~3d
zX9Q-VQVWt3k?Am#E_bS5aSrIh<eb+uKL+e5@%x(Vou$oFQ~V)VndmH>+~}Fh5Qy<u
zwRsC)s_5$iTGy(jkJhymE#p%&*;-1P(J(p6Jh4;kCMg)vtD#luvoR4a*f^~k%4KY`
zy+XB>pW?M6>suUmeM79*gp_uLC+U)3i_oJ!BfJn!GloZS?}AmP-eh0`?dOEAe|{>F
z2W>U7!*?88LIRQTYX98|N=T8<Iv*(uTN%7Y<IqCpd))D_Qv+OcQP3!Xb~eZP7azuW
z3?c4#BG(|u+06!sVsc1NZDy^S5Ej$?XyuHrGPrQtq;M{<(OQIcKN_1gd*QIvdzPHr
zab#v`FW1{yeP|E$8vdxHsFlfIUixKnLH5f_A~slC`<CcMCc8Sb-1&Dh$;_H!&I~P9
zMthJD{zG~<<0BtOD7uAm+MvMRILjurd}McB{d{)^fAM-Q5;Fy&(+mWyySR;DA4CLn
zh)xkZ#KPm^^{4Vvf@28w*k+FP0+-8n^1I5F8;y1~u$ys9U*WEj#9ZOi)KamySz{~g
z6j9c^jpe!$Vd9S?hMjZw?Y2+Swu0AgNl!0IjQp`gcV9l|?9LpKENecJg<1oFd>3<d
zJCxix&@26g+<6y|F)hd<yw#D-sW0g^Tz2fTjb-{E9SWMbUuANGfH|#&zgBhhd_;@C
z*V-aY>NKQdm~D}4a8|vs>B{kR#65Q{gccLLW_}Qpx9zh?-Prma`!y2ep3rIkIi5E?
z|MWfPjwv(EE8_+aY0ifylal)ue=x6pEj;sI%&YI=zTawaGJY?d`F|7k1a?aP7CZbc
z?D_rLqXqOY!k)mO9I*a43n2?Vu$dYQAp`I)9k3tFw-zi6-|HaR>)1J2=@Zh38(Wyd
z(C`p)a1eegQ7&XDXGuuIPe^8_t)p*7^|Khc3ZJEgfw3XAnWeU#z707cFk1N6D@(r_
zXWv4Yz^>K*D)!0v>#Ik9xDh~0OU=N-@&|M9*E$D(GzWjp(E6)p05C)Am)Pg8CgcBM
z9F*x-gZt4OWCj*u{g>HV`zp%ei*-ouX_^28sB!Q*tm6oNHp%5q*gc&!!c=9#0CPg@
zKu)TnFb(MwN5qZRydeLuxOrW0zzH&D8CejeLU~E)2Y(Hrqi~J9J+>UAKu7148O7I&
z>UYON!zF0a(5)@(vjPrXH|idf%r59cjeB+<`Z;z7YOGP*c2o08nlp|MWR)*tKWAHR
zQ`5ZWM;cg?bNT4j-Hbaw4F470$E{A;LGrHL44-4u%3zsEctd;JVC$1O8oHi)h1rRT
zr!1_51=sQ&Ttz@VrAs346;%pp4gKy*fiG1703xGLIqs+RGZGoWuWN@gTrYEmV3w|^
zZ030jh^)o$QW2za>lwY@nCfK8d{Iom)#uz{Q=QNW$`*f<ov%qJ+n3rMk}U!TE-Ht+
zkkgj&rSbV;bKG={AF&*nq!VBI9CQI;2&B`X)nQ5hE-|bSs$Uv?$Ct^scWQ;aUgJA#
zra^A<Od4XtT*;~Iv?#jv5IO-dWejKY)+dV@2dola8uHafgyGZFRxod%(Bh3?ls9ut
zmIxL;hKjAH()&<qd_>)1Up1`HMd+!LINJ~mXGZm{qzPm^pjUCvWX!1)5Gw&K-=!26
zzgb<LZyI!N-7Vh>SNLp{(R3!C0xUrJ-p){?Si;V@#|p=z+M-&)X;9?D#>~Q|$W|6W
zil<Jv$SIFFv`+KXU4l0PHD-&e_3K`(fe&Kv({=659(dRkqz0|)D5Ph64o-pUR}I`L
zHg%>Kv|2AQF|2N(;a?U)J(yH~%$Z>mm&^IGl+Ss&H7*Fdlq?OFwbVmi4b{*9)BW5z
zBvQ*dZ30^~lUU7gdP%C@1=D27QLsq=&3LkiC$TxVJt-bV8SMq+BKcvz%RUYG(kL4S
z_=|u7rCHTO$5v-s2LU&1M5K&zUwLVMWD;5oeuz}0KtJ`9q!D<V6rB6D%)TIE5}EXL
z;eio1FPH*CY*taD4+~I)FQWPpQxN!~v(|hBeZ?J0c}|26#M$jwH59WyYrt4CF|4(H
zA(#o9`O-Q$eXMPDNX`)>YjdJUSDA5%iHTFq#{57oW<}&=#W->z>CZ@RRZC`|nH_^D
zpxc@WEbQa`lK*M_=;@$}r#+9`EsEJp)!qDqj4%a}+QCB27*6-V+IHWJ6EYv}+p696
zYh=V)2mic!#Vij3yXYH{T_f5B0`}r1s|xFe$^#Nzf`_#i6)TYa)2yDt1TFX12l2KQ
z_sL^$l%XsY4MG-e9naP4#AKmGKIs#gdwDZLwfJbQeBE5@BT0RFevTJ1MknnNc%t@E
zCJQtR)nbpDl~<#mdeGioo8WDoQh1g@{^3$2<=$ol@`ntI#o2zQ6;rzv@E38=f#R4C
z+Eo|!VanNLu(7V^Z{H&b`S7q~0aAw6MlxWqdm?mwnFCw(+@Pp$p-<40Pjh$Y;q7?Z
z-Lr_zoH$HIq(GB|njBtaBEB2yJ!#MVTtSDw2w)tbc7d7~kG^ysLDLRW-H8dKl%!zY
zc%Bp29t<)FaGOJrB~_QoGX^^%uiJdmEu%#q+qi(w@WhO56b8kVEf}Jk$W9i<Cx$H5
zu}F76F;IO8-DgOhHkIeu1EX#D&0tJD4)wE2`h%d8idQ!;TXTyBUhOUh4|!fz(GFV}
zJu$VVrKAeQ<nj(SV!j7m%s<9`pH=dTIk~M9SIZV8j2t16u?K#MJ}RJ$@JOw8xOM7a
z#`?+P8xsI1HzvN_*||Yl*^=b>L#riXJV;P?ud@E#vcF&kWn?{jIz8rAU%TRP+e>V<
zJTLJQ06R^2i9M>;<vn@Zr`0z+e!7e@+lsqMnMOXtFh+agXXqB#?c6Q>LqRAR5qA(!
zlb=7YlrDROl)&~DzUE8)403|MCQ>Xv<GztoTw+mr&omB30Tlun!9BdgSw7r-NZL0t
zmjR~^n)z9Hv(fgnX1|(sOIMO)n=!ICAnI5^umlhM46TC=-gUT{hSi$Ikp@)bLtrd?
z!pWyDn5|C4H!Tb1X5~kCj9}0D37OIW<zt9#Pq_^VC5B<;q(Z77mo}_uYD~jDFOrI;
zO(9{muRK9uX=Cm#XdL)T-XA7qoCG4!({dHo+?kt6!sS&9i-K{<YeszW$a!^6_2GI3
z>Xm(3ymv}9HCf;U;Z5s9>7ZrQ(^XYbVxMtzLZ;~3V4XRuOOwj>u2mcPrY{)-Fhuw`
z;LqG?zdYCe@K(TyTiAUxg-YyrBE!^_*+!jJ2`wG;jf#<H*K1V9@E*F?4aVFtE$$ig
zOjrT^pT<sWu|Y-0oyyoLz^DdK-TJ0ezNX_W&2hn2#MSqCSXA-LL~t~VyDTGI`LxD%
z!#>O3-rmI9hu&iRR5ilcW?eG5$F?<-a1%NQ&!;tzu0sy1*#KyZqg4JBou$e$Z-~As
zFuiIlsT<5lQ~tz2`V_EBw^ygLKn4;~`R0R@$@Si;pe5=koHUa2OYw3?dJ8qW^cmYY
zN{n7_7x-?^!O3cBQ7yztRMD`4##(8)35Zj6^(QG#Jg<aNCyj$0vV*3I@fQ7at6eU6
zD=C?j1*U<O+d0NB$gn~K+8AEjmZh1wk-lZ)L$Z!B6l`4-wgVtVI=>0iQ_~sn>IHRo
zWH7IsIqL&+b_BN1IQksKr=*$^h7}XlP}v1KT&E(l2eRrTupt%9szlI5#s+LcAi<Jz
z@%5%?*KLSGZNO$&LYgbaXqQO8#RIgAoCqm)i<7j-DFe|~TL4gsp3u8^1E=12#YcvL
zq2TG{$E6E-I(cfNK=nd?E4995lvn~YLpmSZ!Vq!_4ovfdo-FsV%y8AaEqi6-vRLU=
z5^LkE>YGnVTS0!$aDDGk&2*h?6XFT+c!;c)xLw(>+7{F|*IeFTR*^oFWN3Dq!(7sW
z)iO~5ec|y@#m^5LaypC=rYDYJ+K$DW1*}@v$YUZ!ozmgbHt0B*{^E!lHVhnK68^IB
zO27S}@k88PmZtVp?dn4|dr?=e{q8hmg3E<nbZ6tfih1FQ3i=2a1op$RCsF<M+UfpT
zdFL`W2cR>L;p+PSdH}IQ^5xf3`(+=U_^uJVHc){aGwav2D8X84Fdb^2oTm<!J|8eP
ztV!*T0_=vBbxxyFg#qW-IHU9PJS0fbAvnY!THU%Uw$+>@NRlmq@E4|OHWDhi8w#31
zeBM#oeA1uq1V@DfVxBw9Y8HEL#AtX~JlLHmVsj|^L3tiiggNhCa2wnDW!<LV4BVLw
z2jHq|-wRRBRHlP~XF1L#a@3VnAA)~4ZrGzta9#OquCF@Km+4u%lBY@Qiqm*1an!^a
zTf+kv&GclRbgW4NfArZK9WJ9MlBcwG8R{QK+3Xy4?7}4kvr$;u9gU`EP^sLz-wQ3P
z2Fp-Gsz2)(OYiU4URD=A*OoFwdlxr4*=ayprhoGug-a!gev<qZnr6+Gp7F@(QTZ0C
zi-H-FfM}&U{W0d(2UiG@k-Er*MYX3`ltfX~eBJCz;EpRqJ<stV*A5$aEU!ZYw(TA$
z!bte@zn<KYAUZ@j4L3a<b*)KwDCS0=w`y16&RW+bawI(HDB9n69cMrZN)s@c{t4UT
z{1Q+&7&j<IBF3tC-SiAX?DJ}0vbz9^D6vf?D9I-X&x`CFsRtfh<tdMlC(rJ|*<(EZ
z(29WRTfFFZ6GTeeP#=Z{XdKu9GdO`IYQLY0{}yHXS;+SPJo}UB4^?WvU;BLw|8w>y
z02omM+D_lHKLISvgaAecYFgSK*`L4?3IC6>KYtt5H$&#TsQ&kW81vtgKmT?gBONWU
zc@*Fe2GFlJ{@DQfFLAG54WmCAKs16tGZDb_!-V^HS)ssM4Sy|+$9Grg|GhO-t@y=u
zUJ&(RSd&L~1d~H>%!^x3#Sj&-T*ex8+BlhkK+m{*Y9~GF2w?tj(X!<BQTS=ku=sIE
zbq~Ay;i7sDc}Lp#Ug1y+kyypFrL*)po^ca?p-*n6Fu1y96j~n9eLQ***?t)C{f~hL
zhb(*H%5%k&wKFC9#CHtaXAssx6C@H$ECX|M%8e7?@8lEtb-p?!bQZ3v^fM_cKl`M@
z)6(ZdjNZso^=4cxgcWijM;tyHj_k>GI^`TjT3XlE;reXo_Aw5Cy~Vx*eu5`l(o|14
zYVyD+nO1UIH8+dw$gLYqVJ_qqP8P(gcBLatz%ubj^!DWncyC(S*4WO9zB(7zr*qoC
zJR=%P3P!NHV(u%ju|Xqhc~;7^eA-B%#7zOk@dkl>dkH!_KiR|tY_Y?4f)<Tg=b_VN
z$4p3}wtyGr0<o$Hra?)byb9}{Pc%0o=W)65jmgyZ#lVYu{GxHXv-$8zv=gf2o9!_=
zXLQJWJDF*WxFm1^m?Y*o)dC=}+2*Jqv**+xF{u{x&cXasdLsTAWMJYeZf)vE!P5dC
zLfF@a+@0(uEhNX=_3`9moh!-?@(9@Q4qU8UTNO_pM3%z!JNha;^mh_55u(rdu+>(y
z=E@h39NCTvV7NX`%DyL4o-T_(`7F$YO-e3Te52H5v4_EKoeHIx#34doAXts{&fBW}
zt07A+mV{+NtG-kQ8o8UeFGxX!UhGOeIqz(VMalgt<Y5M*llKZvwpw1Fc?783hOh#f
zw@~Bfq(a)G&e^r%{w!<3U6nqeL<7}Rc@cNJ&8_;5WN0ez3QYt3k2^!)X$0f19Y*7l
z^iVQisE>$cR^z-z?euHs!Xcwt)904j0v1!IfjqNeB5i#SD>fK3b)|D230*EUEnL(@
z7Z2G-0ieyx(y>xjw?-^GGj1u1>t$6<_k~8`bTEr(Mu2al93(HN+L+hDnH$K^wUI+H
z^@TQ)^dIi(O>OD9YJ??%n!tL7>!K6=5r&SB%|=6M5aLZLtO1kY+awlrrvAWc%vA8m
ztKqk1Hi0mM7rC73QDB>npqRyhj3jv^n&idh61jr{H~l?+9pZ37^@Qg~9|6`*?&S&e
zUn_IuG1ec7rH?+LJXzt$ws@zE{gRc+W^Aa|^rL7knJEE+XOZX*4~E2ezDkyWLip2n
z5onk9(75BK%#`Xc%XAzU&~kDE_;kZbb0`wWXFHmELz(?3(+uz@kpvKtY@brpyIF$Y
zQc5nRchi}Pz-Z_%){O@+l_NBRO}n{!<5g*E8t1*Beul?E`j)6eGNNK@R#_poB>k(X
z6)}KjV*qU|u0z%dV_Dbbl>8L=bRn@itv2BZX-GY-WKkjkLriWFYbEHMbz1~Vjm2Rf
z<r#Z{X{op@6ek|T27-Xg{S!R*F5Y%2N7e<srE^tYQLy`Lgd8V`bf?<V7Y0e7R#sGd
z1l`$sl54Edh&>u{oIUw#^OFQ{qO03(Z*<et^&5)PBzpHSOoI%&!zC|98ME!I5ENyu
za*~-N{GLm+3X#`w^MBebYDsGJRs^JO2+W`_vn9vunThLQ>)s$a)yGG#ydjF@7WW`&
z(g%O#<u>U~STh1Al~RAlX6ry8GVPlNvvGpO9eD`SFOTT`Mg5%#3teDfMln>ScH}#6
z8lll?VC=PxAk2fY#Wgm`ozKGEs5gp<Hqn7Ljs)^6f2hSsoAs;`llu~y7tK&g?ba{_
z_hQ}@bvnSVgN&1Y_3eW?4ZOie5Ag2Zvwu1#nSOPm{n<JBT|fVh%1l4~mH!fz|8n*L
zi>m*n>%T+Y_ZL3?*ZM>Nre7iJ?>Eu|fK}3g-bzM#pquhf-G#o7_YwF10=)mY?SP)W
zUz0u_^Fsa|egVrW{WYhb_{GG+@WYu$2ty-iY-Xo#Lr5cNrfsLsudi#Vrw??0!r0o`
z=xdw9IL=Qgs#+$|V!CZ9j-HdehpMcH6qC<0)0b%=-u2_Oce84rlLF6KH42<0c)g&8
zjBG@hRP!bh8OJqjWb4%JEu4NEm(;u54Bn2ICy4pn1icz$ZdN%CX?ChL@_Nb@>?kkP
zA8Igk4?#U%x(U#OY)ia6h#wiQD&-D6dn1T4y`vr`H0<;++^mJ@CryL`ZItNf)>Ssw
z7QV&jkRWU3Jq#fVu($V&C^C`~up%tW6B?d<&mvn}ssy(YJt^j$02PjlqmR%Eiju4b
zV=?V(Ic&m^j1RBwz|mm5y5^~K4nMgEfClVSusH%gvt7MSxs8z8i^kFzWQ}_psk!!C
z47b~s$hY0V+u%9*vC<xrdY9T$Wmshz@vryAt6tc!VzsYqE_;)Se7Fo>Bk0M0@pnq+
zS<*U>f`WU_a9Zk$wM)8KVX1l_=B7M~^i0|4U5)8-b%&RtrqE0^7S2mObjpis4IjN@
zTKOljf#KxCWupf{@Q!T0#F@r5+$n8(A|e}kE{wHrY+cjFAzWfgSePt?ORG0&;&z@X
z^A+Q3)hu=GO^+EuzUi|0*uj(+lKxyuS}&z`LZv{zId0i1tm3xzoKl6Ozcwv1sTGZJ
z@R*8s1Lg3d9C;-*sw;-H{mwgTu^|pk>*4K}ySB*JFZ-UOrA6ckL?FISi%Tja`eOSv
zp_o`Re_c3Ps>lAOOEC4$OO32%-JaJfY{zNz`E!;!;UIKq>EZiV6<~KsMP-c!0;aYU
zTcu=gt*zcJhrp&PZpeHd1@*sOAzz&KFc*LfINPiXQe1MIe||#)AC0T%eFZF_&1kJC
zO*tVKSy#v*QtQBdPC}?D%smxJ^`K{-&Wo-`h7||Y8snjM^2k`ZIYbh=Q3+_d${NlL
z#4p}{8Dpm5OjPc@goGYa5c;S30Csi#YwiD8eSF^@zpD@Cf0;VQ{38bPm-+xO|6PB4
zAHe^9R0e!!`A-e->$d-oHu#s@{%2SR=I@~oE1*?FOH21X%mI`bkSPoR*s}DyoM6ZR
zgd!GpHkNw!x^~8v7KG$5|M|+#WAMMC$MVydf+3?L{1IHSx6%GiP}a%9PTTPpx<CWq
z2kSqNlm54Hviy`UkS;ZVP{0vrnCSuA$P(Ha>HnRNu93dE_FuyXD!l)R4a4tje&)k{
zV?#$Mr|)QIuWd#sYp-K#XKZKxeICXZgbLbb_WHKJuy_>WpJ&4OJBy!y0YgSlsAyws
z_iYw`lhpSa{30pVN1^^3`m8^-K9D|8QnEj237F}de>Y|S8Z_&pME^(>K>ORESs&LF
zkSN_FQ9es^D@$Np5ojU*LiSODe<lm~o$TY%B4Z$wwbQoH)3(tg{27!Z<kz;-{uayo
zMbN-E^8SZs3HWVk(*e!lpG%wZpTvL4<xf+fqkWX%pQrFQE|1HLj1egA$ABWR$H`C8
zQ0r*h8teW?K6JE?lKeCA-&Xy%Bg4-r$N|^6gtnE{x78vGY?<<Hi+)#YzfgV@=ASA5
zw$SMSk4p+TY`Q-U`&(}nU`X^)3(x@`rTJ&-Ous%?I>6)FB4Z+yxBaog9|b~aXZdsg
z{#-}D@OYHvpLwwS&f{^Rkug7xpOA`*ijeo4ya{!`2X-HIkFC9xm8Ff{FPt6)`$tZ6
zzb$?`z~ibT13FuN(wDadD$~E{J+}7#vyS{5z3+XwziD4GmdBA(>pd>^?{nbSH_)~>
zvm^X=mGIjhAhgjpGzK2boPJ^XDC0j*;kS)O2XtEeTu7|{VfiEI{hi<ATg`;uu6=u4
z{}9+?|IGg~!Qb};@WlJb4Ja`2@4<CKNgWg5y8UZ2(*a@a$G84Gf!|4fZ}Ie9X26yE
zpIv6IXJ%}n|9uL;ooo(NAXMKk0NV`N*cj{S|8pMSTSR@M^Jk{NZ@<UB13+;6Yiry7
z3I^X6oUEPA50U)^5`R6V$L2o&h2QU+^08*=H-13D2{>w-Tbcc$|A56#|3{JiPVKSw
z3k(_0qsHd9)V2SH13y#;aFrQb00HP%w5R*6)XZZF0MI)6w<!P;;q7laXc74j?_mnn
z_I!rs1MzOWAxt7vj6kPqYy4jUOtMKcGSEJB_EbV=@U{#(8wy#XLb?XMiWZH<P9%?r
z(5uSt<0GsM56pVwY-;RCkes1bc~?S?P`=Ka2Vz$Llt%I@3O2{O;XRd9`OKCw*X9Ck
z4~kdh^5t4paF6X&7c%FwB^+9-z_b;KU{P!BW6SX>w~reouU|S5n((8(K?wMW{o>Bk
zk$9JCGzugIB_hz_D=ee%CTFpT$x=Rop1^Vm4`&oFv8{Ol=h-aX)ryQN<#vee9=;Pu
zpF$UxP1g<{HC<1xxpj%I?R5%R&;8yBQjh*3$aXH9y!ol(#uKduxu}1fN9lg62K&eJ
z=nu*Nb{?ezdNP3-C;xk{%|Fhgbl+<T0oOS&{{;q^_M-Y@0RQ`W^asnw8~?IM|9_lE
zfp7VL-{;?l@?%8-2$`9gU}%1PpaXib0goT(fwnT;;|D?j0GLbj{pWwTR%PYko=)3~
z_eqM{4~$TZF;3D~$ylAkhcPfn(N5B@%E;SOi%L-T4UX7~P>NK`&{kD3$XQWJDAF>>
z)6&+`(Us9LZpp}74b$XP_KS}I%g0zvLy*%cS`A82^^3n5lsy98EIb667@3@qg_xX{
z{9Z+-Aa6AxItvkET)GG`MkQHAE-o>%N8czvBI><bW<*S4>ieD?1e-`h4V4yeR(_V8
zDiBinI2AcKyBKe%(a1y??w7e6TbpvPva@rozuats?~6h<_+p3gGA|Z0yzHR?VCZqM
z#L#Y_*wWj+%6N5R3o9xn6O8J5Dk~FwG8+3Xj~90<DKSbpF_?N+IVvIg{a!+RYV?P6
zQ=?2nEi;(NUdp!vMZyx$y_6!uQo>?`<ZmE@^FHUKvqNIKYWZ)wymNf#2WiT;jz)<U
zk?>v-Sw=bHgO;Zh{AV1Z%QmT3{Y6p%Or!L)v=FZ#crhu^hv;USHg01Eo9rN8$kO|D
zW@dJZQftA0ort4-fJdV{m64%9m7(yYo}L*PpaC_J27!_&=2dY=!XQ(D_c|Dt6crz)
z9u}b-2I;fbcJA&YZ_QMh>6PJ`8HY2f%gA`|U0-iZ_2J`a)8NN(IY|siS#WT0*cSnZ
zQDVVCGN+<#1y3=hXx&vvX{}^sDP*GSEi?usYb^Q_TZ+x~Q+7YW7Al>wdZa4doRHpl
z1>o6N(#x9oNNhpA%8jRVsy-T<Zt3_mhe6U5Rh$QdVIH}6wi|d;A?!FbT>?2kM9PqG
zFw=-csWG?67<!Rbb4@YQ;ipaixr!^QW?u8Fqe5QA@DNS^*-OXIIZakUDouws&8CaI
zDm>9uG1-HbC0eZmWTYBS7gzdMlI%Nq1ho?G4(D;<hMtb)s`1xd4D1KlG=VFrR$aNV
z>P`80mxITvu^8d~Br^9;FE94OOB`P;WK5P^9}6E<1eL1W12Z-y(aM4~cN~`*k4!^f
zznpPb&dYM3!L?w$7jA8BcS|b@9}Mj2#%j%ZV{{{dBEno}I9T;%?pfc2vLr8a3A5S5
zgSR^H)st}jz&YC*$w+%AS@}BG+WiZu<7E}jLvwnf6`DD>gqwK_X+G^63g(m>kKwJa
z>>bnk8>$Uw3srbnZlz~kX)TPbw)@^UQ9bbWa2!##reM0YwrAHb8}4*>IWNCzT#SL)
zwyP!&@;2@vSXtvjb9Fn3yLGiB#Wycnbi+;o(5x9~Kim~)hb}o>UvMprI%Y`%B>kK*
zwiON#J^C*jE_^4}sgL8!b4vvrTCT8EoCl44x{Tj-mA1UyN36dyx>?M3N+OVt$>(Rd
zW82zZu6poX7CE*pz^imfGW6Iiv;Uy(8hzUDJhvP6<pgXmig$s4_K4qbLj-rAC4I1r
zT69I`zV`hP>|x&lTJDUJ88W4)rNYLcs7^x5gIBTmDW&FwvTE2RS4*+|HeZ_pKGz8E
z<^x?rPw~0HVjYXT;STol<lH;e+NT2vMagJ-nphK!^%idapnjgFlb+hz+%|7abfiVx
zEp()rFhJsny;c69_J72i9<}KYwf{}0!q5Os%J1honr|0@+E2g&=zjZljG%igD<f!Z
zV{6A}q-_J#g0w)5sQv3F#cyp8ZGm6@b_fOn1n|(S|6N(rh{%a3D9BRqNysXHyY1UW
zetlbA8)GXwOCYK5e$L-&%l+p0<gwNLc06Ea1m4Q0Z6&O4Y-nUh$izYq1Q8&WZ~vnL
zurmCh`$pRwm}vPoh9Y*_X2!Za7KXrPjZ^^OH)ZYg%@qh)7-49Xev)IL2Tu6gz<9q`
z&7lGw1gThoAwpnH7$zVvFao2OK$oPUEg|skZvv3jw<9E@(b3j5wKdbWH6s6UGthgf
z51h|8aQSsUKSc8Fgr~|#OAAak{prT|A=lp@#iP)HlQT9j&<B?5`PPtB6-beW+QQOK
zPv791?Em8YZG?~i`7Mi;_UF0mcZg$UegrZg#IgQ2h@%5e;a@`BzKp39iXvvQf&e-w
z658<;1r!8%A2q4378dl=XJm1t0oVof0|ycnWpnUJ@TBrFFv2<2GD1NDIWlBK;9)eb
za+_-l?N3a23S_8Q2;b&U@{m>QFlUF!-p<|c*d3<bwb&liZ>1hOj|9m+AroPO7A%J&
z@C+<RL_{joAO}yt6liLG!4hzaIzZX!gb7CL2h3)ye5OU`+0O@7j!60R7z(LqtY4aq
z6T$Vxtr;<^6gHs{5(~JFwFUybH<Z@|_YgAbMzKzMwsCsJJ}oF30}nNPlnz7(hzJsg
zus2%}4>1$pSV$bCjY|>(jwX+B1U+=FXQ|dW!c%mSxQt>?Ulsm}7_?z;?`V&XNvmTr
zdI_2?cqS)RVav8Uugqc(Bqg7*LB4056~r3}Ym=L!2UO@$F>|!Ip`Wc2iDE*+-VuUC
z2aP$=v2(xAB!eYhCrcd#r+SmLDn&rIVMW2~%qK`39M@BTOmw#|i}&%9RnXdcvEb9u
znT-r=7j;-H>lnx)-Y&8rXnyVH>l>?C<w&^pDAX))uL?p>TU-kPDV|qSq~N9iZUAu=
zh!GN;SSQ>7tg2^(INN9it`5!zUQl9{M19tqIta7;&<wH&#E49CRX7#)qt7HA9m)gq
z+;9+N*)pCYIqlpfZQ~Eas@+lr2AIu=y;7*~-FpB;gD@Ys*I#RVZ#AY=M0;_0VTmK+
z4B`^fV%4ndCR)xSNkY-GNW#G5KQyS|?kM1Dwnh_8OjTqe7<{=L@98q1N=rKvt_P63
zOGrB$#yeiD_*yyDyi%vAuysJL8X5n*5N$=;MTy?F)eiq_+9}JmnaRg6^*QqiUX%Ls
zk3*F!=3i2p*T(CJPNeH{vvVJM4Dz6D<rh9XOT^a3uvdj8>}i|tn_cV9Q}zsI%73_7
z;grQozB(}iELE;T<j>dzr7jZ8jQZ$VL{i(~VVc40!NYzy5w#j%JEXWUxl0*1NI;=M
z5N5>G-N9!Oi3-o{4$eItL(8H(t<h{`KOfV7?ayN|0xIEhGH=cmJCu@y^m1x?S_p7e
zf<b3sN6NJ?XD4A5++uUPniJl=+uTc~Oj4Wrdg#GVWAWy^ujJEp3qgaWov&i*!sMC}
zoJ5k;jn1v|SfxvW?$Y%QDU_S_M)5ggQ1TJU@cnQ-Y4aCv*KT!b$B`YR`uo5eQ3{S%
zZ$2}l7xy;aSE$M|10LRFTu*Pl8Y-ES$Wot_r*^=(+^wCz4bhdvJyMHdCAnPCt#-ed
zC?*-n-ju~@zz+&@m5EqaAfp+R4xYSw=aVJD=6}-Vk4_1_^m?LC#)f3he6Wj=?jngi
zPi=iHj6dzH5dJmE^77v9?4iuNfDSHmoAAQ7p<|jHXR3);E<1KNfI8b{m*@+VR|BMs
z5*qRR)Th*iclPZypL)L>jP}iW$VCW$nz=*NBWw85+1@NUKOXOALW<V>U_HWiHn{bn
zY8+K;n(V6I>~3mbRg5o*>IwhFwMoBZQF6`gyOG?2^_te%)3yxj-Tk=hG|p4C?um&J
zEvGv}ItDh2hm3yb(p6ALE}1LB_QKC^hQr8^Hl5tc$O{rZ@D;W>%QAE}S{6H68at+4
zn`_tBpp;j-!8WHRVux|YY#S}#hH+%uU%lE&5lrjQg_#_l8TGh){s4RX65=WLC)^Qi
zD{9Cq6lC_g$bX_Ku*}(Cmc+kA({GL*AVB_wrmU>Lf+;N!=>7wy-?aE|U@D|2C8Hql
z9ZZ4v^>-}&?w$J;djOCAy6-sfe}SdH^ZPGQ3ixI%{TD0+7FMNV1=@bVqH`?3GIZa{
z!2JPA4a_Vpf&LL7wA1M78v{KiK(ICdddI%c@CUAbN3>t(M)z0>4n~y;7@Pg}`wO=I
zbf<lv_m2njFR+!BmFCYQAp}nGPdNJ+toR$u0uLvTmL$+%{BJnR4D?F<Yn(Nflbu0_
ziu{mKL|2e)!bur|Y%B=(Ji`+dIRG&j3{)>7!G+JFz8-^yIU<Iq(+fsk8NGZvN?I-k
zrZ9*bPs$`v-U#&N_^XfLaU=e6#X+7EjausJIj>i4&ZgP7M&?%ThnH8bh7Ia`{VItO
zTR`L1F#A50JLw>%OZ7Ubai`R)Vf!F)x4lAcFXu*s4hEGZK7lCwO1A0(QX)lpgb3@)
z0QIy@HUuIno5V{M#5Rf;0vS)7&;$(%^_2|lZa%gN8q7y)MzCl>rigShC3dG5XhnXq
z+C=d0W*P7ixC0U)a8`*SvaCrN`vCCId@Z$w(@v;kp->89$X1z1qM^wm5yLVBLE5pg
z4H@6q`;b#Tz$;G#4}2wSP{~Q6e7!lja`Fn#8~Z_PwE>kA8im`7kJd{fPq5vxi(KpJ
z*lw=jW;Tq#Br+wmA82r`_mKKAx{!>10O-4qh%&{H*l39`l54b35{kT6%sD<NSuuY7
znTrpX^96bdFzvlhbz$J@JSy^ZmeT<G$X=M-l<`BGCuBs`D=DAy7^8JeaCys!+b|r#
z(0KAg7)%24g+b^XLD~stLA1bg5<0rAq8h-+3@bdLUK&L|?DRq6qP~$x;a>rBZ4*gM
z=In14**laxz373*qD0`*I);>^IDnxcM765C7Y*r*2z6G%e+r>Sli?q)1tY%;4c6UT
zkdbNTf?<l-Z;Z**W|DYCT5`U8J=Zd^U|NwQpVCwGz5rG%%*_0C>%LqdvrFB~!W!Gy
zr7gtm*x~uY-fO<F1+;Kq*A6cRZ?0=&kzDc@$(6-D%X+(=MY9B4yBW?b)Y{#f6*bcc
zc267@>1puAjqN-xgLf+%jkk(-c@Iy0W4}h;zGtkpXmO58X)4GXF*{}4V3QLMreQ~E
zz)-k9wOE@9e8S_1W>M0@@!H~nzGue3c{k-?W1)s5)0@t3Dik>_-nLHMC%4t7n?0gw
z7FYe13L154^XO?qMH7UQ>-M?~^8UgiYUZW+IX8wZ!>Y`Qdo2fZAq$HrK5N6OlAlk5
z0q72joJ6hfuIfznwrhE?lh2u{4PMkP`xTc!Q%rYrNOj<PDP}9p9P6p^Z7@dj$O`JH
zD*gw0HOgek!2p{Jw$=F0pScFjw#JJ~-L!22rHxw~ghX$>UCb5GtG>9O?2ee>%X%7(
zh2rKV3AI`=o*G|ttv9%B5>QOndo1Y9qr7BP3ecg}PW!6Jbv=_4=M8=fq0DV&`Drdd
z^&s99hguGjZTj<22s2}!p1fG1!8#}S{i(Vu9)|Nsb#y1ZAjm%TX_p1x8u)&__>=a|
z%glA15uYWRW`B3;Fb2jj++(#EbIvM#o)hYGpMXs)jaKCpgaQeMx6V_k40kM+b6jLZ
z(bWc;C&!5qiTiBpx?{=BJ^W|kjtZOaw^BtD-A7}P{L+W1^d>4d2bsFBDXPOo8b6j{
z#0HuXbsw6EQ3z`JyzHhQzchTUPID9{=Df7;qPL&16S9<q&(%_`GZuEnP%hyy;u(-i
zbl1dkGc-MxO4uCQTvfn19kRr(V6)Z3#m`W-x%k|rxlr;evzVLewmrr@pvDG1*s?UR
zcQq4ma20ol0+(Meqqy7YnG8j9Qy8^4I85rdesPjFoKV1;qtzIqhwE-%@j<Dt4}VYD
zsei%`4zCbZcTH?FZ)_8t{$LQ-MP`rVGGx~ENTsYS!xdx0p^?aJ$*9YBWT^z&Eo?nD
z@-|F!#F304h`D^t1!c>0JoMeL(-#T#9N4{ncu>mPVneQ>%czH`FW%il=%)cwC$!t_
zV&*sJyJ3nxu{CVVab%HR54T2kLM2l-%2$QkUqhAJ#~T10_YJG#?JZgCG&w@<35E^3
zxpvgC=&dy&($nzOs&QR-pM&wRr$WylU8oh%=iD>-!2`wv)a)1beW?oHq1nT~e7mr>
ziZ^b6Za*|+np$nO^;t6+pGFL}^wK%y&`H}O@g!M%bc=b*Wv0td1zEJ0ja81Um@C0f
zavml5YVpt?A^oBVTqvF+kWDoz?LtRt!KJ&fNtnwuN7km$9wDm+*8BtY&_Y&%>U!D#
z*V@&^NLEzgXP5Zo1BryBvkA!#$x!$I-Zn8hEVDB5Gt008>>7hT-FJ2_+ue7#-MzcB
z5)8ii08t(^zVK{9B$Du>5`ADHJec?(F(6MSKH!5A5{W<2h~KGGb^BJGUNWeJF!bp<
zr%s(Zr|Q(HbE^OSi{5M3|NQV9@BHbB%dg)2@=e(*Prv<x@2N-MdhWspAN@Fd@w+#_
z`}Ge`{phyj*4LkT>Gw~)^87b%x%It&z1#0UyYul2x4d@drO$l!)^~o@Jo)Cy>$m*z
zbCcbFzW?FLS@BD}6aMDO=g<G`OF#eoU;c6BJHLJXsTXhh&G-L)>5Gp(^TO}eo~WLA
zKlsB<=WlPySHJPx(e<}K`pNa5?SAy$lOKHRrw1Ra55M%lm!E$9_HVzjxc%f0|60HG
z^0U7depB8(ogUL|B2UY4o3d^UB8!$@lYMSy8G<<f(3yMBJPfbtzO5A@h@3+f%iIYK
zum^MFG-VWHIA#FC>+S|#Qx2AI;54n{QfNKGIKqIurZiaXHQ^>^rY<5k3Q1F4UrBo<
zj;mxVM^Ah$*RrSocAO>Y#U8_1$^u0U;C)}~8TGQrveV0A3+=`wq7@G)9iyvIjq3Se
zNYNg(U%^LH`^`XKIje)k^x~jdOvZ!7T-~d~F>1_z>Wjz)xTN%57=k9t0E)sdC|oYp
zq?#^}O5ZXHA5~o#4w`zZuJIrKFZ!l|R>TDT&tqzTAOC`n{c$gfTEcOF(GNiQ@kKo^
z>5_R|s@dUSSgP{sa6Fjs7o)wgnl2`LWizWTPC;}$9yIE(Z1nJO!G<qzZ^(_=CqATy
zo5>?q`u`Y+@LSnbvrFo5yqGE6?@tEm;OOw6oD#9B9w`>XwX&%dB%$V4YBfJ-%2Msu
ziw1%OtF!8=nn4&-r2}hfirY)j8bb!Na#HdCX+0|+N8*W{UNBc80R^oFbG4!FRHxNl
zYEzw2Ur~3fE%jA(kGfagr|wtV>a2P|?WhOUIkl@EQV**~)OmFQGl1Y(32|afZ5+))
z?f8aOHLMz35URx#)zGtH-5B9&yn)(NLp4$ukGiN1RHYtMm(*BI)Kt~#P(7|1HB)o7
zP?yyebyXdyYr2Qh^xE*JPH*#Sj4RqO%G)^&F{#VxMCJrK8`n^&b}lun9FMERSv6Cm
z0VOGzKQz-kfc<fKm1bprP%rf4KFF$daZ!!6qG*=3H=%+{YYLOR?L~jD839q|cyBQt
zmve>16(LAfPV^r=0yS??Jw6#Um(<Mbhf!+`GVKg9?W^;n3gJc!0TIlOt-!chcSrS=
zDb8jvKvFw8p{e1bp#bHPI)eJ_HT9)(x(7|3aWaUq1ffocN2cFqv=6P)N=8FR4iK3X
zbO6a6)o?IIjMM5c=NEvdk(vyKWR#;4Yh9_;U7Ic`G|~b{*Be&s_S0avn8UyqWG@34
z<>7&`2^xW>Im8?3Fg!Ah7OQSXlfi7bpc{MD@@H6|k^wY8CX|E0c%Qp8z*%b%T2LFj
z5Nt3X8yzLs;2GLzrL#fnliJu+r!7mHjJ(Onn;qoM7Hf;!Y;l{dPMfW^&F1_--Ot@^
zbMtL(zTIiQZ5p&#6Qp@mhvOr)&CACwqwX^5ZU=R@#W~Mko;#?K)(`!syobH<Op)lZ
z$PD=FK?fHZcrZ1E+z!?{HPB)hVDYhHz-$w8$;f2~8B=GlDsEG88?#zyW2@RWCB!i0
z?rLscbMv}GvNjD`tWk9t=^CnLAuJei!HA0v;-bYk;xA$15W<lKcXz*3?z0rZ`J}rS
zez&iCopP?4l(TjADLQXM*mRgqz~j@?K8<|=qV1CHW^5nNQ<E$kvg>!&<57n;ewX0W
z2G1mI7Kcaq|0n028oJ!!S%#H9y_D}R$Ct}Fmb>-sy>W$|v_s!yMg32+*wXLNjZJP^
zhcJ~C&ZwqvfjczhKAh!WhluP0miJU6Ts_eo?erMdGsS9L=Fl?<#X4(tr>p09NyMMn
z<8@Zh@OOGTyA*%)O&WY)odZq2u>KZTUi7!yVHp0mJt92EM0LL04)%W2iOv|qXPt3H
ze|0F<>D0++VcR&~FtN#Vr{=dS5F+Hy<~<i%)fnoM6L+LKvBsWFR&<I(#p;O+&2*W(
zo9;+>%0G!H)PvPQ>(Ne6&oHE-<&MAn17W?8(|%mqCM&R`dE1LQZLv^39RvrOxiqFL
z<z8fe<(oA#j5=z}-?f}pa2{~wto4JQ*5PzCoDy3RQ0Md+4nMx-Ps@W|<eCMO(>ZUz
zA#z$dmgH6ta7Jec@C~pgpv`AVK)=hn<i{|6KeD=)!)BQ<YiMd@VkE>eSq~kVn9Nz0
zu;IbvX<-mLtC9M&_U*`o_5Fl0JO~Lz&Wgvx**3I<^tRI>Zpd{+tRdcv>Eusf5qFlG
z)!dn3819U`jD$36t%U5SRT9D_L}x)VgD_J=JQ%vGCB!R)m5>~uyY?_~ouJDD#4Ee3
zf@Xk*Tt`A0;z<a32O109p-R{x;OdaxM9~vVNDmaBjD*-ctq$2wt0d&^#byv@YKR9j
z5341_3$*1y4$!?NGsKO|LEPNzv<hm;vC#%XB?D(KV{+!^i!@5-vqJ`!*A)baq$hCR
zT(nB+Z2K+&!zznd4`wN1%?v)Wnc7dQM68G9_u86is4EFNy*ks3-%?^FU>@wGn`t&9
zv^<zdnc<<kd@>oAhY*jJ8C47WM(eETTpoDQx6^E9X{pD^!VzK(b(sc6wSk|5axFcy
z<n%O$_@OA`qDxL1ZKz903^~_mTXI@L%!@<LbxO3>gm_lk4joSx+}O%O{eA%YHN;I@
z7Qbbk#cz8ye$~1oe(m3Q;xAfD4|QSCQU%&xSwaObSRDyzh)=FH+K?Ty)(~29JP7u+
zx&n6|NjPUD)S79i1BXlQB3C)=7H)_eoiY+KtQ!fLo>xl93B4K~OgNKtDEBsG%LDgn
zW@#y*NIwl|B}Ca_@Mz70dx+aq!}SSQu&lwA(UWSQ<3o%{mnW?u`bvsn;T2V+t@ZSS
zdM(g>yKk+>7G+V~Z|@j#i2!>1BqGc53pCWvz~7*^_NBm(WpRUEyEOtBlh%9Nbfzn^
z)|VAR=#tk!uf=FS!dJS!xaDSMilkh0U2Y@Wv^6EvgVy`D2VIM|hQ#Zxw6AbA+B$Ox
zzS%iH)z%HNuhugF)Jm|MZUHDiU}w?-Q255)q6MHzNEh;fS0<VRU^IaNsPW-d$F}GL
zV0hOps4&eq^h_f}r3?Squ0f$PFkQu9dkMj*ahVMYIOQ};ivmtVcCIW0teW$J!2zqG
z@y87Wyy~B~ZTkVQ{8POaLWMs%Z=wYdvql~Ca3V0vHz=m#2+W!nVz`?K%yQPIZ9&ZH
zdefFFnB|OroL=zq<)oV5(DDq?Y|TA3kl9(wHAhi?=+kXRGhv${Ar#$2e2VI<Y?1T+
zB_EnjqmT(fM_L_m3<i>M2kqGe5VMl<vJs_bou>Bpp)kFtc4wt+?R3^Xi=Nf>tEg!|
zbvW8TS3a^H+z}>7811Qz1x1Q_r!LeeraE0WC<oJJscSvO>)t6_c6b({a&+3;DNRP`
z8cO9))3p!bgDCEP^hd<wQKDG(PTlo|C<u}u3Ht$ZZ-QX0!{61W+&>Xuc>NQB{U^c6
zKE+{Yzh_|hAw-xO{nMLTSV+%qdN6#1?$AHWcK85MX_%!~Bq<q$hPCwY36j#VWxIq_
zT*Er;$our##WWpmJ2)~c`XG<k!;R>T0~Z(7hhl6vn#guB9mzf1l>9FbmeIQ7!E!o*
zd$2w^t}ZMNXsvKzD3Z4PEoG%o+its9?~ooWmVE>PJ#(wuE$Icc@Oik2oVPfo?Za!A
zlh^OT3R;UiSYP5MItJsmOCpJz#4B)9IvqK<MUutXL>yR91ax$FVA$g55iJ)6hr!3i
zqgJ%-IPGHOp7~>gMv|-Gwk!IyRXDH&@hrbx5+@W|bK4b};4g*x`8uBVbD=L8nDKee
za-C}L^br*L@a4ktm?8!)EXnCr02h`6C}3G@)2;pHl0VD`x#KQ9{fS|%PU3N8UoJrr
z(gx<@=7o@b9wc<)aN6Y&<+i)9Fo@}}?X*L-AFcilEGS~J9)ws$ef|(Zq>@et7okgv
zF}Sdd?)E)cnhLqV3!tN+i<?S&3)8-sH^AtK<KPxyLH7_2EbkX$yhWJP5zlQGq~t3&
zFr3NA8Sr2_XzRi<eMEC%eW`y@gnS2&9;$_fz2Q7bXGVt}rdP}dRw1z#!y%yZT^Bb^
zDX#6pqJm=4E(~_=>q`U~_;U_au?}GvLiWHF>vPl($YpW*O=HRzbzyOoiM1q-Vquq2
zKOu+Sp@(PV#JWLCyFZsP<l@^*9OdM#y7X4aH1DVM=7G~L2hblIdKK#~W>>83aZ%9W
z(V-WoeX&=>ke@$ZY%>&Vba0aZljOnTzAmqK+u^k0kE2Km*+-nV{)S!uP+KJStt5=Z
zTqbec`Z+CbnhDv%l@dCh#2FoYT%J=r#puI|oQ}C}yCR@)k_$_7I_0^rLSIXGuvqwi
z37kEjUW$#zr<Vp99riqW1>Lteur$c!o)_m7D{<Rl&-3Y}0Tjz`mrMU3%@Z-dDHPDx
zjWmand-RH=FJy&fSNNw;Q^_A{%Y<C|SjfFTu*Zu%AcHU9&t(>`kbS=|cm}T6kKkE|
zbuG)}z6woi{d=pYz@><{GanhY51+q0kn2?*=%+tC+(fKvc@PU<6gAtzuftExQy|iz
z2Ny*6FF7uYd^uwnLU;23$UeO&7V^ung!wiIS4!+XIb2(R4a?)0t^ggL3mA>R&!7*n
z9~C)PZ4Vb9y1H`cQ8V8@3OGcvT_$|_JkHwlf}L|LCE~1>$CxG2FJ{Dp6;K4pv(&3R
zE`-e#S%1aYu;6=rVAmHmSY*;JbDWQT{s5zhjw8PU<|cT?l#4mS;_mAKTzRox<%tfN
zIDO<vB=(O2M{}X4i9Ex?%NF}vk>O<FjRWn3pOYtv@PP{~6248NotR5_0%AQVGI%he
z-&E*0mR;dv7O-C_=S=t;WYT^g&=zryLc0~XQrCbh=;heseLVn7%sIR|xz^$MA#hOu
z?8Ax_n}9E?G?2EH>cj^}Uw})`CONP?4TatVCe}n)g75|Mbj5m=O24E?AsErGjLGB(
z`}petaOK*WCQ>I;={w?-BJ46xf$Z~=!P)a+*noXra<~jae%PYK*m78*PY*7LKbJYq
z;=*S7*i}4S9J}d`z?nC|gbWH?P5b>qBSjnuo~Ss3A!;Gct3?6N*zdPUgl|?Lh~nEI
zJnyc5fD}`dyEk}62<dCpHKbu6ps0508kr0_+=`o`t?7P^PZ+7b*~2@YNTAg5#{#=n
NcH*|%Hn-25_z#;~4c`C&

diff --git a/branches/bug1734/doc/ZConfig/zconfig.tex b/branches/bug1734/doc/ZConfig/zconfig.tex
deleted file mode 100644
index 749cfa23..00000000
--- a/branches/bug1734/doc/ZConfig/zconfig.tex
+++ /dev/null
@@ -1,1856 +0,0 @@
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%
-% Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-% All Rights Reserved.
-%
-% This software is subject to the provisions of the Zope Public License,
-% Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-% THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-% WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-% WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-% FOR A PARTICULAR PURPOSE.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\documentclass{howto}
-\usepackage{xmlmarkup}
-
-\newcommand{\datatype}[1]{\strong{#1}}
-
-\title{ZConfig Package Reference}
-
-%\date{27 October 2003}
-\release{2.2}
-\setshortversion{2.2}
-
-\author{Zope Corporation}
-\authoraddress{
-    Lafayette Technology Center\\
-    513 Prince Edward Street\\
-    Fredericksburg, VA 22401\\
-    \url{http://www.zope.com/}
-}
-
-\begin{document}
-\maketitle
-
-\begin{abstract}
-\noindent
-This document describes the syntax and API used in configuration files
-for components of a Zope installation written by Zope Corporation.  This
-configuration mechanism is itself configured using a schema specification
-written in XML.
-\end{abstract}
-
-\tableofcontents
-
-
-\section{Introduction \label{intro}}
-
-Zope uses a common syntax and API for configuration files designed for
-software components written by Zope Corporation.  Third-party software
-which is also part of a Zope installation may use a different syntax,
-though any software is welcome to use the syntax used by Zope
-Corporation.  Any software written in Python is free to use the
-\module{ZConfig} software to load such configuration files in order to
-ensure compatibility.  This software is covered by the Zope Public
-License, version 2.0.
-
-The \module{ZConfig} package has been tested with Python 2.3.  Older
-versions of Python are not supported.
-\module{ZConfig} relies only on the Python standard library.
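-
-As a brief illustration, a Python application loads a configuration
-in two steps (a minimal sketch; the schema and configuration file
-names here are placeholders):
-
-\begin{verbatim}
-import ZConfig
-
-# Load the schema describing the allowed structure, then load and
-# validate a configuration file against it.
-schema = ZConfig.loadSchema("schema.xml")
-config, handlers = ZConfig.loadConfig(schema, "app.conf")
-\end{verbatim}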
-
-Configurations which use \module{ZConfig} are described using
-\dfn{schemas}.  A schema is a specification for the allowed structure
-and content of the configuration.  \module{ZConfig} schemas are written
-using a small XML-based language.  The schema language allows the
-schema author to specify the names of the keys allowed at the top
-level and within sections, to define the types of sections which may
-be used (and where), the type of each value, whether a key or
-section must be specified or is optional, default values for keys, and
-whether a value can be given only once or repeatedly.
-
-
-\section{Configuration Syntax \label{syntax}}
-
-Like the \ulink{\module{ConfigParser}}
-{http://docs.python.org/lib/module-ConfigParser.html}
-format, this format supports key-value pairs arranged in sections.
-Unlike the \module{ConfigParser} format, sections are typed and can be
-organized hierarchically.
-Additional files may be included if needed.  Schema components not
-specified in the application schema can be imported from the
-configuration file.  Though both formats are substantially
-line-oriented, this format is more flexible.
-
-The intent of supporting nested sections is to allow setting up the
-configurations for loosely associated components in a container.  For
-example, each process running on a host might get its configuration
-section from that host's section of a shared configuration file.
-
-The top level of a configuration file consists of a series of
-inclusions, key-value pairs, and sections.
-
-Comments can be added on lines by themselves.  A comment has a
-\character{\#} as the first non-space character and extends to the end
-of the line:
-
-\begin{verbatim}
-# This is a comment
-\end{verbatim}
-
-An inclusion is expressed like this:
-
-\begin{verbatim}
-%include defaults.conf
-\end{verbatim}
-
-The resource to be included can be specified by a relative or absolute
-URL, resolved relative to the URL of the resource the
-\keyword{\%include} directive is located in.
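-
-An absolute URL may also be given; for instance (using an
-illustrative host name):
-
-\begin{verbatim}
-%include http://www.example.com/defaults.conf
-\end{verbatim}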
-
-
-A key-value pair is expressed like this:
-
-\begin{verbatim}
-key value
-\end{verbatim}
-
-The key may include any non-whitespace characters except for parentheses.
-The value contains all the characters between the key and the end of
-the line, with surrounding whitespace removed.
-
-Since comments must be on lines by themselves, the \character{\#}
-character can be part of a value:
-
-\begin{verbatim}
-key value # still part of the value
-\end{verbatim}
-
-Sections may be either empty or non-empty.  An empty section may be
-used to provide an alias for another section.
-
-A non-empty section starts with a header, contains configuration
-data on subsequent lines, and ends with a terminator.
-
-The header for a non-empty section has this form (square brackets
-denote optional parts):
-
-\begin{alltt}
-<\var{section-type} \optional{\var{name}} >
-\end{alltt}
-
-\var{section-type} and \var{name} both have the same syntactic
-constraints as key names.
-
-The terminator looks like this:
-
-\begin{alltt}
-</\var{section-type}>
-\end{alltt}
-
-The configuration data in a non-empty section consists of a sequence
-of one or more key-value pairs and sections.  For example:
-
-\begin{verbatim}
-<my-section>
-    key-1 value-1
-    key-2 value-2
-
-    <another-section>
-        key-3 value-3
-    </another-section>
-</my-section>
-\end{verbatim}
-
-(The indentation is used here for clarity, but is not required for
-syntactic correctness.)
-
-The header for empty sections is similar to that of non-empty
-sections, but there is no terminator:
-
-\begin{alltt}
-<\var{section-type} \optional{\var{name}} />
-\end{alltt}
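-
-For example, an empty section of the hypothetical type
-\code{my-section} with the name \code{primary} would be written as:
-
-\begin{verbatim}
-<my-section primary />
-\end{verbatim}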
-
-
-\subsection{Extending the Configuration Schema}
-
-As we'll see in section~\ref{writing-schema}, ``Writing Configuration
-Schema,'' what can be written in a configuration is controlled by
-schemas which can be built from \emph{components}.  These components
-can also be used to extend the set of implementations of objects the
-application can handle.  What this means when writing a configuration
-is that third-party implementations of application object types can be
-used wherever those application types are used in the configuration,
-if there's a \module{ZConfig} component available for that
-implementation.
-
-The configuration file can use an \keyword{\%import} directive to load
-a named component:
-
-\begin{verbatim}
-%import Products.Ape
-\end{verbatim}
-
-The text to the right of the \keyword{\%import} keyword must be the
-name of a Python package; the \module{ZConfig} component provided by
-that package will be loaded and incorporated into the schema being
-used to load the configuration file.  After the import, section types
-defined in the component may be used in the configuration.
-
-More detail is needed for this to really make sense.
-
-A schema may define section types which are \emph{abstract}; these
-cannot be used directly in a configuration, but multiple concrete
-section types can be defined which \emph{implement} the abstract
-types.  Wherever the application allows an abstract type to be used,
-any concrete type which implements that abstract type can be used in
-an actual configuration.
-
-The \keyword{\%import} directive allows loading schema components
-which provide alternate concrete section types which implement the
-abstract types defined by the application.  This allows third-party
-implementations of abstract types to be used in place of or in
-addition to implementations provided with the application.
-
-Consider an example application which supports logging in
-the same way Zope 2 does.  There are some parameters which configure
-the general behavior of the logging mechanism, and an arbitrary number
-of \emph{log handlers} may be specified to control how the log
-messages are handled.  Several log handlers are provided by the
-application.  Here is an example logging configuration:
-
-\begin{verbatim}
-<eventlog>
-  level verbose
-
-  <logfile>
-    path /var/log/myapp/events.log
-  </logfile>
-</eventlog>
-\end{verbatim}
-
-A third-party component may provide a log handler to send
-high-priority alerts to the system administrator's text pager or
-SMS-capable phone.  All that's needed is to install the implementation
-so it can be imported by Python, and modify the configuration:
-
-\begin{verbatim}
-%import my.pager.loghandler
-
-<eventlog>
-  level verbose
-
-  <logfile>
-    path /var/log/myapp/events.log
-  </logfile>
-
-  <pager>
-    number   1-800-555-1234
-    message  Something broke!
-  </pager>
-</eventlog>
-\end{verbatim}
-
-
-\subsection{Textual Substitution in Values}
-
-\module{ZConfig} provides a limited way to re-use portions of a value
-using simple string substitution.  To use this facility, define named
-bits of replacement text using the \keyword{\%define} directive, and
-reference these texts from values.
-
-The syntax for \keyword{\%define} is:
-
-\begin{alltt}
-%define \var{name} \optional{\var{value}}
-\end{alltt}
-
-The value of \var{name} must be a sequence of letters, digits, and
-underscores, and may not start with a digit; the namespace for these
-names is separate from the other namespaces used with
-\module{ZConfig}, and is case-insensitive.  If \var{value} is
-omitted, it will be the empty string.  If given, there must be
-whitespace between \var{name} and \var{value}; \var{value} will not
-include any whitespace on either side, just like values from key-value
-pairs.
-
-Names must be defined before they are used, and may not be
-re-defined.  All resources being parsed as part of a configuration
-share a single namespace for defined names.  This means that resources
-which may be included more than once should not define any names.
-
-References to defined names from configuration values use the syntax
-described for the \refmodule{ZConfig.substitution} module.
-Configuration values which include a \character{\$} as part of the
-actual value will need to use \code{\$\$} to get a single
-\character{\$} in the result.
-
-The values of defined names are processed in the same way as
-configuration values, and may contain references to named
-definitions.
-
-For example, the value for \code{key} will evaluate to \code{value}:
-
-\begin{verbatim}
-%define name value
-key $name
-\end{verbatim} %$ <-- bow to font-lock
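-
-Definitions may also build on earlier definitions, and \code{\$\$}
-escapes a literal dollar sign.  The following sketch (with
-hypothetical names) shows both:
-
-\begin{verbatim}
-%define prefix /usr/local
-%define bindir $prefix/bin
-
-helper  $bindir/helper
-price   $$1.50
-\end{verbatim} %$ <-- bow to font-lock
-
-Here \code{helper} evaluates to \code{/usr/local/bin/helper}, and
-\code{price} evaluates to \code{\$1.50}.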
-
-
-\section{Writing Configuration Schema \label{writing-schema}}
-
-\module{ZConfig} schema are written as XML documents.
-
-Data types are searched in a special namespace defined by the data
-type registry.  The default registry has slightly magical semantics:
-If the value can be matched to a standard data type when interpreted
-as a \datatype{basic-key}, the standard data type will be used.  If
-that fails, the value must be a \datatype{dotted-name} containing at
-least one dot, and a conversion function will be sought using the
-\method{search()} method of the data type registry used to load the
-schema.
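-
-As a minimal sketch (with hypothetical key names), a complete schema
-might look like this; the elements and attributes used are described
-in the next section:
-
-\begin{verbatim}
-<schema>
-  <description>Configuration for a hypothetical server.</description>
-  <key name="host" datatype="ipaddr-or-hostname" default="localhost"/>
-  <key name="timeout" datatype="time-interval" default="30s"/>
-</schema>
-\end{verbatim}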
-
-
-\subsection{Schema Elements \label{elements}}
-
-For each element, the content model is shown, followed by a
-description of how the element is used, and then a list of the
-available attributes.  For each attribute, the type of the value is
-given as either the name of a \module{ZConfig} datatype or an XML
-attribute value type.  Familiarity with XML's Document Type Definition
-language is helpful.
-
-The following elements are used to describe a schema:
-
-\begin{elementdesc}{schema}{description?, metadefault?, example?,
-                            import*,
-                            (sectiontype | abstracttype)*,
-                            (section | key | multisection |
-                            multikey)*}
-  Document element for a \module{ZConfig} schema.
-
-  \begin{attributedesc}{extends}{\datatype{space-separated-url-references}}
-  A list of URLs of base schemas from which this schema will inherit
-  key, section, and section type declarations.  If omitted, this schema
-  is defined using only the keys, sections, and section types contained within
-  the \element{schema} element.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    section.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If any base schemas are listed in the \attribute{extends}
-    attribute, the default value for this attribute comes from the base
-    schemas.  If the base schemas all use the same \attribute{datatype}, then
-    that data type will be the default value for the extending schema.  If
-    there are no base schemas, the default value is \datatype{null}, which
-    means that the \module{ZConfig} section object will be used unconverted.
-    If the base schemas have different \attribute{datatype} definitions, you
-    must explicitly define the \attribute{datatype} in the extending schema.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{keytype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to keys found in
-    this section.  This can be used to constrain key values in
-    different ways; two data types which may be especially useful are
-    the \datatype{identifier} and \datatype{ipaddr-or-hostname}
-    types.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If any base schemas are listed in the \attribute{extends}
-    attribute, the default value for this attribute comes from the base
-    schemas.  If the base schemas all use the same \attribute{keytype}, then
-    that key type will be the default value for the extending schema.  If there
-    are no base schemas, the default value is \datatype{basic-key}.  If the
-    base schemas have different \attribute{keytype} definitions, you must
-    explicitly define the \attribute{keytype} in the extending schema.
-  \end{attributedesc}
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended in front of partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts within the \element{schema} element if it hasn't been
-    overridden by an inner element with a \attribute{prefix}
-    attribute.
-  \end{attributedesc}
-\end{elementdesc}
-
-\begin{elementdesc}{description}{PCDATA}
-  Descriptive text explaining the purpose of the container of the
-  \element{description} element.  Most other elements can contain
-  a \element{description} element as their first child.
-  At most one \element{description} element may appear in a given
-  context.
-
-  \begin{attributedesc}{format}{NMTOKEN}
-    Optional attribute that can be added to indicate what conventions
-    are used to mark up the contained text.  This is intended to serve
-    as a hint for documentation extraction tools.  Suggested values
-    are:
-
-    \begin{tableii}{l|l}{code}{Value}{Content Format}
-      \lineii{plain}{\mimetype{text/plain}; blank lines separate paragraphs}
-      \lineii{rest}{reStructuredText}
-      \lineii{stx}{Classic Structured Text}
-    \end{tableii}
-  \end{attributedesc}
-\end{elementdesc}
-
-\begin{elementdesc}{example}{PCDATA}
-  An example value.  This serves only as documentation.
-\end{elementdesc}
-
-\begin{elementdesc}{metadefault}{PCDATA}
-  A description of the default value, for human readers.  This may
-  include information about how a computed value is determined when
-  the schema does not specify a default value.
-\end{elementdesc}
-
-\begin{elementdesc}{abstracttype}{description?}
-  Define an abstract section type.
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the abstract section type; required.
-  \end{attributedesc}
-\end{elementdesc}
-
-\begin{elementdesc}{sectiontype}{description?, (section | key |
-                                  multisection | multikey)*}
-  Define a concrete section type.
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    section.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If \attribute{datatype} is omitted and
-    \attribute{extends} is used, the \attribute{datatype} from the
-    section type identified by the \attribute{extends} attribute is
-    used.
-  \end{attributedesc}
-
-  \begin{attributedesc}{extends}{\datatype{basic-key}}
-    The name of a concrete section type from which this section type
-    acquires all key and section declarations.  This type does
-    \emph{not} automatically implement any abstract section type
-    implemented by the named section type.  If omitted, this section
-    is defined with only the keys and sections contained within the
-    \element{sectiontype} element.  The new section type is called a
-    \emph{derived} section type, and the type named by this attribute
-    is called the \emph{base} type.  Values for the
-    \attribute{datatype} and \attribute{keytype} attributes are
-    acquired from the base type if not specified.
-  \end{attributedesc}
-
-  \begin{attributedesc}{implements}{\datatype{basic-key}}
-    The name of an abstract section type which this concrete section
-    type implements.  If omitted, this section type does not implement
-    any abstract type, and can only be used if it is specified
-    directly in a schema or other section type.
-  \end{attributedesc}
-
-  \begin{attributedesc}{keytype}{\datatype{basic-key}}
-    The data type converter which will be applied to keys found in
-    this section.  This can be used to constrain key values in
-    different ways; two data types which may be especially useful are
-    the \datatype{identifier} and \datatype{ipaddr-or-hostname}
-    types.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  The default value is \datatype{basic-key}.  If
-    \attribute{keytype} is omitted and \attribute{extends} is used,
-    the \attribute{keytype} from the section type identified by the
-    \attribute{extends} attribute is used.
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the section type; required.
-  \end{attributedesc}
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended in front of partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts in the \element{sectiontype} element.  If omitted, the
-    prefix specified by the containing context, if any, is used.
-  \end{attributedesc}
-\end{elementdesc}
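-
-To illustrate how these two elements combine (the type and datatype
-names here are hypothetical), an abstract type and a concrete
-implementation of it might be declared like this:
-
-\begin{verbatim}
-<abstracttype name="database">
-  <description>
-    A factory for database connections.
-  </description>
-</abstracttype>
-
-<sectiontype name="sqlitedb"
-             implements="database"
-             datatype="mypkg.datatypes.sqlitedb_factory">
-  <key name="path" datatype="existing-file" required="yes"/>
-</sectiontype>
-\end{verbatim}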
-
-\begin{elementdesc}{import}{EMPTY}
-  Import a schema component.  Exactly one of the attributes
-  \attribute{package} and \attribute{src} must be specified.
-
-  \begin{attributedesc}{file}{file name without directory information}
-    Name of the component file within a package; if not specified,
-    \file{component.xml} is used.  This may only be given when
-    \attribute{package} is used.  (The \file{component.xml} file is
-    always used when importing via \keyword{\%import} from a
-    configuration file.)
-  \end{attributedesc}
-
-  \begin{attributedesc}{package}{\datatype{dotted-suffix}}
-    Name of a Python package that contains the schema component being
-    imported.  The component will be loaded from the file identified
-    by the \attribute{file} attribute, or \file{component.xml} if
-    \attribute{file} is not specified.  If the package name given
-    starts with a dot (\character{.}), the name used will be the
-    current prefix and the value of this attribute concatenated.
-  \end{attributedesc}
-
-  \begin{attributedesc}{src}{\datatype{url-reference}}
-    URL to a separate schema which can provide useful types.  The
-    referenced resource must contain a schema, not a schema
-    component.  Section types defined or imported by the referenced
-    schema are added to the schema containing the \element{import};
-    top-level keys and sections are ignored.
-  \end{attributedesc}
-\end{elementdesc}
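-
-For example (the package name and URL here are hypothetical), each of
-the following forms is allowed:
-
-\begin{verbatim}
-<import package="mypkg.components"/>
-<import package="mypkg.components" file="extras.xml"/>
-<import src="http://www.example.com/schemas/mytypes.xml"/>
-\end{verbatim}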
-
-\begin{elementdesc}{key}{description?, example?, metadefault?, default*}
-  A \element{key} element is used to describe a key-value pair which
-  may occur at most once in the section type or top-level schema in
-  which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this key should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the key name to underscores.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    key.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.
-  \end{attributedesc}
-
-  \begin{attributedesc}{default}{\datatype{string}}
-    If the key-value pair is optional and this attribute is specified,
-    the value of this attribute will be converted using the appropriate
-    data type converter and returned to the application as the
-    configured value.  This attribute may not be specified if the
-    \attribute{required} attribute is \code{yes}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the key, as it must be given in a configuration
-    instance, or `\code{+}'.  If the value is `\code{+}', any name not
-    already specified as a key may be used, and the configuration
-    value for the key will be a dictionary mapping from the key name
-    to the value.  In this case, the \attribute{attribute} attribute
-    must be specified, and the data type for the key will be applied
-    to each key which is found.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the key.  If the value is \code{yes}, the
-    \attribute{default} attribute may not be specified and an error
-    will be reported if the configuration instance does not specify a
-    value for the key.  If the value is \code{no} (the default) and
-    the configuration instance does not specify a value, the value
-    reported to the application will be that specified by the
-    \attribute{default} attribute, if given, or \code{None}.
-  \end{attributedesc}
-\end{elementdesc}
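-
-For example, a section type which accepts arbitrary keys (the names
-here are hypothetical) can be declared using the wildcard name; the
-section value's \member{data} attribute is then a dictionary mapping
-the keys actually given to their values:
-
-\begin{verbatim}
-<sectiontype name="environment">
-  <key name="+" attribute="data" datatype="string"/>
-</sectiontype>
-\end{verbatim}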
-
-
-\begin{elementdesc}{multikey}{description?, example?, metadefault?, default*}
-  A \element{multikey} element is used to describe a key-value pair
-  which may occur any number of times in the section type or top-level
-  schema in which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this key should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the key name to underscores.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    key.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the key, as it must be given in a configuration
-    instance, or `\code{+}'.  If the value is `\code{+}', any name not
-    already specified as a key may be used, and the configuration
-    value for the key will be a dictionary mapping from the key name
-    to the value.  In this case, the \attribute{attribute} attribute
-    must be specified, and the data type for the key will be applied
-    to each key which is found.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the key.  If the value is \code{yes}, no \element{default}
-    elements may be specified and an error will be reported if the
-    configuration instance does not specify at least one value for the
-    key.  If the value is \code{no} (the default) and the
-    configuration instance does not specify a value, the value
-    reported to the application will be a list containing one element
-    for each \element{default} element specified as a child of the
-    \element{multikey}.  Each value will be individually converted
-    according to the \attribute{datatype} attribute.
-  \end{attributedesc}
-\end{elementdesc}
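-
-A sketch of a \element{multikey} with two default values (the names
-and addresses are hypothetical):
-
-\begin{verbatim}
-<multikey name="server" attribute="servers" datatype="inet-address">
-  <default>www.example.com:80</default>
-  <default>www.example.com:8080</default>
-</multikey>
-\end{verbatim}
-
-If a configuration provides no \code{server} keys, the application
-sees a list containing these two values, each converted by the
-\datatype{inet-address} data type.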
-
-
-\begin{elementdesc}{default}{PCDATA}
-  Each \element{default} element specifies a single default value for
-  a \element{multikey}.  This element can be repeated to produce a
-  list of individual default values.  The text contained in the
-  element will be passed to the datatype conversion for the
-  \element{multikey}.
-
-  \begin{attributedesc}{key}{key type of the containing sectiontype}
-    Key to associate with the default value.  This is only used for
-    defaults of a \element{key} or \element{multikey} with a
-    \attribute{name} of \code{+}; in that case this attribute is
-    required.  It is an error to use the \attribute{key} attribute
-    with a \element{default} element for a \element{multikey} with a
-    name other than \code{+}.
-
-    \begin{notice}[warning]
-      The datatype of this attribute is that of the section type
-      \emph{containing} the actual keys, not necessarily that of the
-      section type which defines the key.  If a derived section
-      overrides the key type of the base section type, the actual
-      key type used is that of the derived section.
-
-      This can lead to confusing errors in schemas, though the
-      \refmodule{ZConfig} package checks for this when the schema is
-      loaded.  This situation is particularly likely when a derived
-      section type uses a key type which collapses multiple default
-      keys which were not collapsed by the base section type.
-
-      Consider this example schema:
-
-\begin{verbatim}
-<schema>
-  <sectiontype name="base" keytype="identifier">
-    <key name="+" attribute="mapping">
-      <default key="foo">some value</default>
-      <default key="FOO">some value</default>
-    </key>
-  </sectiontype>
-
-  <sectiontype name="derived" keytype="basic-key"
-               extends="base"/>
-
-  <section type="derived" name="*" attribute="section"/>
-</schema>
-\end{verbatim}
-
-      When this schema is loaded, a set of defaults for the
-      \datatype{derived} section type is computed.  Since
-      \datatype{basic-key} is case-insensitive (everything is
-      converted to lower case), \samp{foo} and \samp{FOO} are both
-      converted to \samp{foo}, which clashes since \element{key} only
-      allows one value for each key.
-    \end{notice}
-  \end{attributedesc}
-\end{elementdesc}
-
-
-\begin{elementdesc}{section}{description?}
-  A \element{section} element is used to describe a section which may
-  occur at most once in the section type or top-level schema in which
-  it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this section should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the section name to underscores,
-    in which case the \attribute{name} attribute may not be \code{*}
-    or \code{+}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the section, as it must be given in a configuration
-    instance, \code{*}, or \code{+}.  If the value is \code{*}, any
-    name not already specified as a key may be used.  If the value is
-    \code{*} or \code{+}, the \attribute{attribute} attribute must be
-    specified.  If the value is \code{*}, any name is allowed, or the
-    name may be omitted.  If the value is \code{+}, any name is
-    allowed, but some name must be provided.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the section.  If the value is \code{yes}, an error will be
-    reported if the configuration instance does not include the
-    section.  If the value is \code{no} (the default) and the
-    configuration instance does not include the section, the value
-    reported to the application will be \code{None}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{type}{\datatype{basic-key}}
-    The section type which matching sections must implement.  If the
-    value names an abstract section type, matching sections in the
-    configuration file must be of a type which specifies that it
-    implements the named abstract type.  If the name identifies a
-    concrete type, the section type must match exactly.
-  \end{attributedesc}
-\end{elementdesc}
-
-
-\begin{elementdesc}{multisection}{description?}
-  A \element{multisection} element is used to describe a section which
-  may occur any number of times in the section type or top-level
-  schema in which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which matching sections should be
-    the value of on a \class{SectionValue} instance.  This is required
-    and must be unique within the immediate contents of a section type
-    or schema.  The \class{SectionValue} instance will contain a list
-    of matching sections.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    For a \element{multisection}, any name not already specified as a
-    key may be used.  If the value is \code{*} or \code{+}, the
-    \attribute{attribute} attribute must be specified.  If the value
-    is \code{*}, any name is allowed, or the name may be omitted.  If
-    the value is \code{+}, any name is allowed, but some name must be
-    provided.  No other value for the \attribute{name} attribute is
-    allowed for a \element{multisection}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide at least one matching section.  If the value is
-    \code{yes}, an error will be reported if the configuration
-    instance does not include the section.  If the value is \code{no}
-    (the default) and the configuration instance does not include the
-    section, the value reported to the application will be
-    \code{None}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{type}{\datatype{basic-key}}
-    The section type which matching sections must implement.  If the
-    value names an abstract section type, matching sections in the
-    configuration file must be of types which specify that they
-    implement the named abstract type.  If the name identifies a
-    concrete type, the section type must match exactly.
-  \end{attributedesc}
-\end{elementdesc}
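-
-For example, a schema could accept any number of log handler sections
-by referencing the abstract \datatype{ZConfig.logger.handler} type
-described in section~\ref{standard-components} (a sketch, assuming
-that component has been imported):
-
-\begin{verbatim}
-<multisection type="ZConfig.logger.handler"
-              name="*"
-              attribute="handlers"/>
-\end{verbatim}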
-
-
-\subsection{Schema Components \label{schema-components}}
-
-XXX need more explanation
-
-\module{ZConfig} supports schema components that can be
-provided by disparate packages, and allows them to be knit together
-into concrete schema for applications.  Components cannot add
-keys or sections to the top level of the application schema.
-
-A schema \dfn{component} is allowed to define new abstract types and
-concrete section types.
-Components are identified using a dotted-name, similar to a Python
-module name.  For example, one component may be \code{zodb.storage}.
-
-Schema components are stored alongside application code since they
-directly reference datatype code.  Schema components are provided by
-Python packages.  The component definition is normally stored in the
-file \file{component.xml}; an alternate filename may be specified
-using the \attribute{file} attribute of the \element{import} element.
-Components imported using the \keyword{\%import} keyword from a
-configuration file must be named \file{component.xml}.
-The component defines the types provided by that component; it must
-have a \element{component} element as the document element.
-
-The following element is used as the document element for schema
-components.  Note that schema components do not allow keys and
-sections to be added to the top-level of a schema; they serve only to
-provide type definitions.
-
-\begin{elementdesc}{component}{description?, (abstracttype | sectiontype)*}
-  The top-level element for schema components.
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended in front of partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts within the \element{component} element if it hasn't been
-    overridden by an inner element with a \attribute{prefix}
-    attribute.
-  \end{attributedesc}
-\end{elementdesc}
-
-
-\section{Standard \module{ZConfig} Datatypes\label{standard-datatypes}}
-
-There are a number of data types which can be identified using the
-\attribute{datatype} attribute on \element{key},
-\element{sectiontype}, and \element{schema} elements.
-Applications may extend the set of datatypes by calling the
-\method{register()} method of the data type registry being used or by
-using Python dotted-names to refer to conversion routines defined in
-code.
-
-The following data types are provided by the default type registry.
-
-\begin{definitions}
-\term{\datatype{basic-key}}
-  The default data type for a key in a ZConfig configuration file.
-  The result of conversion is always lower-case, and matches the
-  regular expression \regexp{[a-z][-._a-z0-9]*}.
-
-\term{\datatype{boolean}}
-  Convert a human-friendly string to a boolean value.  The names
-  \code{yes}, \code{on}, and \code{true} convert to \constant{True},
-  while \code{no}, \code{off}, and \code{false} convert to
-  \constant{False}.  Comparisons are case-insensitive.  All other
-  input strings are disallowed.
-
-\term{\datatype{byte-size}}
-  A specification of a size, with byte multiplier suffixes (for
-  example, \samp{128MB}).  Suffixes are case insensitive and may be
-  \samp{KB}, \samp{MB}, or \samp{GB}.
-
-\term{\datatype{dotted-name}}
-  A string consisting of one or more \datatype{identifier} values
-  separated by periods (\character{.}).
-
-\term{\datatype{dotted-suffix}}
-  A string consisting of one or more \datatype{identifier} values
-  separated by periods (\character{.}), possibly prefixed by a
-  period.  This can be used to indicate a dotted name that may be
-  specified relative to some base dotted name.
-
-\term{\datatype{existing-dirpath}}
-  Validates that the directory portion of a pathname exists.  For
-  example, if the value provided is \file{/foo/bar}, \file{/foo} must
-  be an existing directory.  No conversion is performed.
-
-\term{\datatype{existing-directory}}
-  Validates that a directory by the given name exists on 
-  the local filesystem.  No conversion is performed. 
-
-\term{\datatype{existing-file}}
-  Validates that a file by the given name exists.  No conversion 
-  is performed. 
-
-\term{\datatype{existing-path}}
-  Validates that a path (file, directory, or symlink) by the
-  given name exists on the local filesystem.  No conversion
-  is performed.
-
-\term{\datatype{float}}
-  A Python float.  \code{Inf}, \code{-Inf}, and \code{NaN} are not
-  allowed.
-
-\term{\datatype{identifier}}
-  Any valid Python identifier.
-
-\term{\datatype{inet-address}}
-  An Internet address expressed as a \code{(\var{hostname},
-  \var{port})} pair.  If only the port is specified, the default host
-  will be returned for \var{hostname}.  The default host is
-  \code{localhost} on Windows and the empty string on all other
-  platforms.  If the port is omitted, \code{None} will be returned for
-  \var{port}.
-
-\term{\datatype{integer}}
-  Convert a value to an integer.  This will be a Python \class{int} if
-  the value is in the range allowed by \class{int}, otherwise a Python
-  \class{long} is returned.
-
-\term{\datatype{ipaddr-or-hostname}}
-  Validates that the value is a valid IP address or hostname.  If the first
-  character is a digit, the value is assumed to be an IP 
-  address.  If the first character is not a digit, the value 
-  is assumed to be a hostname.  Hostnames are converted to lower
-  case.
-
-\term{\datatype{locale}}
-  Any valid locale specifier accepted by the available
-  \function{locale.setlocale()} function.  Be aware that only the
-  \code{'C'} locale is supported on some platforms.
-
-\term{\datatype{null}}
-  No conversion is performed; the value passed in is the value
-  returned.  This is the default data type for section values.
-
-\term{\datatype{port-number}}
-  Returns a valid port number as an integer.  Validity does not imply
-  that any particular use may be made of the port, however.  For
-  example, port numbers lower than 1024 generally cannot be bound by
-  non-root users.
-
-\term{\datatype{socket-address}}
-  An address for a socket.  The converted value is an object providing
-  two attributes.  \member{family} specifies the address family
-  (\constant{AF_INET} or \constant{AF_UNIX}), with \code{None} instead
-  of \constant{AF_UNIX} on platforms that don't support it.  The
-  \member{address} attribute will be the address that should be passed
-  to the socket's \method{bind()} method.  If the family is
-  \constant{AF_UNIX}, the specific address will be a pathname; if the
-  family is \constant{AF_INET}, the second part will be the result of
-  the \datatype{inet-address} conversion.
-
-\term{\datatype{string}}
-  Returns the input value as a string.  If the source is a Unicode
-  string, it is checked to contain only simple 7-bit
-  \ASCII.  This is the default data type for values in
-  configuration files.
-
-\term{\datatype{time-interval}}
-  A specification of a time interval in seconds, with multiplier
-  suffixes (for example, \code{12h}).  Suffixes are case insensitive
-  and may be \samp{s} (seconds), \samp{m} (minutes), \samp{h} (hours),
-  or \samp{d} (days).
-
-\term{\datatype{timedelta}}
-  Similar to \datatype{time-interval}, but this data type returns a Python
-  \class{datetime.timedelta} object instead of a float.  The set of suffixes
-  recognized by \datatype{timedelta} is: \samp{w} (weeks), \samp{d} (days),
-  \samp{h} (hours), \samp{m} (minutes), \samp{s} (seconds).  Values may be
-  floats, for example: \code{4w 2.5d 7h 12m 0.001s}.
-
-\end{definitions}
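-
-As a small illustration (the key names are hypothetical), several of
-these types might be combined in a schema like this:
-
-\begin{verbatim}
-<schema>
-  <key name="cache-size" datatype="byte-size" default="4MB"/>
-  <key name="retry-delay" datatype="time-interval" default="90s"/>
-  <key name="debug" datatype="boolean" default="off"/>
-  <key name="listen" datatype="inet-address" default="localhost:8080"/>
-</schema>
-\end{verbatim}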
-
-
-\section{Standard \module{ZConfig} Schema Components
-         \label{standard-components}}
-
-\module{ZConfig} provides a few convenient schema components as part
-of the package.  These may be used directly or can serve as examples
-for creating new components.
-
-
-\subsection{\module{ZConfig.components.basic}}
-
-The \module{ZConfig.components.basic} package provides small
-components that can be helpful in composing application-specific
-components and schema.  The package itself represents no large body of
-functionality.  The default component provided by this package simply
-imports all of the smaller components.  This can be imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.basic"/>
-\end{verbatim}
-
-Each of the smaller components is documented directly; importing these
-selectively can reduce the time it takes to load a schema slightly,
-and allows replacing the other basic components with alternate
-components (by using different imports that define the same type
-names) if desired.
-
-
-\subsubsection{The Mapping Section Type \label{basic-mapping}}
-
-There is a basic section type that behaves like a simple Python
-mapping; this can be imported directly using
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-\end{verbatim}
-
-This defines a single section type, \datatype{ZConfig.basic.mapping}.
-When this is used, the section value is a Python dictionary mapping
-keys to string values.
-
-This type is intended to be used by extending it in simple ways.  The
-simplest is to create a new section type name that makes more sense
-for the application:
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-
-<sectiontype name="my-mapping"
-             extends="ZConfig.basic.mapping"
-             />
-
-<section name="*"
-         type="my-mapping"
-         attribute="map"
-         />
-\end{verbatim}
-
-This allows a configuration to contain a mapping from
-\datatype{basic-key} names to string values like this:
-
-\begin{verbatim}
-<my-mapping>
-  This that
-  and the other
-</my-mapping>
-\end{verbatim}
-
-The value of the configuration object's \member{map} attribute would
-then be the dictionary
-
-\begin{verbatim}
-{'this': 'that',
- 'and': 'the other',
- }
-\end{verbatim}
-
-(Recall that the \datatype{basic-key} data type converts everything to
-lower case.)
-
-Perhaps a more interesting application of
-\datatype{ZConfig.basic.mapping} is using a derived type to override
-the \attribute{keytype}.  If we have the conversion function:
-
-\begin{verbatim}
-def email_address(value):
-    userid, hostname = value.split("@", 1)
-    hostname = hostname.lower()  # normalize what we know we can
-    return "%s@%s" % (userid, hostname)
-\end{verbatim}
-
-then we can use this as the key type for a derived mapping type:
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-
-<sectiontype name="email-users"
-             extends="ZConfig.basic.mapping"
-             keytype="mypkg.datatypes.email_address"
-             />
-
-<section name="*"
-         type="email-users"
-         attribute="email_users"
-         />
-\end{verbatim}
-
-
-\subsection{\module{ZConfig.components.logger}}
-
-The \module{ZConfig.components.logger} package provides configuration
-support for the \ulink{\module{logging} package}
-{http://docs.python.org/lib/module-logging.html} in
-Python's standard library.  This component can be imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger"/>
-\end{verbatim}
-
-This component defines two abstract types and several concrete section
-types.  These can be imported as a unit, as above, or as four smaller
-components usable in creating alternate logging packages.
-
-The first of the four smaller components contains the abstract types,
-and can be imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="abstract.xml"/>
-\end{verbatim}
-
-The two abstract types imported by this are:
-
-\begin{definitions}
-
-\term{\datatype{ZConfig.logger.log}}
-  Logger objects are represented by this abstract type.
-
-\term{\datatype{ZConfig.logger.handler}}
-  Each logger object can have one or more ``handlers'' associated with
-  it.  These handlers are responsible for writing logging events to
-  some form of output stream using appropriate formatting.  The output
-  stream may be a file on a disk, a socket communicating with a server
-  on another system, or a series of \code{syslog} messages.  Section
-  types which implement this type represent these handlers.
-
-\end{definitions}
-
-
-The second and third of the smaller components provide section types
-that act as factories for \class{logging.Logger} objects.  These can be
-imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="eventlog.xml"/>
-<import package="ZConfig.components.logger" file="logger.xml"/>
-\end{verbatim}
-
-The types defined in these components implement the
-\datatype{ZConfig.logger.log} abstract type.  The \file{eventlog.xml}
-component defines an \datatype{eventlog} type which represents the
-root logger from the \module{logging} package (the return value of
-\function{logging.getLogger()}), while the \file{logger.xml} component
-defines a \datatype{logger} section type which represents a named
-logger (as returned by \function{logging.getLogger(\var{name})}).
-
-
-The fourth of the smaller components provides section types that are
-factories for \class{logging.Handler} objects.  This can be imported
-using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="handlers.xml"/>
-\end{verbatim}
-
-The types defined in this component implement the
-\datatype{ZConfig.logger.handler} abstract type.
-
-
-
-The configuration objects provided by both the logger and handler
-types are factories for the finished loggers and handlers.  These
-factories should be called with no arguments to retrieve the logger or
-log handler objects.  Calling the factories repeatedly will cause the
-same objects to be returned each time, so it's safe to simply call
-them to retrieve the objects.
-
-The factories for the logger objects, whether the \datatype{eventlog}
-or \datatype{logger} section type is used, provide a \method{reopen()}
-method which may be called to close any log files and re-open them.
-This is useful when using a \UNIX{} signal to effect log file
-rotation: the signal handler can call this method, and not have to
-worry about what handlers have been registered for the logger.
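-
-For example, on a \UNIX{} platform the signal handler might look like
-this (a sketch, assuming \code{config} is a loaded configuration
-object whose \member{eventlog} attribute holds such a factory):
-
-\begin{verbatim}
-import signal
-
-def rotate_logs(signum, frame):
-    # close and re-open any log files used by the configured handlers
-    config.eventlog.reopen()
-
-signal.signal(signal.SIGUSR2, rotate_logs)
-\end{verbatim}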
-
-Building an application that uses the logging components is fairly
-straightforward.  The schema needs to import the relevant components
-and declare their use:
-
-\begin{verbatim}
-<schema>
-  <import package="ZConfig.components.logger" file="eventlog.xml"/>
-  <import package="ZConfig.components.logger" file="handlers.xml"/>
-
-  <section type="eventlog" name="*" attribute="eventlog"
-           required="yes"/>
-</schema>
-\end{verbatim}
-
-In the application, the schema and configuration file should be loaded
-normally.  Once the configuration object is available, the logger
-factory should be called to configure Python's \module{logging} package:
-
-\begin{verbatim}
-import os
-import ZConfig
-
-def run(configfile):
-    schemafile = os.path.join(os.path.dirname(__file__), "schema.xml")
-    schema = ZConfig.loadSchema(schemafile)
-    config, handlers = ZConfig.loadConfig(schema, configfile)
-
-    # configure the logging package:
-    config.eventlog()
-
-    # now do interesting things
-\end{verbatim}
-
-An example configuration file for this application may look like this:
-
-\begin{verbatim}
-<eventlog>
-  level  info
-
-  <logfile>
-    path        /var/log/myapp
-    format      %(asctime)s %(levelname)s %(name)s %(message)s
-    # locale-specific date/time representation
-    dateformat  %c
-  </logfile>
-
-  <syslog>
-    level    error
-    address  syslog.example.net:514
-    format   %(levelname)s %(name)s %(message)s
-  </syslog>
-</eventlog>
-\end{verbatim}
-
-Refer to the \module{logging} package documentation for the names
-available in the message format strings (the \code{format} key in the
-log handlers).  The date format strings (the \code{dateformat} key in
-the log handlers) are the same as those accepted by the
-\function{time.strftime()} function.
-
-
-\begin{seealso}
-  \seepep{282}{A Logging System}
-         {The proposal which described the logging feature for
-          inclusion in the Python standard library.}
-  \seelink{http://docs.python.org/lib/module-logging.html}
-          {\module{logging} --- Logging facility for Python}
-          {Python's \module{logging} package documentation, from the
-           \citetitle[http://docs.python.org/lib/lib.html]
-           {Python Library Reference}.}
-  \seelink{http://www.red-dove.com/python_logging.html}
-          {Original Python \module{logging} package}
-          {This is the original source for the \module{logging}
-           package.  This is mostly of historical interest.}
-\end{seealso}
-
-
-\section{Using Components to Extend Schema}
-
-% XXX This section needs a lot of work, but should get people started
-% who really want to add new pieces to ZConfig-configured applications.
-
-It is possible to use schema components and the \keyword{\%import}
-construct to extend the set of section types available for a specific
-configuration file, and allow the new components to be used in place
-of standard components.
-
-The key to making this work is the use of abstract section types.
-Wherever the original schema accepts an abstract type, it is possible
-to load new implementations of the abstract type and use those instead
-of, or in addition to, the implementations loaded by the original
-schema.
-
-Abstract types are generally used to represent interfaces.  Sometimes
-these are interfaces for factory objects, and sometimes not, but
-there's an interface that the new component needs to implement.  What
-interface is required should be documented in the
-\element{description} element in the \element{abstracttype} element;
-this may be by reference to an interface specified in a Python module
-or described in some other bit of documentation.
-
-The following things need to be created to make the new component
-usable from the configuration file:
-
-\begin{enumerate}
-  \item An implementation of the required interface.
-
-  \item A schema component that defines a section type that contains
-        the information needed to construct the component.
-
-  \item A ``datatype'' function that converts configuration data to an
-        instance of the component.
-\end{enumerate}
-
-For simplicity, let's assume that the implementation is defined by a
-Python class.
-
-The example component we build here will be in the \module{noise}
-package, but any package will do.  Components loadable using
-\keyword{\%import} must be contained in the \file{component.xml} file;
-alternate filenames may not be selected by the \keyword{\%import}
-construct.
-
-Create a ZConfig component that provides a section type to support
-your component.  The new section type must declare that it implements
-the appropriate abstract type; it should probably look something like
-this:
-
-\begin{verbatim}
-<component prefix="noise.server">
-  <import package="ZServer"/>
-
-  <sectiontype name="noise-generator"
-               implements="ZServer.server"
-               datatype=".NoiseServerFactory">
-
-    <!-- specific configuration data should be described here -->
-
-    <key name="port"
-         datatype="port-number"
-         required="yes">
-      <description>
-        Port number to listen on.
-      </description>
-    </key>
-
-    <key name="color"
-         datatype=".noise_color"
-         default="white">
-      <description>
-        Silly way to specify a noise generation algorithm.
-      </description>
-    </key>
-
-  </sectiontype>
-</component>
-\end{verbatim}
-
-This example uses one of the standard ZConfig datatypes,
-\datatype{port-number}, and requires two additional types to be
-provided by the \module{noise.server} module:
-\class{NoiseServerFactory} and \function{noise_color()}.
-
-The \function{noise_color()} function is a datatype conversion for a
-key, so it accepts a string and returns the value that should be used:
-
-\begin{verbatim}
-_noise_colors = {
-    # color -> r,g,b
-    'white': (255, 255, 255),
-    'pink':  (255, 182, 193),
-    }
-
-def noise_color(string):
-    if string in _noise_colors:
-        return _noise_colors[string]
-    else:
-        raise ValueError('unknown noise color: %r' % string)
-\end{verbatim}
-
-\class{NoiseServerFactory} is a little different, as it's the datatype
-function for a section rather than a key.  The parameter isn't a
-string, but a section value object with two attributes, \member{port}
-and \member{color}.
-
-Since the \datatype{ZServer.server} abstract type requires that the
-component returned is a factory object, the datatype function can be
-implemented as the constructor for the class of the factory object.
-(If the datatype function needed to select among different implementation
-classes based on the configuration values, it would make more sense to use
-a simple function that returns an instance of the appropriate class.)
-
-A class that implements this datatype might look like this:
-
-\begin{verbatim}
-from ZServer.datatypes import ServerFactory
-from noise.generator import WhiteNoiseGenerator, PinkNoiseGenerator
-
-class NoiseServerFactory(ServerFactory):
-
-    def __init__(self, section):
-        # host and ip will be initialized by ServerFactory.prepare()
-        self.host = None
-        self.ip = None
-        self.port = section.port
-        self.color = section.color
-
-    def create(self):
-        if self.color == 'white':
-            generator = WhiteNoiseGenerator()
-        else:
-            generator = PinkNoiseGenerator()
-        return NoiseServer(self.ip, self.port, generator)
-\end{verbatim}
-
-You'll need to arrange for the package containing this component to be
-available on Python's \code{sys.path} before the configuration file is
-loaded; this is most easily done by manipulating the
-\envvar{PYTHONPATH} environment variable.
-
-Your configuration file can now include the following to load and use
-your new component:
-
-\begin{verbatim}
-%import noise
-
-<noise-generator>
-  port 1234
-  color white
-</noise-generator>
-\end{verbatim}
-
-
-\section{\module{ZConfig} --- Basic configuration support}
-
-\declaremodule{}{ZConfig}
-\modulesynopsis{Configuration package.}
-
-The main \module{ZConfig} package exports these convenience functions:
-
-\begin{funcdesc}{loadConfig}{schema, url\optional{, overrides}}
-  Load and return a configuration from a URL or pathname given by
-  \var{url}.  \var{url} may be a URL, absolute pathname, or relative
-  pathname.  Fragment identifiers are not supported.  \var{schema} is
-  a reference to a schema loaded by \function{loadSchema()} or
-  \function{loadSchemaFile()}.
-  The return value is a tuple containing the configuration object and
-  a composite handler that, when called with a name-to-handler
-  mapping, calls all the handlers for the configuration.
-
-  The optional \var{overrides} argument represents information derived
-  from command-line arguments.  If given, it must be either a sequence
-  of value specifiers, or \code{None}.  A \dfn{value specifier} is a
-  string of the form \code{\var{optionpath}=\var{value}}.  The
-  \var{optionpath} specifies the ``full path'' to the configuration
-  setting: it can contain a sequence of names, separated by
-  \character{/} characters. Each name before the last names a section
-  from the configuration file, and the last name corresponds to a key
-  within the section identified by the leading section names.  If
-  \var{optionpath} contains only one name, it identifies a key in the
-  top-level schema.  \var{value} is a string that will be treated
-  just like a value in the configuration file.
-\end{funcdesc}
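-
-For example (the schema, file name, and option paths here are
-hypothetical), values from command-line options could be applied like
-this:
-
-\begin{verbatim}
-config, handler = ZConfig.loadConfig(
-    schema, "myapp.conf",
-    ["eventlog/level=debug", "timeout=90s"])
-\end{verbatim}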
-
-\begin{funcdesc}{loadConfigFile}{schema, file\optional{,
-                                 url\optional{, overrides}}}
-  Load and return a configuration from an opened file object.  If
-  \var{url} is omitted, one will be computed based on the
-  \member{name} attribute of \var{file}, if it exists.  If no URL can
-  be determined, all \keyword{\%include} statements in the
-  configuration must use absolute URLs.  \var{schema} is a reference
-  to a schema loaded by \function{loadSchema()} or
-  \function{loadSchemaFile()}.
-  The return value is a tuple containing the configuration object and
-  a composite handler that, when called with a name-to-handler
-  mapping, calls all the handlers for the configuration.
-  The \var{overrides} argument is the same as for the
-  \function{loadConfig()} function.
-\end{funcdesc}
-
-\begin{funcdesc}{loadSchema}{url}
-  Load a schema definition from the URL \var{url}.
-  \var{url} may be a URL, absolute pathname, or relative pathname.
-  Fragment identifiers are not supported.
-  The resulting
-  schema object can be passed to \function{loadConfig()} or
-  \function{loadConfigFile()}.  The schema object may be used as many
-  times as needed.
-\end{funcdesc}
-
-\begin{funcdesc}{loadSchemaFile}{file\optional{, url}}
-  Load a schema definition from the open file object \var{file}.  If
-  \var{url} is given and not \code{None}, it should be the URL of the
-  resource represented by \var{file}.  If \var{url} is omitted or
-  \code{None}, a URL may be computed from the \member{name} attribute
-  of \var{file}, if present.  The resulting schema object can
-  be passed to \function{loadConfig()} or \function{loadConfigFile()}.
-  The schema object may be used as many times as needed.
-\end{funcdesc}
-
-The following exceptions are defined by this package:
-
-\begin{excdesc}{ConfigurationError}
-  Base class for exceptions specific to the \module{ZConfig} package.
-  All instances provide a \member{message} attribute that describes
-  the specific error, and a \member{url} attribute that gives the URL
-  of the resource the error was located in, or \constant{None}.
-\end{excdesc}
-
-\begin{excdesc}{ConfigurationSyntaxError}
-  Exception raised when a configuration source does not conform to the
-  allowed syntax.  In addition to the \member{message} and
-  \member{url} attributes, exceptions of this type offer the
-  \member{lineno} attribute, which provides the line number at which
-  the error was detected.
-\end{excdesc}
-
-\begin{excdesc}{DataConversionError}
-  Raised when a data type conversion fails with
-  \exception{ValueError}.  This exception is a subclass of both
-  \exception{ConfigurationError} and \exception{ValueError}.  The
-  \function{str()} of the exception provides the explanation from the
-  original \exception{ValueError}, and the line number and URL of the
-  value which provoked the error.  The following additional attributes
-  are provided:
-
-  \begin{tableii}{l|l}{member}{Attribute}{Value}
-    \lineii{colno}
-           {column number at which the value starts, or \code{None}}
-    \lineii{exception}
-           {the original \exception{ValueError} instance}
-    \lineii{lineno}
-           {line number on which the value starts}
-    \lineii{message}
-           {\function{str()} returned by the original \exception{ValueError}}
-    \lineii{value}
-           {original value passed to the conversion function}
-    \lineii{url}
-           {URL of the resource providing the value text}
-  \end{tableii}
-\end{excdesc}
-
-\begin{excdesc}{SchemaError}
-  Raised when a schema contains an error.  This exception type
-  provides the attributes \member{url}, \member{lineno}, and
-  \member{colno}, which provide the source URL, the line number, and
-  the column number at which the error was detected.  These attributes
-  may be \code{None} in some cases.
-\end{excdesc}
-
-\begin{excdesc}{SchemaResourceError}
-  Raised when there's an error locating a resource required by the
-  schema.  This is derived from \exception{SchemaError}.  Instances of
-  this exception class add the attributes \member{filename},
-  \member{package}, and \member{path}, which hold the filename
-  searched for within the package being loaded, the name of the
-  package, and the \code{__path__} attribute of the package itself (or
-  \constant{None} if it isn't a package or could not be imported).
-\end{excdesc}
-
-\begin{excdesc}{SubstitutionReplacementError}
-  Raised when the source text contains references to names which are
-  not defined in \var{mapping}.  The attributes \member{source} and
-  \member{name} provide the complete source text and the name
-  (converted to lower case) for which no replacement is defined.
-\end{excdesc}
-
-\begin{excdesc}{SubstitutionSyntaxError}
-  Raised when the source text contains syntactical errors.
-\end{excdesc}
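-
-Since these exceptions share the \exception{ConfigurationError} base
-class, a typical application can report configuration problems with a
-single handler.  A sketch (assuming \code{schema} has already been
-loaded using \function{loadSchema()}):
-
-\begin{verbatim}
-import sys
-import ZConfig
-
-try:
-    config, handler = ZConfig.loadConfig(schema, sys.argv[1])
-except ZConfig.ConfigurationError, e:
-    # e.url names the resource in which the error was found
-    print >>sys.stderr, "%s (%s)" % (str(e), e.url)
-    sys.exit(1)
-\end{verbatim}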
-
-
-\subsection{Basic Usage}
-
-The simplest use of \refmodule{ZConfig} is to load a configuration
-based on a schema stored in a file.  This example loads a
-configuration file specified on the command line using a schema in the
-same directory as the script:
-
-\begin{verbatim}
-import os
-import sys
-import ZConfig
-
-try:
-    myfile = __file__
-except NameError:
-    myfile = os.path.realpath(sys.argv[0])
-
-mydir = os.path.dirname(myfile)
-
-schema = ZConfig.loadSchema(os.path.join(mydir, 'schema.xml'))
-conf, handler = ZConfig.loadConfig(schema, sys.argv[1])
-\end{verbatim}
-
-If the schema file contained this schema:
-
-\begin{verbatim}
-<schema>
-  <key name='server' required='yes'/>
-  <key name='attempts' datatype='integer' default='5'/>
-</schema>
-\end{verbatim}
-
-and the file specified on the command line contained this text:
-
-\begin{verbatim}
-# sample configuration
-
-server www.example.com
-\end{verbatim}
-
-then the configuration object \code{conf} loaded above would have two
-attributes:
-
-\begin{tableii}{l|l}{member}{Attribute}{Value}
-  \lineii{server}{\code{'www.example.com'}}
-  \lineii{attempts}{\code{5}}
-\end{tableii}
-
-
-\section{\module{ZConfig.datatypes} --- Default data type registry}
-
-\declaremodule{}{ZConfig.datatypes}
-\modulesynopsis{Default implementation of a data type registry}
-
-The \module{ZConfig.datatypes} module provides the implementation of
-the default data type registry and all the standard data types
-supported by \module{ZConfig}.  A number of convenience classes are
-also provided to assist in the creation of additional data types.
-
-A \dfn{datatype registry} is an object that provides conversion
-functions for data types.  The interface for a registry is fairly
-simple.
-
-A \dfn{conversion function} is any callable object that accepts a
-single argument and returns a suitable value, or raises an exception
-if the input value is not acceptable.  \exception{ValueError} is the
-preferred exception for disallowed inputs, but any other exception
-will be properly propagated.
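-
-For example, a minimal conversion function that accepts only positive
-integers might look like this (an illustrative sketch; it is not part
-of \module{ZConfig}):
-
-\begin{verbatim}
-def positive_integer(value):
-    # int() raises ValueError for input that isn't an integer.
-    n = int(value)
-    if n <= 0:
-        raise ValueError('value must be positive: %r' % value)
-    return n
-\end{verbatim}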
-
-\begin{classdesc}{Registry}{\optional{stock}}
-  Implementation of a simple type registry.  If given, \var{stock}
-  should be a mapping which defines the ``built-in'' data types for
-  the registry; if omitted or \code{None}, the standard set of data
-  types is used (see section~\ref{standard-datatypes}, ``Standard
-  \module{ZConfig} Datatypes'').
-\end{classdesc}
-
-\class{Registry} objects have the following methods:
-
-\begin{methoddesc}{get}{name}
-  Return the type conversion routine for \var{name}.  If the
-  conversion function cannot be found, an (unspecified) exception is
-  raised.  If the name is not provided in the stock set of data types
-  by this registry and has not otherwise been registered, this method
-  uses the \method{search()} method to load the conversion function.
-  This is the only method the rest of \module{ZConfig} requires.
-\end{methoddesc}
-
-\begin{methoddesc}{register}{name, conversion}
-  Register the data type name \var{name} to use the conversion
-  function \var{conversion}.  If \var{name} is already registered or
-  provided as a stock data type, \exception{ValueError} is raised
-  (this includes the case when \var{name} was found using the
-  \method{search()} method).
-\end{methoddesc}
-
-\begin{methoddesc}{search}{name}
-  This is a helper method for the default implementation of the
-  \method{get()} method.  If \var{name} is a Python dotted-name, this
-  method loads the value for the name by dynamically importing the
-  containing module and extracting the value of the name.  The name
-  must refer to a usable conversion function.
-\end{methoddesc}
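-
-For example, a registry might be used like this (a sketch; the
-\function{positive_integer()} function is the illustrative conversion
-shown earlier):
-
-\begin{verbatim}
-from ZConfig.datatypes import Registry
-
-registry = Registry()
-registry.register('positive-integer', positive_integer)
-
-convert = registry.get('integer')   # stock data type
-value = convert('42')               # -> 42
-\end{verbatim}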
-
-
-The following classes are provided to define conversion functions:
-
-\begin{classdesc}{MemoizedConversion}{conversion}
-  Simple memoization for potentially expensive conversions.  This
-  conversion helper caches each successful conversion for re-use at a
-  later time; failed conversions are not cached in any way, since it
-  is difficult to raise a meaningful exception providing information
-  about the specific failure.
-\end{classdesc}
-
-\begin{classdesc}{RangeCheckedConversion}{conversion\optional{,
-                                          min\optional{, max}}}
-  Helper that performs range checks on the result of another
-  conversion.  Values passed to instances of this conversion are
-  converted using \var{conversion} and then range checked.  \var{min}
-  and \var{max}, if given and not \code{None}, are the inclusive
-  endpoints of the allowed range.  Values returned by \var{conversion}
-  which lie outside the range described by \var{min} and \var{max}
-  cause \exception{ValueError} to be raised.
-\end{classdesc}
-
-\begin{classdesc}{RegularExpressionConversion}{regex}
-  Conversion that checks that the input matches the regular expression
-  \var{regex}.  If it matches, returns the input, otherwise raises
-  \exception{ValueError}.
-\end{classdesc}
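-
-These helpers are themselves conversion functions, so they can be
-composed.  A sketch combining them (the names \code{port} and
-\code{identifier} are illustrative):
-
-\begin{verbatim}
-import ZConfig.datatypes
-
-# Accept integers from 1 through 65535, caching successful results.
-port = ZConfig.datatypes.MemoizedConversion(
-    ZConfig.datatypes.RangeCheckedConversion(int, min=1, max=65535))
-
-# Accept simple identifiers, returning the matching input unchanged.
-identifier = ZConfig.datatypes.RegularExpressionConversion(
-    r'[a-zA-Z_][a-zA-Z0-9_]*$')
-\end{verbatim}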
-
-
-\section{\module{ZConfig.loader} --- Resource loading support}
-
-\declaremodule{}{ZConfig.loader}
-\modulesynopsis{Support classes for resource loading}
-
-This module provides some helper classes used by the primary APIs
-exported by the \module{ZConfig} package.  These classes may be useful
-for some applications, especially applications that want to use a
-non-default data type registry.
-
-\begin{classdesc}{Resource}{file, url\optional{, fragment}}
-  Object that allows an open file object and a URL to be bound
-  together to ease handling.  Instances have the attributes
-  \member{file}, \member{url}, and \member{fragment} which store the
-  constructor arguments.  These objects also have a \method{close()}
-  method which will call \method{close()} on \var{file}, then set the
-  \member{file} attribute to \code{None} and the \member{closed}
-  attribute to \constant{True}.
-\end{classdesc}
-
-\begin{classdesc}{BaseLoader}{}
-  Base class for loader objects.  This should not be instantiated
-  directly, as the \method{loadResource()} method must be overridden
-  for the instance to be used via the public API.
-\end{classdesc}
-
-\begin{classdesc}{ConfigLoader}{schema}
-  Loader for configuration files.  Each configuration file must
-  conform to the schema \var{schema}.  The \method{load*()} methods
-  return a tuple consisting of the configuration object and a
-  composite handler.
-\end{classdesc}
-
-\begin{classdesc}{SchemaLoader}{\optional{registry}}
-  Loader that loads schema instances.  All schemas loaded by a
-  \class{SchemaLoader} will use the same data type registry.  If
-  \var{registry} is provided and not \code{None}, it will be used,
-  otherwise an instance of \class{ZConfig.datatypes.Registry} will be
-  used.
-\end{classdesc}
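-
-For example, a schema and a configuration might be loaded with an
-application-supplied registry like this (a sketch; the file names are
-placeholders):
-
-\begin{verbatim}
-import ZConfig.datatypes
-import ZConfig.loader
-
-registry = ZConfig.datatypes.Registry()
-schema = ZConfig.loader.SchemaLoader(registry).loadURL('schema.xml')
-config, handlers = ZConfig.loader.ConfigLoader(schema).loadURL('app.conf')
-\end{verbatim}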
-
-
-\subsection{Loader Objects}
-
-Loader objects provide a general public interface, an interface which
-subclasses must implement, and some utility methods.
-
-The following methods provide the public interface:
-
-\begin{methoddesc}[loader]{loadURL}{url}
-  Open and load a resource specified by the URL \var{url}.
-  This method uses the \method{loadResource()} method to perform the
-  actual load, and returns whatever that method returns.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{loadFile}{file\optional{, url}}
-  Load from an open file object, \var{file}.  If given and not
-  \code{None}, \var{url} should be the URL of the resource represented
-  by \var{file}.  If omitted or \code{None}, the \member{name}
-  attribute of \var{file}, if present, is used to compute a
-  \code{file:} URL.
-  This method uses the \method{loadResource()} method to perform the
-  actual load, and returns whatever that method returns.
-\end{methoddesc}
-
-The following method must be overridden by subclasses:
-
-\begin{methoddesc}[loader]{loadResource}{resource}
-  Subclasses of \class{BaseLoader} must implement this method to
-  actually load the resource and return the appropriate
-  application-level object.
-\end{methoddesc}
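-
-For example, a trivial loader that simply returns the text of a
-resource might be sketched as:
-
-\begin{verbatim}
-import ZConfig.loader
-
-class TextLoader(ZConfig.loader.BaseLoader):
-    def loadResource(self, resource):
-        # The resource wraps an open file and its URL; return the
-        # file's contents as the application-level object.
-        return resource.file.read()
-\end{verbatim}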
-
-The following methods can be used as utilities:
-
-\begin{methoddesc}[loader]{isPath}{s}
-  Return true if \var{s} should be considered a filesystem path rather
-  than a URL.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{normalizeURL}{url-or-path}
-  Return a URL for \var{url-or-path}.  If \var{url-or-path} refers to
-  an existing file, the corresponding \code{file:} URL is returned.
-  Otherwise \var{url-or-path} is checked for sanity: if it
-  does not have a scheme, \exception{ValueError} is raised, and if it
-  does have a fragment identifier, \exception{ConfigurationError} is
-  raised.
-  This uses \method{isPath()} to determine whether \var{url-or-path}
-  is a URL or a filesystem path.
-\end{methoddesc}
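-
-For example (illustrative values; the result for a filesystem path
-depends on the files actually present):
-
-\begin{verbatim}
-loader = ZConfig.loader.SchemaLoader()
-loader.isPath('/etc/app.conf')          # True
-loader.isPath('http://example.com/x')   # False
-loader.normalizeURL('/etc/app.conf')    # 'file:///etc/app.conf'
-\end{verbatim}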
-
-\begin{methoddesc}[loader]{openResource}{url}
-  Returns a resource object that represents the URL \var{url}.  The
-  URL is opened using the \function{urllib2.urlopen()} function, and
-  the returned resource object is created using
-  \method{createResource()}.  If the URL cannot be opened,
-  \exception{ConfigurationError} is raised.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{createResource}{file, url}
-  Returns a resource object for an open file and URL, given as
-  \var{file} and \var{url}, respectively.  This may be overridden by a
-  subclass if an alternate resource implementation is desired.
-\end{methoddesc}
-
-
-\section{\module{ZConfig.cmdline} --- Command-line override support}
-
-\declaremodule{}{ZConfig.cmdline}
-\modulesynopsis{Support for command-line overrides for configuration
-                settings.}
-
-This module exports an extended version of the \class{ConfigLoader}
-class from the \refmodule{ZConfig.loader} module.  This provides
-support for overriding specific settings in the configuration file
-from the command line, without requiring the application to provide
-specific options for everything the configuration file can include.
-
-\begin{classdesc}{ExtendedConfigLoader}{schema}
-  A \class{ConfigLoader} subclass that adds support for
-  command-line overrides.
-\end{classdesc}
-
-The following additional method is provided, and is the only way to
-provide position information to associate with command-line
-parameters:
-
-\begin{methoddesc}{addOption}{spec\optional{, pos}}
-  Add a single value to the list of overridden values.  The \var{spec}
-  argument is a value specifier, as described for the
-  \function{\refmodule{ZConfig}.loadConfig()} function.  A source
-  position for the specifier may be given as \var{pos}.  If \var{pos}
-  is specified and not \code{None}, it must be a sequence of three
-  values.  The first is the URL of the source (or some other
-  identifying string).  The second and third are the line number and
-  column of the setting.  This position information is only used to
-  construct a \exception{DataConversionError} when data conversion
-  fails.
-\end{methoddesc}
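-
-For example, an override might be added like this (a sketch assuming
-the simple \code{name=value} specifier form; the \code{attempts} key
-and \code{schema} object are from the Basic Usage example, and
-\code{app.conf} is a placeholder):
-
-\begin{verbatim}
-import ZConfig.cmdline
-
-loader = ZConfig.cmdline.ExtendedConfigLoader(schema)
-loader.addOption('attempts=10', ('<command line>', 1, 1))
-config, handlers = loader.loadURL('app.conf')
-\end{verbatim}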
-
-
-\section{\module{ZConfig.substitution} --- String substitution}
-
-\declaremodule{}{ZConfig.substitution}
-\modulesynopsis{Shell-style string substitution helper.}
-
-This module provides a basic substitution facility similar to that
-found in the Bourne shell (\program{sh} on most \UNIX{} platforms).  
-
-The replacements supported by this module include:
-
-\begin{tableiii}{l|l|c}{code}{Source}{Replacement}{Notes}
-  \lineiii{\$\$}{\code{\$}}{(1)}
-  \lineiii{\$\var{name}}{The result of looking up \var{name}}{(2)}
-  \lineiii{\$\{\var{name}\}}{The result of looking up \var{name}}{}
-\end{tableiii}
-
-\noindent
-Notes:
-\begin{description}
-  \item[(1)]  This is different from the Bourne shell, which uses
-              \code{\textbackslash\$} to generate a \character{\$} in
-              the result text.  This difference avoids having as many
-              special characters in the syntax.
-
-  \item[(2)]  The name ends at the first character that is not valid
-              in a name; use the \code{\$\{\var{name}\}} form when the
-              replacement must be followed immediately by a character
-              that would otherwise be part of the name.
-\end{description}
-
-In each case, \var{name} is a non-empty sequence of alphanumeric and
-underscore characters not starting with a digit.  If there is not a
-replacement for \var{name}, the exception
-\exception{SubstitutionReplacementError} is raised.
-Note that the lookup is expected to be case-insensitive; this module
-will always use a lower-case version of the name to perform the query.
-
-This module provides these functions:
-
-\begin{funcdesc}{substitute}{s, mapping}
-  Substitute values from \var{mapping} into \var{s}.  \var{mapping}
-  can be a \class{dict} or any type that supports the \method{get()}
-  method of the mapping protocol.  Replacement
-  values are copied into the result without further interpretation.
-  Raises \exception{SubstitutionSyntaxError} if there are malformed
-  constructs in \var{s}.
-\end{funcdesc}
-
-\begin{funcdesc}{isname}{s}
-  Returns \constant{True} if \var{s} is a valid substitution name,
-  otherwise returns \constant{False}.
-\end{funcdesc}
-
-
-\subsection{Examples}
-
-\begin{verbatim}
->>> from ZConfig.substitution import substitute
->>> d = {'name': 'value',
-...      'top': '$middle',
-...      'middle' : 'bottom'}
->>>
->>> substitute('$name', d)
-'value'
->>> substitute('$top', d)
-'$middle'
-\end{verbatim}
-
-
-\appendix
-\section{Schema Document Type Definition \label{schema-dtd}}
-
-The following is the XML Document Type Definition for \module{ZConfig}
-schema:
-
-\verbatiminput{schema.dtd}
-
-\end{document}
diff --git a/branches/bug1734/doc/ZEO/README.txt b/branches/bug1734/doc/ZEO/README.txt
deleted file mode 100644
index a23de5f5..00000000
--- a/branches/bug1734/doc/ZEO/README.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-ZEO Documentation
-=================
-
-This directory contains ZEO documentation.
-
-howto.txt
-    The first place to look.
-
-    It provides a high-level overview of ZEO features and details on
-    how to install and configure clients and servers.  Includes a 
-    configuration reference.
-
-cache.txt
-    Explains how the client cache works.
-    
-trace.txt
-    Describes the cache trace tool used to determine the ideal cache size.
-
-ZopeREADME.txt
-    A somewhat dated description of how to integrate ZEO with Zope.
-    It provides a few hints that are not yet in howto.txt.
diff --git a/branches/bug1734/doc/ZEO/ZopeREADME.txt b/branches/bug1734/doc/ZEO/ZopeREADME.txt
deleted file mode 100644
index 4f2865c1..00000000
--- a/branches/bug1734/doc/ZEO/ZopeREADME.txt
+++ /dev/null
@@ -1,96 +0,0 @@
-Zope Enterprise Objects (ZEO)
-
-  Installation
-
-    ZEO 2.0 requires Zope 2.4 or higher and Python 2.1 or higher.
-    If you use Python 2.1, we recommend the latest minor release
-    (2.1.3 as of this writing) because it includes a few bug fixes
-    that affect ZEO.
-
-    Put the package (the ZEO directory, without any wrapping directory
-    included in a distribution) in your Zope lib/python.
-
-    The setup.py script in the top-level ZEO directory can also be
-    used.  Run "python setup.py install --home=ZOPE" where ZOPE is the
-    top-level Zope directory.
-
-    You can test ZEO before installing it with the test script::
-
-      python test.py -v
-
-    Run the script with the -h option for a full list of options.  The
-    ZEO 2.0b2 release contains 122 unit tests on Unix.
-
-  Starting (and configuring) the ZEO Server
-
-    To start the storage server, go to your Zope install directory and
-    run::
-
-      python lib/python/ZEO/start.py -p port_number
-
-    This runs the storage server under zdaemon.  zdaemon automatically
-    restarts programs that exit unexpectedly.
-
-    The server and the client don't have to be on the same machine.
-    If they are on the same machine, then you can use a Unix domain
-    socket::
-
-      python lib/python/ZEO/start.py -U filename
-
-    The start script provides a number of options not documented here.
-    See doc/start.txt for more information.
-        
-  Running Zope as a ZEO client
-
-    To get Zope to use the server, create a custom_zodb module,
-    custom_zodb.py, in your Zope install directory, so that Zope uses a
-    ClientStorage::
-
-      from ZEO.ClientStorage import ClientStorage
-      Storage = ClientStorage(('', port_number))
-
-    You can specify a host name (rather than '') if you want.  The port
-    number is, of course, the port number used to start the storage
-    server.
-
-    You can also give the name of a Unix domain socket file::
-
-      from ZEO.ClientStorage import ClientStorage
-      Storage = ClientStorage(filename)
-
-    There are a number of configuration options available for the
-    ClientStorage. See doc/ClientStorage.txt for details.
-
-    If you want a persistent client cache which retains cache contents
-    across ClientStorage restarts, you need to define the environment
-    variable ZEO_CLIENT, or set the client keyword argument of the
-    constructor, to a unique name for the client.  This is needed so
-    that unique cache file names can be computed.  Otherwise, the
-    client cache is stored in temporary files which are removed when
-    the ClientStorage shuts down.  For example, to start two Zope
-    processes with unique caches, use something like::
-
-      python z2.py -P8700 ZEO_CLIENT=8700
-      python z2.py -P8800 ZEO_CLIENT=8800
-
-  Zope product installation
-
-    Normally, Zope updates the Zope database during startup to reflect
-    product changes or new products found. It makes no sense for
-    multiple ZEO clients to do the same installation. Further, if
-    different clients have different software installed, the correct
-    state of the database is ambiguous.
-
-    Zope will not modify the Zope database during product installation
-    if the environment variable ZEO_CLIENT is set.
-
-    Normally, Zope ZEO clients should be run with ZEO_CLIENT set so
-    that product initialization is not performed.
-
-    If you do install new Zope products, then you need to take a
-    special step to cause the new products to be properly registered
-    in the database.  The easiest way to do this is to start Zope
-    once with the environment variable FORCE_PRODUCT_LOAD set.
-
-    The interaction between ZEO and Zope product installation is
-    unfortunate.
diff --git a/branches/bug1734/doc/ZEO/cache.txt b/branches/bug1734/doc/ZEO/cache.txt
deleted file mode 100644
index 9e8dc5a1..00000000
--- a/branches/bug1734/doc/ZEO/cache.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-ZEO Client Cache
-
-  The client cache provides a disk-based cache for each ZEO client.
-  The client cache allows reads to be done from local disk rather than
-  by remote access to the storage server.
-
-  The cache may be persistent or transient. If the cache is
-  persistent, then the cache files are retained for use after process
-  restarts. A non-persistent cache uses temporary files that are
-  removed when the client storage is closed.
-
-  The client cache is managed as two files. The cache manager
-  endeavors to maintain the two files at sizes less than or equal to
-  one half the cache size.  One of the cache files is designated the
-  "current" cache file. The other cache file is designated the "old"
-  cache file, if it exists.  All writes are done to the current cache
-  file.  When transactions are committed on the client, transactions
-  are not split between cache files. Large transactions may cause
-  cache files to be larger than one half the target cache size.
-
-  The life of the cache is as follows:
-
-  - When the cache is created, the first of the two cache files is
-    created and designated the "current" cache file.
-
-  - Cache records are written to the cache file, either as
-    transactions commit locally, or as data are loaded from the
-    server.
-
-  - When the cache file size exceeds one half the cache size, the
-    second cache file is created and designated the "current" cache
-    file.  The first cache file becomes the "old" cache file.
-
-  - Cache records are written to the new current cache file, either as
-    transactions commit locally, or as data are loaded from the
-    server.
-
-  - When a cache hit is found in the old cache file, it is copied to
-    the current cache file.
-
-  - When the current cache file size exceeds one half the cache size, the
-    first cache file is recreated and designated the "current" cache
-    file.  The second cache file becomes the "old" cache file.
-
-  and so on.
-
-  Persistent cache files are created in the directory named in the
-  'var' argument to the ClientStorage (see ClientStorage.txt) or in
-  the 'var' subdirectory of the directory given by the INSTANCE_HOME
-  builtin (created by Zope), or in the current working directory.
-  Persistent cache files have names of the form::
-
-    cstorage-client-n.zec
-
-  where:
-
-    storage -- the storage name
-
-    client -- the client name, as given by the 'ZEO_CLIENT' environment
-      variable or the 'client' argument provided when creating a client
-      storage.
-
-    n -- '0' for the first cache file and '1' for the second. 
-
-  For example, the second cache file for storage 'spam' and client 8881
-  would be named 'cspam-8881-1.zec'.
-
diff --git a/branches/bug1734/doc/ZEO/howto.txt b/branches/bug1734/doc/ZEO/howto.txt
deleted file mode 100644
index 46a85233..00000000
--- a/branches/bug1734/doc/ZEO/howto.txt
+++ /dev/null
@@ -1,433 +0,0 @@
-==========================
-Running a ZEO Server HOWTO
-==========================
-
-Introduction
-------------
-
-ZEO (Zope Enterprise Objects) is a client-server system for sharing a
-single storage among many clients.  Normally, a ZODB storage can only
-be used by a single process.  When you use ZEO, the storage is opened
-in the ZEO server process.  Client programs connect to this process
-using a ZEO ClientStorage.  ZEO provides a consistent view of the
-database to all clients.  The ZEO client and server communicate using
-a custom RPC protocol layered on top of TCP.
-
-There are several configuration options that affect the behavior of a
-ZEO server.  This section describes how a few of these features
-work.  Subsequent sections describe how to configure every option.
-
-Client cache
-~~~~~~~~~~~~
-
-Each ZEO client keeps an on-disk cache of recently used objects to
-avoid fetching those objects from the server each time they are
-requested.  It is usually faster to read the objects from disk than it
-is to fetch them over the network.  The cache can also provide
-read-only copies of objects during server outages.
-
-The cache may be persistent or transient. If the cache is persistent,
-then the cache files are retained for use after process restarts. A
-non-persistent cache uses temporary files that are removed when the
-client storage is closed.
-
-The client cache size is configured when the ClientStorage is created.
-The default size is 20MB, but the right size depends entirely on the
-particular database.  Setting the cache size too small can hurt
-performance, but in most cases making it too big just wastes disk
-space.  The document "Client cache tracing" describes how to collect a
-cache trace that can be used to determine a good cache size.
-
-ZEO uses invalidations for cache consistency.  Every time an object is
-modified, the server sends a message to each client informing it of
-the change.  The client will discard the object from its cache when it
-receives an invalidation.  These invalidations are often batched.
-
-Each time a client connects to a server, it must verify that its cache
-contents are still valid, since it may have missed invalidation
-messages while it was disconnected.  There are several mechanisms
-used to perform cache verification.  In the worst case, the client
-sends the server a list of all objects in its cache along with their
-timestamps; the server sends back an invalidation message for each
-stale object.  The cost of verification is one drawback to making the
-cache too large.
-
-Note that every time a client crashes or disconnects, it must verify
-its cache.  Every time a server crashes, all of its clients must
-verify their caches.
-
-The cache verification process is optimized in two ways to eliminate
-costs when restarting clients and servers.  Each client keeps the
-timestamp of the last invalidation message it has seen.  When it
-connects to the server, it checks to see if any invalidation messages
-were sent after that timestamp.  If not, then the cache is up-to-date
-and no further verification occurs.  The other optimization is the
-invalidation queue, described below.
-
-Invalidation queue
-~~~~~~~~~~~~~~~~~~
-
-The ZEO server keeps a queue of recent invalidation messages in
-memory.  When a client connects to the server, it sends the timestamp
-of the most recent invalidation message it has received.  If that
-message is still in the invalidation queue, then the server sends the
-client all the missing invalidations.  This is often cheaper than
-performing full cache verification.
-
-The default size of the invalidation queue is 100.  If the
-invalidation queue is larger, it will be more likely that a client
-that reconnects will be able to verify its cache using the queue.  On
-the other hand, a large queue uses more memory on the server to store
-the messages.  Invalidation messages tend to be small, perhaps a few
-hundred bytes each on average; it depends on the number of objects
-modified by a transaction.
-
-Transaction timeouts
-~~~~~~~~~~~~~~~~~~~~
-
-A ZEO server can be configured to time out a transaction if it takes
-too long to complete.  Only a single transaction can commit at a time;
-so if one transaction takes too long, all other clients will be
-delayed waiting for it.  In the extreme, a client can hang during the
-commit process.  If the client hangs, the server will be unable to
-commit other transactions until it restarts.  A well-behaved client
-will not hang, but the server can be configured with a transaction
-timeout to guard against bugs that cause a client to hang.
-
-If any transaction exceeds the timeout threshold, the client's
-connection to the server will be closed and the transaction aborted.
-Once the transaction is aborted, the server can start processing other
-clients' requests.  Most transactions should take very little time to
-commit.  The timer begins for a transaction after all the data has
-been sent to the server.  At this point, the cost of commit should be
-dominated by the cost of writing data to disk; it should be unusual
-for a commit to take longer than 1 second.  A transaction timeout of
-30 seconds should tolerate heavy load and slow communications between
-client and server, while guarding against hung servers.
-
-When a transaction times out, the client can be left in an awkward
-position.  If the timeout occurs during the second phase of the
-two-phase commit, the client will log a panic message.  This should only
-cause problems if the client transaction involved multiple storages.
-If it did, it is possible that some storages committed the client
-changes and others did not.
-
-Monitor server
-~~~~~~~~~~~~~~
-
-The ZEO server updates several counters while it is running.  It can
-be configured to run a separate monitor server that reports the
-counter values and other statistics.  If a client connects to the
-socket, the server sends a text report and closes the socket
-immediately.  It does not read any data from the client.
-
-An example of a monitor server report is included below::
-
-    ZEO monitor server version 2.1a1
-    Fri Apr  4 16:57:42 2003
-    
-    Storage: 1
-    Server started: Fri Apr  4 16:57:37 2003
-    Clients: 0
-    Clients verifying: 0
-    Active transactions: 0
-    Commits: 0
-    Aborts: 0
-    Loads: 0
-    Stores: 0
-    Conflicts: 0
-    Conflicts resolved: 0
-
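-A minimal sketch of a client that fetches a report (assuming the
-monitor listens on port 8091 of zeo.example.com, as in the
-configuration example later in this document)::
-
-    import socket
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect(('zeo.example.com', 8091))
-    report = ''
-    while 1:
-        data = s.recv(1024)
-        if not data:
-            # The server closed the socket; the report is complete.
-            break
-        report += data
-    s.close()
-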
-Connection management
-~~~~~~~~~~~~~~~~~~~~~
-
-A ZEO client manages its connection to the ZEO server.  If it loses
-the connection, it starts a thread that attempts to reconnect.  While
-it is disconnected, it can satisfy some reads by using its cache.
-
-The client can be configured to wait for a connection when it is created
-or to return immediately and provide data from its persistent cache.
-It usually simplifies programming to have the client wait for a
-connection on startup.
-
-When the client is disconnected, it polls periodically to see if the
-server is available.  The rate at which it polls is configurable.
-
-The client can be configured with multiple server addresses.  In this
-case, it assumes that each server has identical content and will use
-any server that is available.  It is possible to configure the client
-to accept a read-only connection to one of these servers if no
-read-write connection is available.  If it has a read-only connection,
-it will continue to poll for a read-write connection.  This feature
-supports the Zope Replication Services product,
-http://www.zope.com/Products/ZopeProducts/ZRS.  In general, it could
-be used with a system that arranges to provide hot backups of
-servers in the case of failure.
-
-Authentication
-~~~~~~~~~~~~~~
-
-ZEO supports optional authentication of client and server using a
-password scheme similar to HTTP digest authentication (RFC 2069).  It
-is a simple challenge-response protocol that does not send passwords
-in the clear, but does not offer strong security.  The RFC discusses
-many of the limitations of this kind of protocol.  Note that this
-feature provides authentication only.  It does not provide encryption
-or confidentiality.
-
-The challenge-response also produces a session key that is used to
-generate message authentication codes for each ZEO message.  This
-should prevent session hijacking.
-
-Guard the password database as if it contained plaintext passwords.
-It stores the hash of a username and password.  This does not expose
-the plaintext password, but it is sensitive nonetheless.  An attacker
-with the hash can impersonate the real user.  This is a limitation of
-the simple digest scheme.
-
-The authentication framework allows third-party developers to provide
-new authentication modules.
-
-Installing software
--------------------
-
-ZEO is distributed as part of the ZODB3 package and with Zope,
-starting with Zope 2.7.  You can download it from:
-
-- http://www.zope.org/Products/ZODB3.2, or
-- http://www.zope.org/Products/Zope
-
-To use ZEO with Zope 2.6, download ZODB3.2 and install it into your
-Zope software home.  ZODB3 comes with a distutils setup.py script.
-You can use the --home option to setup.py install to install the
-software in a custom location.  For example, if Zope is installed
-in /home/zope, then this command will install the new ZEO and ZODB:
-
-    python setup.py install --home /home/zope
-
-The install command should create a /home/zope/lib/python/ZEO directory.
-
-Configuring server
-------------------
-
-The script runzeo.py runs the ZEO server.  The server can be
-configured using command-line arguments or a config file.  This
-document only describes the config file.  Run runzeo.py
--h to see the list of command-line arguments.
-
-The runzeo.py script imports the ZEO package.  ZEO must either be
-installed in Python's site-packages directory or be in a directory on
-PYTHONPATH.  
-
-The configuration file specifies the underlying storage the server
-uses, the address it binds, and a few other optional parameters.
-An example is::
-
-    <zeo>
-    address zeo.example.com:8090
-    monitor-address zeo.example.com:8091
-    </zeo>
-
-    <filestorage 1>
-    path /var/tmp/Data.fs
-    </filestorage>
-
-    <eventlog>
-    <logfile>
-    path /var/tmp/zeo.log
-    format %(asctime)s %(message)s
-    </logfile>
-    </eventlog>
-
-This file configures a server to use a FileStorage from
-/var/tmp/Data.fs.  The server listens on port 8090 of zeo.example.com.
-It also starts a monitor server that listens on port 8091.  The ZEO
-server writes its log file to /var/tmp/zeo.log and uses a custom
-format for each line.  Assuming the example configuration is stored in
-zeo.config, you can run a server by typing::
-
-    python /usr/local/bin/runzeo.py -C zeo.config
-
-A configuration file consists of a <zeo> section and a storage
-section, where the storage section can use any of the valid ZODB
-storage types.  It may also contain an eventlog configuration.  See
-the document "Configuring a ZODB database" for more information about
-configuring storages and eventlogs.
-
-The zeo section must list the address.  All the other keys are
-optional.
-
-address
-        The address at which the server should listen.  This can be in
-        the form 'host:port' to signify a TCP/IP connection or a
-        pathname string to signify a Unix domain socket connection (at
-        least one '/' is required).  A hostname may be a DNS name or a
-        dotted IP address.  If the hostname is omitted, the platform's
-        default behavior is used when binding the listening socket (''
-        is passed to socket.bind() as the hostname portion of the
-        address).
-
-read-only
-        Flag indicating whether the server should operate in read-only
-        mode.  Defaults to false.  Note that even if the server is
-        operating in writable mode, individual storages may still be
-        read-only.  But if the server is in read-only mode, no write
-        operations are allowed, even if the storages are writable.  Note
-        that pack() is considered a read-only operation.
-
-invalidation-queue-size
-        The storage server keeps a queue of the objects modified by the
-        last N transactions, where N == invalidation_queue_size.  This
-        queue is used to speed client cache verification when a client
-        disconnects for a short period of time.
-
-monitor-address
-        The address at which the monitor server should listen.  If
-        specified, a monitor server is started.  The monitor server
-        provides server statistics in a simple text format.  This can
-        be in the form 'host:port' to signify a TCP/IP connection or a
-        pathname string to signify a Unix domain socket connection (at
-        least one '/' is required).  A hostname may be a DNS name or a
-        dotted IP address.  If the hostname is omitted, the platform's
-        default behavior is used when binding the listening socket (''
-        is passed to socket.bind() as the hostname portion of the
-        address).
-
-transaction-timeout
-        The maximum amount of time to wait for a transaction to commit
-        after acquiring the storage lock, specified in seconds.  If the
-        transaction takes too long, the client connection will be closed
-        and the transaction aborted.
-
-authentication-protocol
-        The name of the protocol used for authentication.  The
-        only protocol provided with ZEO is "digest," but extensions
-        may provide other protocols.
-
-authentication-database
-        The path of the database containing authentication credentials.
-
-authentication-realm
-        The authentication realm of the server.  Some authentication
-        schemes use a realm to identify the logical set of usernames
-        that are accepted by this server.
-
-Configuring client
-------------------
-
-The ZEO client can also be configured using ZConfig.  The ZODB.config
-module provides several functions for opening a storage based on its
-configuration.
-
-- ZODB.config.storageFromString()
-- ZODB.config.storageFromFile()
-- ZODB.config.storageFromURL()
-
-The ZEO client configuration requires the server address be
-specified.  Everything else is optional.  An example configuration is::
-
-    <zeoclient>
-    server zeo.example.com:8090
-    </zeoclient>
-
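-Such a configuration can also be loaded directly from a string (a
-sketch using storageFromString)::
-
-    from ZODB.config import storageFromString
-
-    storage = storageFromString("""
-        <zeoclient>
-        server zeo.example.com:8090
-        </zeoclient>
-        """)
-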
-To use a ZEO client from Zope, write a configuration file and load it
-from custom_zodb.py::
-
-    from ZODB.config import storageFromURL
-    Storage = storageFromURL("/path/to/client.txt")
-
-The other configuration options are listed below.
-
-storage
-        The name of the storage that the client wants to use.  If the
-        ZEO server serves more than one storage, the client selects
-        the storage it wants to use by name.  The default name is '1',
-        which is also the default name for the ZEO server.
-
-cache-size
-        The maximum size of the client cache, in bytes.
-
-name
-        The storage name.  If unspecified, the address of the server
-        will be used as the name.
-
-client
-        Enables persistent cache files.  The string passed here is
-        used to construct the cache filenames.  If it is not
-        specified, the client creates a temporary cache that will
-        only be used by the current ClientStorage object.
-
-var
-        The directory where persistent cache files are stored.  By
-        default cache files, if they are persistent, are stored in 
-        the current directory.
-
-min-disconnect-poll
-        The minimum delay between attempts to connect to the server,
-        in seconds.  Defaults to 5 seconds.
-
-max-disconnect-poll
-        The maximum delay between attempts to connect to the server,
-        in seconds.  Defaults to 300 seconds.
-
-wait
-        A boolean indicating whether the constructor should wait
-        for the client to connect to the server and verify the cache
-        before returning.  The default is true.
-
-read-only
-        A flag indicating whether this should be a read-only storage,
-        defaulting to false (i.e. writing is allowed by default).
-
-read-only-fallback
-        A flag indicating whether a read-only remote storage should be
-        acceptable as a fallback when no writable storages are
-        available.  Defaults to false.  At most one of read-only and
-        read-only-fallback should be true.
-
-realm
-        The authentication realm of the server.  Some authentication
-        schemes use a realm to identify the logical set of usernames
-        that are accepted by this server.
-
-A ZEO client can also be created by calling the ClientStorage
-constructor explicitly.  For example::
-
-    from ZEO.ClientStorage import ClientStorage
-    storage = ClientStorage(("zeo.example.com", 8090))
-
-Running the ZEO server as a daemon
-----------------------------------
-
-In an operational setting, you will want to run the ZEO server as a
-daemon process that is restarted when it dies.  The zdaemon package
-provides two tools for running daemons: zdrun.py and zdctl.py.  The
-document "Using zdctl and zdrun to manage server processes"
-(Doc/zdctl.txt) explains how to use these scripts to manage daemons.
-
-Rotating log files
-~~~~~~~~~~~~~~~~~~
-
-ZEO will re-initialize its logging subsystem when it receives a
-SIGUSR2 signal.  If you are using the standard event logger, you
-should first rename the log file and then send the signal to the
-server.  The server will continue writing to the renamed log file
-until it receives the signal.  After it receives the signal, the
-server will create a new file with the old name and write to it.
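-
-For example, a rotation might be scripted as (a sketch; the pid shown
-is illustrative)::
-
-    import os, signal
-
-    server_pid = 1234  # the ZEO server's actual process id
-    os.rename('zeo.log', 'zeo.log.old')
-    os.kill(server_pid, signal.SIGUSR2)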
-
-Tools
------
-
-There are a few scripts that may help running a ZEO server.  The
-zeopack.py script connects to a server and packs the storage.  It can
-be run as a cron job.  The zeoup.py script attempts to connect to a
-ZEO server and verify that it is functioning.  The zeopasswd.py script
-manages a ZEO server's password database.
-
-Diagnosing problems
--------------------
-
-If an exception occurs on the server, the server will log a traceback
-and send an exception to the client.  The traceback on the client will
-show the ZEO protocol library as the source of the error.  If you need
-to diagnose the problem, you will have to look in the server log for
-the rest of the traceback.
diff --git a/branches/bug1734/doc/ZEO/trace.txt b/branches/bug1734/doc/ZEO/trace.txt
deleted file mode 100644
index 67d12a33..00000000
--- a/branches/bug1734/doc/ZEO/trace.txt
+++ /dev/null
@@ -1,126 +0,0 @@
-ZEO Client Cache Tracing
-========================
-
-An important question for ZEO users is: how large should the ZEO
-client cache be?  ZEO 2 (as of ZEO 2.0b2) has a new feature that lets
-you collect a trace of cache activity and tools to analyze this trace,
-enabling you to make an informed decision about the cache size.
-
-Don't confuse the ZEO client cache with the Zope object cache.  The
-ZEO client cache is only used when an object is not in the Zope object
-cache; the ZEO client cache avoids roundtrips to the ZEO server.
-
-Enabling Cache Tracing
-----------------------
-
-To enable cache tracing, set the environment variable ZEO_CACHE_TRACE
-to the name of a file to which the ZEO client process can write.  ZEO
-will append a hyphen and the storage name to the filename, to
-distinguish different storages.  If the file doesn't exist, ZEO
-will try to create it.  If there are problems with the file, a log
-message is written to the standard Zope log file.  To start or stop
-tracing, the ZEO client process (typically a Zope application server)
-must be restarted.
-
-The trace file can grow pretty quickly; on a moderately loaded server,
-we observed it growing by 5 MB per hour.  The file consists of binary
-records, each 24 bytes long; a detailed description of the record
-layout is given in stats.py.  No sensitive data is logged.
-
-Analyzing a Cache Trace
------------------------
-
-The stats.py command-line tool is the first-line tool to analyze a
-cache trace.  Its default output consists of two parts: a one-line
-summary of essential statistics for each segment of 15 minutes,
-interspersed with lines indicating client restarts and "cache flip
-events" (more about those later), followed by a more detailed summary
-of overall statistics.
-
-The most important statistic is probably the "hit rate", a percentage
-indicating how many requests to load an object could be satisfied from
-the cache.  Hit rates around 70% are good.  90% is probably close to
-the theoretical maximum.  If you see a hit rate under 60% you can
-probably improve the cache performance (and hence your Zope
-application server's performance) by increasing the ZEO cache size.
-This is normally configured using the cache_size keyword argument to
-the ClientStorage() constructor in your custom_zodb.py file.  The
-default cache size is 20 MB.
-
-The stats.py tool shows its command line syntax when invoked without
-arguments.  The tracefile argument can be a gzipped file if it has a
-.gz extension.  It will read from stdin (assuming uncompressed data)
-if the tracefile argument is '-'.
-
-Simulating Different Cache Sizes
---------------------------------
-
-Based on a cache trace file, you can make a prediction of how well the
-cache might do with a different cache size.  The simul.py tool runs an
-accurate simulation of the ZEO client cache implementation based upon
-the events read from a trace file.  A new simulation is started each
-time the trace file records a client restart event; if a trace file
-contains more than one restart event, a separate line is printed for
-each simulation, and a line with overall statistics is added at the end.
-
-Example, assuming the trace file is in /tmp/cachetrace.log::
-
-    $ python simul.py -s 100 /tmp/cachetrace.log
-      START TIME  DURATION    LOADS     HITS INVALS WRITES  FLIPS HITRATE
-    Sep  4 11:59     38:01    59833    40473    257     20      2  67.6%
-    $
-
-This shows that with a 100 MB cache size, the cache hit rate is
-67.6%.  So let's try this again with a 200 MB cache size::
-
-    $ python simul.py -s 200 /tmp/cachetrace.log
-      START TIME  DURATION    LOADS     HITS INVALS WRITES  FLIPS HITRATE
-    Sep  4 11:59     38:01    59833    40921    258     20      1  68.4%
-    $
-
-This showed hardly any improvement.  So let's try a 300 MB cache
-size::
-
-    $ python2.0 simul.py -s 300 /tmp/cachetrace.log
-    ZEOCacheSimulation, cache size 300,000,000 bytes
-      START TIME  DURATION    LOADS     HITS INVALS WRITES  FLIPS HITRATE
-    Sep  4 11:59     38:01    59833    40921    258     20      0  68.4%
-    $ 
-
-This shows that for this particular trace file, the maximum attainable
-hit rate is 68.4%.  This is probably caused by the fact that nearly a
-third of the objects mentioned in the trace were loaded only once --
-the cache only helps if an object is loaded more than once.
-
-The simul.py tool also supports simulating different cache
-strategies.  Since none of these are implemented, these are not
-further documented here.
-
-Cache Flips
------------
-
-The cache uses two files, which are managed as follows:
-
-  - Data are written to file 0 until file 0 exceeds limit/2 in size.
-
-  - Data are written to file 1 until file 1 exceeds limit/2 in size.
-
-  - File 0 is truncated to size 0 (or deleted and recreated).
-
-  - Data are written to file 0 until file 0 exceeds limit/2 in size.
-
-  - File 1 is truncated to size 0 (or deleted and recreated).
-
-  - Data are written to file 1 until file 1 exceeds limit/2 in size.
-
-and so on.
-
-A switch from file 0 to file 1 is called a "cache flip".  At all cache
-flips except the first, half of the cache contents is wiped out.  This
-affects cache performance.  How bad this impact is can be seen from
-the per-15-minutes summaries printed by stats.py.  The -i option lets
-you choose a smaller summary interval which shows the impact more
-acutely.
-
-The simul.py tool shows the number of cache flips in the FLIPS column.
-If you see more than one flip per hour the cache may be too small.
diff --git a/branches/bug1734/doc/guide/README b/branches/bug1734/doc/guide/README
deleted file mode 100644
index 314aee9a..00000000
--- a/branches/bug1734/doc/guide/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This directory contains Andrew Kuchling's programmer's guide to ZODB
-and ZEO.  It was originally taken from Andrew's zodb.sf.net project on
-SourceForge.  Because the original version is no longer updated, this
-version is now best viewed as an independent fork.
diff --git a/branches/bug1734/doc/guide/TODO b/branches/bug1734/doc/guide/TODO
deleted file mode 100644
index dda8704c..00000000
--- a/branches/bug1734/doc/guide/TODO
+++ /dev/null
@@ -1,4 +0,0 @@
-Write section on __setstate__
-Continue working on it
-Suppress the full GFDL text in the PDF/PS versions
-
diff --git a/branches/bug1734/doc/guide/admin.tex b/branches/bug1734/doc/guide/admin.tex
deleted file mode 100644
index ee389ed1..00000000
--- a/branches/bug1734/doc/guide/admin.tex
+++ /dev/null
@@ -1,6 +0,0 @@
-
-%  Administration
-%    Importing and exporting data
-%    Disaster recovery/avoidance
-%    Security
-
diff --git a/branches/bug1734/doc/guide/chatter.py b/branches/bug1734/doc/guide/chatter.py
deleted file mode 100644
index acc31501..00000000
--- a/branches/bug1734/doc/guide/chatter.py
+++ /dev/null
@@ -1,125 +0,0 @@
-
-import sys, time, os, random
-
-import transaction
-from persistent import Persistent
-
-from ZEO import ClientStorage
-import ZODB
-from ZODB.POSException import ConflictError
-from BTrees import OOBTree
-
-class ChatSession(Persistent):
-
-    """Class for a chat session.
-    Messages are stored in a B-tree, indexed by the time the message
-    was created.  (Eventually we'd want to throw old messages out.)
-
-    add_message(message) -- add a message to the channel
-    new_messages()       -- return new messages since the last call to
-                            this method
-
-
-    """
-
-    def __init__(self, name):
-        """Initialize new chat session.
-        name -- the channel's name
-        """
-
-        self.name = name
-
-        # Internal attribute: _messages holds all the chat messages.
-        self._messages = OOBTree.OOBTree()
-
-
-    def new_messages(self):
-        "Return new messages."
-
-        # self._v_last_time is the time of the most recent message
-        # returned to the user of this class.
-        if not hasattr(self, '_v_last_time'):
-            self._v_last_time = 0
-
-        new = []
-        T = self._v_last_time
-
-        for T2, message in self._messages.items():
-            if T2 > T:
-                new.append( message )
-                self._v_last_time = T2
-
-        return new
-
-    def add_message(self, message):
-        """Add a message to the channel.
-        message -- text of the message to be added
-        """
-
-        while 1:
-            try:
-                now = time.time()
-                self._messages[ now ] = message
-                transaction.commit()
-            except ConflictError:
-                # Conflict occurred; abort the failed transaction,
-                # pause for a little bit, then try again.
-                transaction.abort()
-                time.sleep(.2)
-            else:
-                # No ConflictError exception raised, so break
-                # out of the enclosing while loop.
-                break
-        # end while
-
-def get_chat_session(conn, channelname):
-    """Return the chat session for a given channel, creating the session
-    if required."""
-
-    # We'll keep a B-tree of sessions, mapping channel names to
-    # session objects.  The B-tree is stored at the ZODB's root under
-    # the key 'chat_sessions'.
-    root = conn.root()
-    if not root.has_key('chat_sessions'):
-        print 'Creating chat_sessions B-tree'
-        root['chat_sessions'] = OOBTree.OOBTree()
-        transaction.commit()
-
-    sessions = root['chat_sessions']
-
-    # Get a session object corresponding to the channel name, creating
-    # it if necessary.
-    if not sessions.has_key( channelname ):
-        print 'Creating new session:', channelname
-        sessions[ channelname ] = ChatSession(channelname)
-        transaction.commit()
-
-    session = sessions[ channelname ]
-    return session
-
-
-if __name__ == '__main__':
-    if len(sys.argv) != 2:
-        print 'Usage: %s <channelname>' % sys.argv[0]
-        sys.exit(0)
-
-    storage = ClientStorage.ClientStorage( ('localhost', 9672) )
-    db = ZODB.DB( storage )
-    conn = db.open()
-
-    s = session = get_chat_session(conn, sys.argv[1])
-
-    messages = ['Hi.', 'Hello', 'Me too', "I'M 3L33T!!!!"]
-
-    while 1:
-        # Send a random message
-        msg = random.choice(messages)
-        session.add_message( '%s: pid %i' % (msg,os.getpid() ))
-
-        # Display new messages
-        for msg in session.new_messages():
-            print msg
-
-        # Wait for a few seconds
-        pause = random.randint( 1, 4 )
-        time.sleep( pause )
diff --git a/branches/bug1734/doc/guide/gfdl.tex b/branches/bug1734/doc/guide/gfdl.tex
deleted file mode 100644
index a6774c2a..00000000
--- a/branches/bug1734/doc/guide/gfdl.tex
+++ /dev/null
@@ -1,367 +0,0 @@
-% gfdl.tex 
-% This file is a chapter.  It must be included in a larger document to work
-% properly.
-
-\section{GNU Free Documentation License}
-
-Version 1.1, March 2000\\
-
- Copyright $\copyright$ 2000  Free Software Foundation, Inc.\\
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\\
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-\subsection*{Preamble}
-
-The purpose of this License is to make a manual, textbook, or other
-written document ``free'' in the sense of freedom: to assure everyone
-the effective freedom to copy and redistribute it, with or without
-modifying it, either commercially or noncommercially.  Secondarily,
-this License preserves for the author and publisher a way to get
-credit for their work, while not being considered responsible for
-modifications made by others.
-
-This License is a kind of ``copyleft'', which means that derivative
-works of the document must themselves be free in the same sense.  It
-complements the GNU General Public License, which is a copyleft
-license designed for free software.
-
-We have designed this License in order to use it for manuals for free
-software, because free software needs free documentation: a free
-program should come with manuals providing the same freedoms that the
-software does.  But this License is not limited to software manuals;
-it can be used for any textual work, regardless of subject matter or
-whether it is published as a printed book.  We recommend this License
-principally for works whose purpose is instruction or reference.
-
-\subsection{Applicability and Definitions}
-
-This License applies to any manual or other work that contains a
-notice placed by the copyright holder saying it can be distributed
-under the terms of this License.  The ``Document'', below, refers to any
-such manual or work.  Any member of the public is a licensee, and is
-addressed as ``you''.
-
-A ``Modified Version'' of the Document means any work containing the
-Document or a portion of it, either copied verbatim, or with
-modifications and/or translated into another language.
-
-A ``Secondary Section'' is a named appendix or a front-matter section of
-the Document that deals exclusively with the relationship of the
-publishers or authors of the Document to the Document's overall subject
-(or to related matters) and contains nothing that could fall directly
-within that overall subject.  (For example, if the Document is in part a
-textbook of mathematics, a Secondary Section may not explain any
-mathematics.)  The relationship could be a matter of historical
-connection with the subject or with related matters, or of legal,
-commercial, philosophical, ethical or political position regarding
-them.
-
-The ``Invariant Sections'' are certain Secondary Sections whose titles
-are designated, as being those of Invariant Sections, in the notice
-that says that the Document is released under this License.
-
-The ``Cover Texts'' are certain short passages of text that are listed,
-as Front-Cover Texts or Back-Cover Texts, in the notice that says that
-the Document is released under this License.
-
-A ``Transparent'' copy of the Document means a machine-readable copy,
-represented in a format whose specification is available to the
-general public, whose contents can be viewed and edited directly and
-straightforwardly with generic text editors or (for images composed of
-pixels) generic paint programs or (for drawings) some widely available
-drawing editor, and that is suitable for input to text formatters or
-for automatic translation to a variety of formats suitable for input
-to text formatters.  A copy made in an otherwise Transparent file
-format whose markup has been designed to thwart or discourage
-subsequent modification by readers is not Transparent.  A copy that is
-not ``Transparent'' is called ``Opaque''.
-
-Examples of suitable formats for Transparent copies include plain
-ASCII without markup, Texinfo input format, \LaTeX~input format, SGML
-or XML using a publicly available DTD, and standard-conforming simple
-HTML designed for human modification.  Opaque formats include
-PostScript, PDF, proprietary formats that can be read and edited only
-by proprietary word processors, SGML or XML for which the DTD and/or
-processing tools are not generally available, and the
-machine-generated HTML produced by some word processors for output
-purposes only.
-
-The ``Title Page'' means, for a printed book, the title page itself,
-plus such following pages as are needed to hold, legibly, the material
-this License requires to appear in the title page.  For works in
-formats which do not have any title page as such, ``Title Page'' means
-the text near the most prominent appearance of the work's title,
-preceding the beginning of the body of the text.
-
-
-\subsection{Verbatim Copying}
-
-You may copy and distribute the Document in any medium, either
-commercially or noncommercially, provided that this License, the
-copyright notices, and the license notice saying this License applies
-to the Document are reproduced in all copies, and that you add no other
-conditions whatsoever to those of this License.  You may not use
-technical measures to obstruct or control the reading or further
-copying of the copies you make or distribute.  However, you may accept
-compensation in exchange for copies.  If you distribute a large enough
-number of copies you must also follow the conditions in section 3.
-
-You may also lend copies, under the same conditions stated above, and
-you may publicly display copies.
-
-
-\subsection{Copying in Quantity}
-
-If you publish printed copies of the Document numbering more than 100,
-and the Document's license notice requires Cover Texts, you must enclose
-the copies in covers that carry, clearly and legibly, all these Cover
-Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
-the back cover.  Both covers must also clearly and legibly identify
-you as the publisher of these copies.  The front cover must present
-the full title with all words of the title equally prominent and
-visible.  You may add other material on the covers in addition.
-Copying with changes limited to the covers, as long as they preserve
-the title of the Document and satisfy these conditions, can be treated
-as verbatim copying in other respects.
-
-If the required texts for either cover are too voluminous to fit
-legibly, you should put the first ones listed (as many as fit
-reasonably) on the actual cover, and continue the rest onto adjacent
-pages.
-
-If you publish or distribute Opaque copies of the Document numbering
-more than 100, you must either include a machine-readable Transparent
-copy along with each Opaque copy, or state in or with each Opaque copy
-a publicly-accessible computer-network location containing a complete
-Transparent copy of the Document, free of added material, which the
-general network-using public has access to download anonymously at no
-charge using public-standard network protocols.  If you use the latter
-option, you must take reasonably prudent steps, when you begin
-distribution of Opaque copies in quantity, to ensure that this
-Transparent copy will remain thus accessible at the stated location
-until at least one year after the last time you distribute an Opaque
-copy (directly or through your agents or retailers) of that edition to
-the public.
-
-It is requested, but not required, that you contact the authors of the
-Document well before redistributing any large number of copies, to give
-them a chance to provide you with an updated version of the Document.
-
-
-\subsection{Modifications}
-
-You may copy and distribute a Modified Version of the Document under
-the conditions of sections 2 and 3 above, provided that you release
-the Modified Version under precisely this License, with the Modified
-Version filling the role of the Document, thus licensing distribution
-and modification of the Modified Version to whoever possesses a copy
-of it.  In addition, you must do these things in the Modified Version:
-
-\begin{itemize}
-
-\item Use in the Title Page (and on the covers, if any) a title distinct
-   from that of the Document, and from those of previous versions
-   (which should, if there were any, be listed in the History section
-   of the Document).  You may use the same title as a previous version
-   if the original publisher of that version gives permission.
-\item List on the Title Page, as authors, one or more persons or entities
-   responsible for authorship of the modifications in the Modified
-   Version, together with at least five of the principal authors of the
-   Document (all of its principal authors, if it has less than five).
-\item State on the Title page the name of the publisher of the
-   Modified Version, as the publisher.
-\item Preserve all the copyright notices of the Document.
-\item Add an appropriate copyright notice for your modifications
-   adjacent to the other copyright notices.
-\item Include, immediately after the copyright notices, a license notice
-   giving the public permission to use the Modified Version under the
-   terms of this License, in the form shown in the Addendum below.
-\item Preserve in that license notice the full lists of Invariant Sections
-   and required Cover Texts given in the Document's license notice.
-\item Include an unaltered copy of this License.
-\item Preserve the section entitled ``History'', and its title, and add to
-   it an item stating at least the title, year, new authors, and
-   publisher of the Modified Version as given on the Title Page.  If
-   there is no section entitled ``History'' in the Document, create one
-   stating the title, year, authors, and publisher of the Document as
-   given on its Title Page, then add an item describing the Modified
-   Version as stated in the previous sentence.
-\item Preserve the network location, if any, given in the Document for
-   public access to a Transparent copy of the Document, and likewise
-   the network locations given in the Document for previous versions
-   it was based on.  These may be placed in the ``History'' section.
-   You may omit a network location for a work that was published at
-   least four years before the Document itself, or if the original
-   publisher of the version it refers to gives permission.
-\item In any section entitled ``Acknowledgements'' or ``Dedications'',
-   preserve the section's title, and preserve in the section all the
-   substance and tone of each of the contributor acknowledgements
-   and/or dedications given therein.
-\item Preserve all the Invariant Sections of the Document,
-   unaltered in their text and in their titles.  Section numbers
-   or the equivalent are not considered part of the section titles.
-\item Delete any section entitled ``Endorsements''.  Such a section
-   may not be included in the Modified Version.
-\item Do not retitle any existing section as ``Endorsements''
-   or to conflict in title with any Invariant Section.
-
-\end{itemize}
-
-If the Modified Version includes new front-matter sections or
-appendices that qualify as Secondary Sections and contain no material
-copied from the Document, you may at your option designate some or all
-of these sections as invariant.  To do this, add their titles to the
-list of Invariant Sections in the Modified Version's license notice.
-These titles must be distinct from any other section titles.
-
-You may add a section entitled ``Endorsements'', provided it contains
-nothing but endorsements of your Modified Version by various
-parties -- for example, statements of peer review or that the text has
-been approved by an organization as the authoritative definition of a
-standard.
-
-You may add a passage of up to five words as a Front-Cover Text, and a
-passage of up to 25 words as a Back-Cover Text, to the end of the list
-of Cover Texts in the Modified Version.  Only one passage of
-Front-Cover Text and one of Back-Cover Text may be added by (or
-through arrangements made by) any one entity.  If the Document already
-includes a cover text for the same cover, previously added by you or
-by arrangement made by the same entity you are acting on behalf of,
-you may not add another; but you may replace the old one, on explicit
-permission from the previous publisher that added the old one.
-
-The author(s) and publisher(s) of the Document do not by this License
-give permission to use their names for publicity for or to assert or
-imply endorsement of any Modified Version.
-
-
-\subsection{Combining Documents}
-
-You may combine the Document with other documents released under this
-License, under the terms defined in section 4 above for modified
-versions, provided that you include in the combination all of the
-Invariant Sections of all of the original documents, unmodified, and
-list them all as Invariant Sections of your combined work in its
-license notice.
-
-The combined work need only contain one copy of this License, and
-multiple identical Invariant Sections may be replaced with a single
-copy.  If there are multiple Invariant Sections with the same name but
-different contents, make the title of each such section unique by
-adding at the end of it, in parentheses, the name of the original
-author or publisher of that section if known, or else a unique number.
-Make the same adjustment to the section titles in the list of
-Invariant Sections in the license notice of the combined work.
-
-In the combination, you must combine any sections entitled ``History''
-in the various original documents, forming one section entitled
-``History''; likewise combine any sections entitled ``Acknowledgements'',
-and any sections entitled ``Dedications''.  You must delete all sections
-entitled ``Endorsements.''
-
-
-\subsection{Collections of Documents}
-
-You may make a collection consisting of the Document and other documents
-released under this License, and replace the individual copies of this
-License in the various documents with a single copy that is included in
-the collection, provided that you follow the rules of this License for
-verbatim copying of each of the documents in all other respects.
-
-You may extract a single document from such a collection, and distribute
-it individually under this License, provided you insert a copy of this
-License into the extracted document, and follow this License in all
-other respects regarding verbatim copying of that document.
-
-
-
-\subsection{Aggregation With Independent Works}
-
-A compilation of the Document or its derivatives with other separate
-and independent documents or works, in or on a volume of a storage or
-distribution medium, does not as a whole count as a Modified Version
-of the Document, provided no compilation copyright is claimed for the
-compilation.  Such a compilation is called an ``aggregate'', and this
-License does not apply to the other self-contained works thus compiled
-with the Document, on account of their being thus compiled, if they
-are not themselves derivative works of the Document.
-
-If the Cover Text requirement of section 3 is applicable to these
-copies of the Document, then if the Document is less than one quarter
-of the entire aggregate, the Document's Cover Texts may be placed on
-covers that surround only the Document within the aggregate.
-Otherwise they must appear on covers around the whole aggregate.
-
-
-\subsection{Translation}
-
-Translation is considered a kind of modification, so you may
-distribute translations of the Document under the terms of section 4.
-Replacing Invariant Sections with translations requires special
-permission from their copyright holders, but you may include
-translations of some or all Invariant Sections in addition to the
-original versions of these Invariant Sections.  You may include a
-translation of this License provided that you also include the
-original English version of this License.  In case of a disagreement
-between the translation and the original English version of this
-License, the original English version will prevail.
-
-
-\subsection{Termination}
-
-You may not copy, modify, sublicense, or distribute the Document except
-as expressly provided for under this License.  Any other attempt to
-copy, modify, sublicense or distribute the Document is void, and will
-automatically terminate your rights under this License.  However,
-parties who have received copies, or rights, from you under this
-License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-
-\subsection{Future Revisions of This License}
-
-The Free Software Foundation may publish new, revised versions
-of the GNU Free Documentation License from time to time.  Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns. See
-\url{http://www.gnu.org/copyleft/}.
-
-Each version of the License is given a distinguishing version number.
-If the Document specifies that a particular numbered version of this
-License "or any later version" applies to it, you have the option of
-following the terms and conditions either of that specified version or
-of any later version that has been published (not as a draft) by the
-Free Software Foundation.  If the Document does not specify a version
-number of this License, you may choose any version ever published (not
-as a draft) by the Free Software Foundation.
-
-\subsection*{ADDENDUM: How to use this License for your documents}
-
-To use this License in a document you have written, include a copy of
-the License in the document and put the following copyright and
-license notices just after the title page:
-
-\begin{quote}
-
-      Copyright $\copyright$  YEAR  YOUR NAME.
-      Permission is granted to copy, distribute and/or modify this document
-      under the terms of the GNU Free Documentation License, Version 1.1
-      or any later version published by the Free Software Foundation;
-      with the Invariant Sections being LIST THEIR TITLES, with the
-      Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.
-      A copy of the license is included in the section entitled ``GNU
-      Free Documentation License''.
-
-\end{quote}
-
-If you have no Invariant Sections, write ``with no Invariant Sections''
-instead of saying which ones are invariant.  If you have no
-Front-Cover Texts, write ``no Front-Cover Texts'' instead of
-``Front-Cover Texts being LIST''; likewise for Back-Cover Texts.
-
-If your document contains nontrivial examples of program code, we
-recommend releasing these examples in parallel under your choice of
-free software license, such as the GNU General Public License,
-to permit their use in free software.
-
diff --git a/branches/bug1734/doc/guide/indexing.tex b/branches/bug1734/doc/guide/indexing.tex
deleted file mode 100644
index 0bd268f7..00000000
--- a/branches/bug1734/doc/guide/indexing.tex
+++ /dev/null
@@ -1,5 +0,0 @@
-
-% Indexing Data
-%    BTrees
-%    Full-text indexing
-
diff --git a/branches/bug1734/doc/guide/introduction.tex b/branches/bug1734/doc/guide/introduction.tex
deleted file mode 100644
index 08cd0987..00000000
--- a/branches/bug1734/doc/guide/introduction.tex
+++ /dev/null
@@ -1,197 +0,0 @@
-
-%Introduction
-%   What is ZODB?
-%   What is ZEO?
-%   OODBs vs. Relational DBs
-%   Other OODBs
-
-\section{Introduction}
-
-This guide explains how to write Python programs that use the Z Object
-Database (ZODB) and Zope Enterprise Objects (ZEO).  The latest version
-of the guide is always available at
-\url{http://www.zope.org/Wikis/ZODB/guide/index.html}.
-
-\subsection{What is the ZODB?}
-
-The ZODB is a persistence system for Python objects.  Persistent
-programming languages provide facilities that automatically write
-objects to disk and read them in again when they're required by a
-running program.  By installing the ZODB, you add such facilities to
-Python.
-
-It's certainly possible to build your own system for making Python
-objects persistent.  The usual starting points are the \module{pickle}
-module, for converting objects into a string representation, and
-various database modules, such as the \module{gdbm} or \module{bsddb}
-modules, that provide ways to write strings to disk and read them
-back.  It's straightforward to combine the \module{pickle} module and
-a database module to store and retrieve objects, and in fact the
-\module{shelve} module, included in Python's standard library, does
-this.
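-
-For example, a minimal use of \module{shelve} (a sketch; the filename is
-arbitrary):
-
-\begin{verbatim}
-import shelve
-
-db = shelve.open('objects.db')   # pickle + a dbm-style file underneath
-db['user'] = {'name': 'amk'}     # the value is pickled on assignment
-db.close()
-\end{verbatim}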
-
-The downside is that the programmer has to explicitly manage objects,
-reading an object when it's needed and writing it out to disk when the
-object is no longer required.  The ZODB manages objects for you,
-keeping them in a cache, writing them out to disk when they are
-modified, and dropping them from the cache if they haven't been used
-in a while.
-
-
-\subsection{OODBs vs. Relational DBs}
-
-Another way to look at it is that the ZODB is a Python-specific
-object-oriented database (OODB).  Commercial object databases for C++
-or Java often require that you jump through some hoops, such as using
-a special preprocessor or avoiding certain data types.  As we'll see,
-the ZODB has some hoops of its own to jump through, but in comparison
-the naturalness of the ZODB is astonishing.
-
-Relational databases (RDBs) are far more common than OODBs.
-Relational databases store information in tables; a table consists of
-any number of rows, each row containing several columns of
-information.  (Rows are more formally called relations, which is where
-the term ``relational database'' originates.)
-
-Let's look at a concrete example.  The example comes from my day job
-working for the MEMS Exchange, in a greatly simplified version.  The
-job is to track process runs, which are lists of manufacturing steps
-to be performed in a semiconductor fab.  A run is owned by a
-particular user, and has a name and assigned ID number.  Runs consist
-of a number of operations; an operation is a single step to be
-performed, such as depositing something on a wafer or etching
-something off it.
-
-Operations may have parameters, which are additional information
-required to perform an operation.  For example, if you're depositing
-something on a wafer, you need to know two things: 1) what you're
-depositing, and 2) how much should be deposited.  You might deposit
-100 microns of silicon oxide, or 1 micron of copper.
-
-Mapping these structures to a relational database is straightforward:
-
-\begin{verbatim}
-CREATE TABLE runs (
-  run_id    int,
-  owner     varchar,
-  title     varchar,
-  acct_num  int,
-  PRIMARY KEY(run_id)
-);
-
-CREATE TABLE operations (
-  run_id      int,
-  step_num    int,
-  process_id  varchar,
-  PRIMARY KEY(run_id, step_num),
-  FOREIGN KEY(run_id) REFERENCES runs(run_id)
-);
-
-CREATE TABLE parameters (
-  run_id       int,
-  step_num     int,
-  param_name   varchar,
-  param_value  varchar,
-  PRIMARY KEY(run_id, step_num, param_name),
-  FOREIGN KEY(run_id, step_num)
-     REFERENCES operations(run_id, step_num)
-);
-\end{verbatim}
-
-In Python, you would write three classes named \class{Run},
-\class{Operation}, and \class{Parameter}.  I won't present code for
-defining these classes, since that code is uninteresting at this
-point. Each class would contain a single method to begin with, an
-\method{__init__} method that assigns default values, such as 0 or
-\code{None}, to each attribute of the class.
-
-It's not difficult to write Python code that will create a \class{Run}
-instance and populate it with the data from the relational tables;
-with a little more effort, you can build a straightforward tool,
-usually called an object-relational mapper, to do this automatically.
-(See
-\url{http://www.amk.ca/python/unmaintained/ordb.html} for a quick hack
-at a Python object-relational mapper, and
-\url{http://www.python.org/workshops/1997-10/proceedings/shprentz.html}
-for Joel Shprentz's more successful implementation of the same idea;
-unlike mine, Shprentz's system has been used for actual work.)
-
-However, it is difficult to make an object-relational mapper
-reasonably quick; a simple-minded implementation like mine is quite
-slow because it has to do several queries to access all of an object's
-data.  Higher performance object-relational mappers cache objects to
-improve performance, only performing SQL queries when they actually
-need to.
-
-That helps if you want to access run number 123 all of a sudden.  But
-what if you want to find all runs where a step has a parameter named
-'thickness' with a value of 2.0?  In the relational version, you have
-two unappealing choices:
-
-\begin{enumerate}
- \item Write a specialized SQL query for this case: \code{SELECT run_id
-  FROM parameters WHERE param_name = 'thickness' AND param_value = '2.0'}
-  
-  If such queries are common, you can end up with lots of specialized
-  queries.  When the database tables get rearranged, all these queries
-  will need to be modified.
-
- \item An object-relational mapper doesn't help much.  Scanning
-  through the runs means that the mapper will perform the required
-  SQL queries to read run \#1, and then a simple Python loop can check
-  whether any of its steps have the parameter you're looking for.
-  Repeat for run \#2, 3, and so forth.  This does a vast
-  number of SQL queries, and therefore is incredibly slow.
-
-\end{enumerate}
-
-An object database such as ZODB simply stores internal pointers from
-object to object, so reading in a single object is much faster than
-doing a bunch of SQL queries and assembling the results. Scanning all
-runs, therefore, is still inefficient, but not grossly inefficient.
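-
-To make the contrast concrete, here is a hypothetical scan in the object
-version (the \code{operations} and \code{parameters} attribute names are
-invented for illustration):
-
-\begin{verbatim}
-# Assumes runs is a mapping (e.g. a BTree) from run IDs to Run objects,
-# each Run has a list of operations, and each operation has a dict of
-# parameters.
-for run in runs.values():
-    for op in run.operations:
-        if op.parameters.get('thickness') == 2.0:
-            print run.run_id
-            break
-\end{verbatim}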
-
-\subsection{What is ZEO?}
-
-The ZODB comes with a few different classes that implement the
-\class{Storage} interface.  Such classes handle the job of
-writing out Python objects to a physical storage medium, which can be
-a disk file (the \class{FileStorage} class), a BerkeleyDB file
-(\class{BDBFullStorage}), a relational database
-(\class{DCOracleStorage}), or some other medium.  ZEO adds
-\class{ClientStorage}, a new \class{Storage} that doesn't write to
-physical media but just forwards all requests across a network to a
-server.  The server, which is running an instance of the
-\class{StorageServer} class, simply acts as a front-end for some
-physical \class{Storage} class.  It's a fairly simple idea, but as
-we'll see later on in this document, it opens up many possibilities.
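-
-For example, opening a client connection looks much like opening any other
-storage (a sketch, assuming a ZEO server is already listening on port 8100
-of the local machine):
-
-\begin{verbatim}
-from ZEO.ClientStorage import ClientStorage
-from ZODB import DB
-
-storage = ClientStorage(('localhost', 8100))  # address of the ZEO server
-db = DB(storage)
-conn = db.open()
-\end{verbatim}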
-
-\subsection{About this guide}
-
-The primary author of this guide works on a project which uses the
-ZODB and ZEO as its primary storage technology.  We use the ZODB to
-store process runs and operations, a catalog of available processes,
-user information, accounting information, and other data.  Part of the
-goal of writing this document is to make our experience more widely
-available.  A few times we've spent hours or even days trying to
-figure out a problem, and this guide is an attempt to gather up the
-knowledge we've gained so that others don't have to make the same
-mistakes we did while learning.
-
-The author's ZODB project is described in a paper available at
-\url{http://www.amk.ca/python/writing/mx-architecture/}.
-
-This document will always be a work in progress.  If you wish to
-suggest clarifications or additional topics, please send your comments to
-\email{zodb-dev@zope.org}.
-
-\subsection{Acknowledgements}
-
-Andrew Kuchling wrote the original version of this guide, which
-provided some of the first ZODB documentation for Python programmers.
-His initial version has been updated over time by Jeremy Hylton and
-Tim Peters.
-
-I'd like to thank the people who've pointed out inaccuracies and bugs,
-offered suggestions on the text, or proposed new topics that should be
-covered: Jeff Bauer, Willem Broekema, Thomas Guettler,
-Chris McDonough, George Runyan.
diff --git a/branches/bug1734/doc/guide/links.tex b/branches/bug1734/doc/guide/links.tex
deleted file mode 100644
index aa48c38b..00000000
--- a/branches/bug1734/doc/guide/links.tex
+++ /dev/null
@@ -1,21 +0,0 @@
-% links.tex
-% Collection of relevant links
-
-\section{Resources}
-
-Introduction to the Zope Object Database, by Jim Fulton:
-\\
-Goes into much greater detail, explaining advanced uses of the ZODB and 
-how it's actually implemented.  A definitive reference, and highly recommended.
-\\
-\url{http://www.python.org/workshops/2000-01/proceedings/papers/fulton/zodb3.html}
-
-Persistent Programming with ZODB, by Jeremy Hylton and Barry Warsaw:
-\\
-Slides for a tutorial presented at the 10th Python conference.  Covers
-much of the same ground as this guide, with more details in some areas
-and less in others.
-\\
-\url{http://www.zope.org/Members/bwarsaw/ipc10-slides}
-
-
diff --git a/branches/bug1734/doc/guide/modules.tex b/branches/bug1734/doc/guide/modules.tex
deleted file mode 100644
index cfdbc9a6..00000000
--- a/branches/bug1734/doc/guide/modules.tex
+++ /dev/null
@@ -1,471 +0,0 @@
-% Related Modules
-%    PersistentMapping
-%    PersistentList
-%    BTrees
-%       Total Ordering and Persistence
-%       Iteration and Mutation
-%       BTree Diagnostic Tools
-
-\section{Related Modules}
-
-The ZODB package includes a number of related modules that provide
-useful data types such as BTrees.
-
-\subsection{\module{persistent.mapping.PersistentMapping}}
-
-The \class{PersistentMapping} class is a wrapper for mapping objects
-that will set the dirty bit when the mapping is modified by setting or
-deleting a key.
-
-\begin{funcdesc}{PersistentMapping}{container = \{\}}
-Create a \class{PersistentMapping} object that wraps the
-mapping object \var{container}.  If you don't specify a
-value for \var{container}, a regular Python dictionary is used.
-\end{funcdesc}
-
-\class{PersistentMapping} objects support all the same methods as
-Python dictionaries do.
-
-\subsection{\module{persistent.list.PersistentList}}
-
-The \class{PersistentList} class is a wrapper for mutable sequence objects,
-much as \class{PersistentMapping} is a wrapper for mappings.
-
-\begin{funcdesc}{PersistentList}{initlist = []}
-Create a \class{PersistentList} object that wraps the
-mutable sequence object \var{initlist}.  If you don't specify a
-value for \var{initlist}, a regular Python list is used.
-\end{funcdesc}
-
-\class{PersistentList} objects support all the same methods as
-Python lists do.
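-
-For example (a minimal sketch; both wrappers set the dirty bit when they
-are mutated):
-
-\begin{verbatim}
-from persistent.mapping import PersistentMapping
-from persistent.list import PersistentList
-
-settings = PersistentMapping()
-settings['color'] = 'blue'   # marks the mapping as changed
-
-tags = PersistentList()
-tags.append('urgent')        # marks the list as changed
-\end{verbatim}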
-
-
-\subsection{BTrees Package}
-
-When programming with the ZODB, Python dictionaries aren't always what
-you need.  The most important case is where you want to store a very
-large mapping.  When a Python dictionary is accessed in a ZODB, the
-whole dictionary has to be unpickled and brought into memory.  If
-you're storing something very large, such as a 100,000-entry user
-database, unpickling such a large object will be slow.  BTrees are a
-balanced tree data structure that behave like a mapping but distribute
-keys throughout a number of tree nodes.  The nodes are stored in
-sorted order (this has important consequences -- see below).  Nodes are
-then only unpickled and brought into memory as they're accessed, so the
-entire tree doesn't have to occupy memory (unless you really are
-touching every single key).
-
-The BTrees package provides a large collection of related data
-structures.  There are variants of the data structures specialized to
-integers, which are faster and use less memory.  There
-are five modules that handle the different variants.  The first two
-letters of the module name specify the types of the keys and values in
-mappings -- O for any object, I for 32-bit signed integer, and (new in
-ZODB 3.4) F for 32-bit C float.  For example, the \module{BTrees.IOBTree}
-module provides a mapping with integer keys and arbitrary objects as values.
-
-The four data structures provided by each module are a BTree, a Bucket,
-a TreeSet, and a Set.  The BTree and Bucket types are mappings and
-support all the usual mapping methods, e.g. \function{update()} and
-\function{keys()}.  The TreeSet and Set types are similar to mappings
-but they have no values; they support the methods that make sense for
-a mapping with no keys, e.g. \function{keys()} but not
-\function{items()}.  The Bucket and Set types are the individual
-building blocks for BTrees and TreeSets, respectively.  A Bucket or
-Set can be used when you are sure that it will have few elements.  If
-the data structure will grow large, you should use a BTree or TreeSet.
-Like Python lists, Buckets and Sets are allocated in one
-contiguous piece, and insertions and deletions can take time
-proportional to the number of existing elements.  Also like Python lists,
-a Bucket or Set is a single object, and is pickled and unpickled in its
-entirety.  BTrees and TreeSets are multi-level tree structures with
-much better (logarithmic) worst-case time bounds, and the tree structure
-is built out of multiple objects, which ZODB can load individually
-as needed.
-
-The five modules are named \module{OOBTree}, \module{IOBTree},
-\module{OIBTree}, \module{IIBTree}, and (new in ZODB 3.4)
-\module{IFBTree}.  The two-letter prefixes are repeated in the data type
-names.  The \module{BTrees.OOBTree} module defines the following types:
-\class{OOBTree}, \class{OOBucket}, \class{OOSet}, and \class{OOTreeSet}.
-Similarly, the other four modules each define their own variants of those
-four types.
-
-The \function{keys()}, \function{values()}, and \function{items()}
-methods on BTree and TreeSet types do not materialize a list with all
-of the data.  Instead, they return lazy sequences that fetch data
-from the BTree as needed.  They also support optional arguments to
-specify the minimum and maximum values to return, often called ``range
-searching''.  Because all these types are stored in sorted order, range
-searching is very efficient.
-
-The \function{keys()}, \function{values()}, and \function{items()}
-methods on Bucket and Set types do return lists with all the data.
-Starting in ZODB 3.3, there are also \function{iterkeys()},
-\function{itervalues()}, and \function{iteritems()} methods that
-return iterators (in the Python 2.2 sense).  Those methods also apply to
-BTree and TreeSet objects.
-
-A BTree object supports all the methods you would expect of a mapping,
-with a few extensions that exploit the fact that the keys are sorted.
-The example below demonstrates how some of the methods work.  The
-extra methods are \function{minKey()} and \function{maxKey()}, which
-find the minimum and maximum key value subject to an optional bound
-argument, and \function{byValue()}, which should probably be ignored
-(it's hard to explain exactly what it does, and as a result it's
-almost never used -- best to consider it deprecated).  The various
-methods for enumerating keys, values and items also accept minimum
-and maximum key arguments ("range search"), and (new in ZODB 3.3)
-optional Boolean arguments to control whether a range search is
-inclusive or exclusive of the range's endpoints.
-
-\begin{verbatim}
->>> from BTrees.OOBTree import OOBTree
->>> t = OOBTree()
->>> t.update({1: "red", 2: "green", 3: "blue", 4: "spades"})
->>> len(t)
-4
->>> t[2]
-'green'
->>> s = t.keys() # this is a "lazy" sequence object
->>> s
-<OOBTreeItems object at 0x0088AD20>
->>> len(s)  # it acts like a Python list
-4
->>> s[-2]
-3
->>> list(s) # materialize the full list
-[1, 2, 3, 4]
->>> list(t.values())
-['red', 'green', 'blue', 'spades']
->>> list(t.values(1, 2)) # values at keys in 1 to 2 inclusive
-['red', 'green']
->>> list(t.values(2))    # values at keys >= 2
-['green', 'blue', 'spades']
->>> list(t.values(min=1, max=4))  # keyword args new in ZODB 3.3
-['red', 'green', 'blue', 'spades']
->>> list(t.values(min=1, max=4, excludemin=True, excludemax=True))
-['green', 'blue']
->>> t.minKey()     # smallest key
-1
->>> t.minKey(1.5)  # smallest key >= 1.5
-2
->>> for k in t.keys():
-...     print k,
-1 2 3 4
->>> for k in t:    # new in ZODB 3.3
-...     print k,
-1 2 3 4
->>> for pair in t.iteritems():  # new in ZODB 3.3
-...     print pair,
-...
-(1, 'red') (2, 'green') (3, 'blue') (4, 'spades')
->>> t.has_key(4)  # returns a true value, but exactly what is undefined
-2
->>> t.has_key(5)
-0
->>> 4 in t  # new in ZODB 3.3
-True
->>> 5 in t  # new in ZODB 3.3
-False
->>>
-
-\end{verbatim}
-
-% XXX I'm not sure all of the following is actually correct.  The
-% XXX set functions have complicated behavior.
-Each of the modules also defines some functions that operate on
-BTrees -- \function{difference()}, \function{union()}, and
-\function{intersection()}.  The \function{difference()} function returns
-a Bucket, while the other two methods return a Set.
-If the keys are integers, then the module also defines
-\function{multiunion()}.  If the values are integers or floats, then the
-module also defines \function{weightedIntersection()} and
-\function{weightedUnion()}.  The function doc strings describe each
-function briefly.
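-
-For example (an illustrative session; per the caution below, rely only on
-the operations themselves, not on the concrete types of their results):
-
-\begin{verbatim}
->>> from BTrees.IIBTree import IISet, union, intersection, difference
->>> s1 = IISet([1, 2, 3])
->>> s2 = IISet([2, 3, 4])
->>> list(union(s1, s2))
-[1, 2, 3, 4]
->>> list(intersection(s1, s2))
-[2, 3]
->>> list(difference(s1, s2))
-[1]
-\end{verbatim}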
-
-\code{BTrees/Interfaces.py} defines the operations, and is the official
-documentation.  Note that the interfaces don't define the concrete types
-returned by most operations, and you shouldn't rely on the concrete types
-that happen to be returned:  stick to operations guaranteed by the
-interface.  In particular, note that the interfaces don't specify anything
-about comparison behavior, and so nothing about it is guaranteed.  In ZODB
-3.3, for example, two BTrees happen to use Python's default object
-comparison, which amounts to comparing the (arbitrary but fixed) memory
-addresses of the BTrees. This may or may not be true in future releases.
-If the interfaces don't specify a behavior, then whether that behavior
-appears to work, and exactly what happens if it does appear to work, are
-undefined and should not be relied on.
-
-\subsubsection{Total Ordering and Persistence}
-
-The BTree-based data structures differ from Python dicts in several
-fundamental ways.  One of the most important is that while dicts
-require that keys support hash codes and equality comparison,
-the BTree-based structures don't use hash codes and require a total
-ordering on keys.
-
-Total ordering means three things:
-
-\begin{enumerate}
-\item  Reflexive.  For each \var{x}, \code{\var{x} == \var{x}} is true.
-
-\item  Trichotomy.  For each \var{x} and \var{y}, exactly one of
-       \code{\var{x} < \var{y}}, \code{\var{x} == \var{y}}, and
-       \code{\var{x} > \var{y}} is true.
-
-\item  Transitivity.  Whenever \code{\var{x} <= \var{y}} and
-       \code{\var{y} <= \var{z}}, it's also true that
-       \code{\var{x} <= \var{z}}.
-\end{enumerate}
-
-The default comparison functions for most objects that come with Python
-satisfy these rules, with some crucial cautions explained later.  Complex
-numbers are an example of an object whose default comparison function
-does not satisfy these rules:  complex numbers only support \code{==}
-and \code{!=} comparisons, and raise an exception if you try to compare
-them in any other way.  They don't satisfy the trichotomy rule, and must
-not be used as keys in BTree-based data structures (although note that
-complex numbers can be used as keys in Python dicts, which do not require
-a total ordering).
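-
-For example:
-
-\begin{verbatim}
->>> 1j == 1j
-True
->>> 1j < 2j
-Traceback (most recent call last):
-  File "<stdin>", line 1, in ?
-TypeError: cannot compare complex numbers using <, <=, >, >=
-\end{verbatim}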
-
-Examples of objects that are wholly safe to use as keys in BTree-based
-structures include ints, longs, floats, 8-bit strings, Unicode strings,
-and tuples composed (possibly recursively) of objects of wholly safe
-types.
-
-It's important to realize that even if two types satisfy the
-rules on their own, mixing objects of those types may not.  For example,
-8-bit strings and Unicode strings both supply total orderings, but mixing
-the two loses trichotomy; e.g., \code{'x' < chr(255)} and
-\code{u'x' == 'x'}, but trying to compare \code{chr(255)} to
-\code{u'x'} raises an exception.  Partly for this reason (another is
-given later), it can be dangerous to use keys with multiple types in
-a single BTree-based structure.  Don't try to do that, and you don't
-have to worry about it.
-
-Another potential problem is mutability:  when a key is inserted in a
-BTree-based structure, it must retain the same order relative to the
-other keys over time.  This is easy to run afoul of if you use mutable
-objects as keys.  For example, lists supply a total ordering, and yet:
-
-\begin{verbatim}
->>> L1, L2, L3 = [1], [2], [3]
->>> from BTrees.OOBTree import OOSet
->>> s = OOSet((L2, L3, L1))  # this is fine, so far
->>> list(s.keys())           # note that the lists are in sorted order
-[[1], [2], [3]]
->>> s.has_key([3])           # and [3] is in the set
-1
->>> L2[0] = 5                # horrible -- the set is insane now
->>> s.has_key([3])           # for example, it's insane this way
-0
->>> s
-OOSet([[1], [5], [3]])
->>>
-\end{verbatim}
-
-Key lookup relies on the keys remaining in sorted order (an efficient
-form of binary search is used).  By mutating key L2 after inserting it,
-we destroyed the invariant that the OOSet is sorted.  As a result, all
-future operations on this set are unpredictable.
-
-A subtler variant of this problem arises due to persistence:  by default,
-Python does several kinds of comparison by comparing the memory
-addresses of two objects.  Because Python never moves an object in memory,
-this does supply a usable (albeit arbitrary) total ordering across the
-life of a program run (an object's memory address doesn't change).  But
-if objects compared in this way are used as keys of a BTree-based
-structure that's stored in a database, when the objects are loaded from
-the database again they will almost certainly wind up at different
-memory addresses.  There's no guarantee then that if key K1 had a memory
-address smaller than the memory address of key K2 at the time K1 and
-K2 were inserted in a BTree, K1's address will also be smaller than
-K2's when that BTree is loaded from a database later.  The result will
-be an insane BTree, where various operations do and don't work as
-expected, seemingly at random.
-
-Now each of the types identified above as ``wholly safe to use'' never
-compares two instances of that type by memory address, so there's
-nothing to worry about here if you use keys of those types.  The most
-common mistake is to use keys that are instances of a user-defined class
-that doesn't supply its own \method{__cmp__()} method.  Python compares
-such instances by memory address.  This is fine if such instances are
-used as keys in temporary BTree-based structures used only in a single
-program run.  It can be disastrous if that BTree-based structure is
-stored to a database, though.
-
-\begin{verbatim}
->>> class C:
-...     pass
-...
->>> a, b = C(), C()
->>> print a < b   # this may print 0 if you try it
-1
->>> del a, b
->>> a, b = C(), C()
->>> print a < b   # and this may print 0 or 1
-0
->>>
-\end{verbatim}
-
-That example illustrates that comparison of instances of classes that
-don't define \method{__cmp__()} yields arbitrary results (but consistent
-results within a single program run).
-
-Another problem occurs with instances of classes that do define
-\method{__cmp__()}, but define it incorrectly.  It's possible but
-rare for a custom \method{__cmp__()} implementation to violate one
-of the three required formal properties directly.  It's more common for
-it to "fall back" to address-based comparison by mistake.
-For example,
-
-\begin{verbatim}
-class Mine:
-    def __cmp__(self, other):
-        if other.__class__ is Mine:
-            return cmp(self.data, other.data)
-        else:
-            return cmp(self.data, other)
-\end{verbatim}
-
-It's quite possible there that the \keyword{else} clause allows
-a result to be computed based on memory address.  The bug won't show
-up until a BTree-based structure uses objects of class \class{Mine} as
-keys, and also objects of other types as keys, and the structure is
-loaded from a database, and a sequence of comparisons happens to execute
-the \keyword{else} clause in a case where the relative order of object
-memory addresses happened to change.
-
-This is as difficult to track down as it sounds, so best to stay far
-away from the possibility.
-
-You'll stay out of trouble by following these rules, violating them
-only with great care:
-
-\begin{enumerate}
-\item  Use objects of simple immutable types as keys in
-       BTree-based data structures.
-
-\item  Within a single BTree-based data structure, use objects of
-       a single type as keys.  Don't use multiple key types in a
-       single structure.
-
-\item  If you want to use class instances as keys, and there's
-       any possibility that the structure may be stored in a
-       database, it's crucial that the class define a
-       \method{__cmp__()} method, and that the method is
-       carefully implemented.
-
-       Any part of a comparison implementation that relies (explicitly
-       or implicitly) on an address-based comparison result will
-       eventually cause serious failure.
-
-\item  Do not use \class{Persistent} objects as keys, or objects of a
-       subclass of \class{Persistent}.
-\end{enumerate}
-
-That last item may be surprising.  It stems from details of how
-conflict resolution is implemented:  the states passed to conflict
-resolution do not materialize persistent subobjects (if a persistent
-object P is a key in a BTree, then P is a subobject of the bucket
-containing P).  Instead, if an object O references a persistent subobject
-P directly, and O is involved in a conflict, the states passed to
-conflict resolution contain an instance of an internal
-\class{PersistentReference} stub class everywhere O references P.
-Two \class{PersistentReference} instances compare equal if and only if
-they "represent" the same persistent object; when they're not equal,
-they compare by memory address, and, as explained before, memory-based
-comparison must never happen in a sane persistent BTree.  Note that it
-doesn't help in this case if your \class{Persistent} subclass defines
-a sane \method{__cmp__()} method:  conflict resolution doesn't know
-about your class, and so also doesn't know about its \method{__cmp__()}
-method.  It only sees instances of the internal \class{PersistentReference}
-stub class.
-
-
-\subsubsection{Iteration and Mutation}
-
-As with a Python dictionary or list, you should not mutate a BTree-based
-data structure while iterating over it, except that it's fine to replace
-the value associated with an existing key while iterating.  You won't
-create internal damage in the structure if you try to remove keys, or add
-new ones, while iterating, but the results are undefined and unpredictable.  A
-weak attempt is made to raise \exception{RuntimeError} if the size of a
-BTree-based structure changes while iterating, but it doesn't catch most
-such cases, and is also unreliable.  Example:
-
-\begin{verbatim}
-    >>> from BTrees.IIBTree import *
-    >>> s = IISet(range(10))
-    >>> list(s)
-    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-    >>> for i in s:  # the output is undefined
-    ...     print i,
-    ...     s.remove(i)
-    0 2 4 6 8
-    Traceback (most recent call last):
-      File "<stdin>", line 1, in ?
-    RuntimeError: the bucket being iterated changed size
-    >>> list(s)      # this output is also undefined
-    [1, 3, 5, 7, 9]
-    >>>
-\end{verbatim}
-
-Also as with Python dictionaries and lists, the safe and predictable way
-to mutate a BTree-based structure while iterating over it is to iterate
-over a copy of the keys.  Example:
-
-\begin{verbatim}
-    >>> from BTrees.IIBTree import *
-    >>> s = IISet(range(10))
-    >>> for i in list(s.keys()):  # this is well defined
-    ...     print i,
-    ...     s.remove(i)
-    0 1 2 3 4 5 6 7 8 9
-    >>> list(s)
-    []
-    >>>
-\end{verbatim}
-
-
-\subsubsection{BTree Diagnostic Tools}
-
-A BTree (or TreeSet) is a complex data structure, really a graph of
-variable-size nodes, connected in multiple ways via three distinct kinds
-of C pointers.  There are some tools available to help check internal
-consistency of a BTree as a whole.
-
-Most generally useful is the \module{BTrees.check} module.  The
-\function{check.check()} function examines a BTree (or Bucket, Set, or
-TreeSet) for value-based consistency, such as that the keys are in
-strictly increasing order.  See the function docstring for details.
-The \function{check.display()} function displays the internal structure
-of a BTree.
-
-BTrees and TreeSets also have a \method{_check()} method.  This verifies
-that the (possibly many) internal pointers in a BTree or TreeSet
-are mutually consistent, and raises \exception{AssertionError} if they're
-not.
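-
-For example (a minimal sketch):
-
-\begin{verbatim}
->>> from BTrees.OOBTree import OOBTree
->>> from BTrees.check import check
->>> t = OOBTree()
->>> t.update({1: 'a', 2: 'b'})
->>> check(t)      # silent if the tree is value-consistent
->>> t._check()    # raises AssertionError if internal pointers disagree
->>>
-\end{verbatim}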
-
-If a \function{check.check()} or \method{_check()} call fails,
-it may point to a bug in the implementation of BTrees or conflict
-resolution, or may point to database corruption.
-
-Repairing a damaged BTree is usually best done by making a copy of it.
-For example, if \var{self.data} is bound to a corrupted IOBTree,
-
-\begin{verbatim}
-    self.data = IOBTree(self.data)
-\end{verbatim}
-
-usually suffices.  If object identity needs to be preserved,
-
-\begin{verbatim}
-    acopy = IOBTree(self.data)
-    self.data.clear()
-    self.data.update(acopy)
-\end{verbatim}
-
-does the same, but leaves \var{self.data} bound to the same object.
diff --git a/branches/bug1734/doc/guide/prog-zodb.tex b/branches/bug1734/doc/guide/prog-zodb.tex
deleted file mode 100644
index 9e233edc..00000000
--- a/branches/bug1734/doc/guide/prog-zodb.tex
+++ /dev/null
@@ -1,484 +0,0 @@
-
-%ZODB Programming
-%   How ZODB works (ExtensionClass, dirty bits)
-%   Installing ZODB
-%   Rules for Writing Persistent Classes
-   
-
-\section{ZODB Programming}
-
-\subsection{Installing ZODB}
-
-ZODB is packaged using the standard distutils tools.
-
-\subsubsection{Requirements}
-
-You will need Python 2.3 or higher.  Since the code is packaged using
-distutils, it is simply a matter of untarring or unzipping the release
-package, and then running \code{python setup.py install}.
-
-You'll need a C compiler to build the packages, because there are
-various C extension modules.  Binary installers are provided for
-Windows users.
-
-\subsubsection{Installing the Packages}
-
-Download the ZODB tarball containing all the packages for both ZODB
-and ZEO from \url{http://www.zope.org/Products/ZODB3.3}.  See
-the \file{README.txt} file in the top level of the release directory
-for details on building, testing, and installing.
-
-You can find information about ZODB and the most current releases in
-the ZODB Wiki at \url{http://www.zope.org/Wikis/ZODB}.
-
-\subsection{How ZODB Works}
-
-The ZODB is conceptually simple.  Python classes subclass the
-\class{persistent.Persistent} class to become ZODB-aware.
-Instances of persistent objects are brought in from a permanent
-storage medium, such as a disk file, when the program needs them, and
-remain cached in RAM.  The ZODB traps modifications to objects, so
-that when a statement such as \code{obj.size = 1} is executed, the
-modified object is marked as ``dirty.''  On request, any dirty objects
-are written out to permanent storage; this is called committing a
-transaction.  Transactions can also be aborted or rolled back, which
-results in any changes being discarded, with dirty objects reverting to
-their initial state before the transaction began.
-
-The term ``transaction'' has a specific technical meaning in computer
-science.  It's extremely important that the contents of a database
-don't get corrupted by software or hardware crashes, and most database
-software offers protection against such corruption by supporting four
-useful properties, Atomicity, Consistency, Isolation, and Durability.
-In computer science jargon these four terms are collectively dubbed
-the ACID properties, forming an acronym from their names.  
-
-The ZODB provides all of the ACID properties.  Definitions of the
-ACID properties are:
-
-\begin{itemize}
-
-\item[Atomicity] means that any changes to data made during a transaction 
-are all-or-nothing.  Either all the changes are applied, or none of
-them are.  If a program makes a bunch of modifications and then
-crashes, the database won't be partially modified, potentially leaving
-the data in an inconsistent state; instead all the changes will be
-forgotten.  That's bad, but it's better than having a
-partially-applied modification put the database into an inconsistent
-state.
-
-\item[Consistency] means that each transaction executes a valid
-transformation of the database state.  Some databases, but not ZODB,
-provide a variety of consistency checks in the database or language;
-for example, a relational database constraint columns to be of
-particular types and can enforce relations across tables.  Viewed more
-generally, atomicity and isolation make it possible for applications
-to provide consistency.
-
-\item[Isolation] means that two programs or threads running in two
- different transactions cannot see each other's changes until they
- commit their transactions.
-
-\item[Durability] means that once a transaction has been committed,
-a subsequent crash will not cause any data to be lost or corrupted.
-
-\end{itemize}
-
-\subsection{Opening a ZODB}
-
-There are three main interfaces supplied by the ZODB: the
-\class{Storage}, \class{DB}, and \class{Connection} classes.  The
-\class{DB} and \class{Connection} interfaces both have single
-implementations, but there are several different classes that
-implement the \class{Storage} interface.
-
-\begin{itemize}
- \item \class{Storage} classes are the lowest layer, and handle
- storing and retrieving objects from some form of long-term storage.
- A few different types of Storage have been written, such as
- \class{FileStorage}, which uses regular disk files, and
- \class{BDBFullStorage}, which uses Sleepycat Software's BerkeleyDB
- database.  You could write a new Storage that stored objects in a
- relational database, for example, if that would
- better suit your application.  Two example storages,
- \class{DemoStorage} and \class{MappingStorage}, are available to use
- as models if you want to write a new Storage.
-
- \item The \class{DB} class sits on top of a storage, and mediates the
- interaction between several connections.  One \class{DB} instance is
- created per process.
-
- \item Finally, the \class{Connection} class caches objects, and moves
- them into and out of object storage.  A multi-threaded program should
- open a separate \class{Connection} instance for each thread.
- Different threads can then modify objects and commit their
- modifications independently.
-
-\end{itemize}
-
-Preparing to use a ZODB requires three steps: you have to open the
-\class{Storage}, then create a \class{DB} instance that uses the
-\class{Storage}, and then get a \class{Connection} from the \class{DB}
-instance.  All this is only a few lines of code:
-\begin{verbatim}
-from ZODB import FileStorage, DB
-
-storage = FileStorage.FileStorage('/tmp/test-filestorage.fs')
-db = DB(storage)
-conn = db.open()
-\end{verbatim}
-
-Note that you can use a completely different data storage mechanism by
-changing the first line that opens a \class{Storage}; the above example uses a
-\class{FileStorage}.  In section~\ref{zeo}, ``How ZEO Works'',
-you'll see how ZEO uses this flexibility to good effect.
-
-\subsection{Using a ZODB Configuration File}
-
-ZODB also supports configuration files written in the ZConfig format.
-A configuration file can be used to separate the configuration logic
-from the application logic.  The storage classes and the \class{DB}
-class support a variety of keyword arguments; all these options can be
-specified in a config file.
-
-The configuration file format is simple.  The database opened in the
-previous section could be described by the following configuration file:
-
-\begin{verbatim}
-<zodb>
-  <filestorage>
-  path /tmp/test-filestorage.fs
-  </filestorage>
-</zodb>
-\end{verbatim}
-
-The \module{ZODB.config} module includes several functions for opening
-databases and storages from configuration files.
-
-\begin{verbatim}
-import ZODB.config
-
-db = ZODB.config.databaseFromURL('/tmp/test.conf')
-conn = db.open()
-\end{verbatim}
-
-The ZConfig documentation, included in the ZODB3 release, explains
-the format in detail.  Each configuration file is described by a
-schema, by convention stored in a \file{component.xml} file.  ZODB,
-ZEO, zLOG, and zdaemon all have schemas.
-
-\subsection{Writing a Persistent Class}
-
-Making a Python class persistent is quite simple; it only needs to
-subclass from the \class{Persistent} class, as shown in this example:
-
-\begin{verbatim}
-from persistent import Persistent
-
-class User(Persistent):
-    pass
-\end{verbatim}
-
-The \class{Persistent} base class is a new-style class implemented in
-C.
-
-For simplicity, in the examples the \class{User} class will
-simply be used as a holder for a bunch of attributes.  Normally the
-class would define various methods that add functionality, but that
-has no impact on the ZODB's treatment of the class.
-
-The ZODB uses persistence by reachability; starting from a set of root
-objects, all the attributes of those objects are made persistent,
-whether they're simple Python data types or class instances.  There's
-no method to explicitly store objects in a ZODB database; simply
-assign them as an attribute of an object that's already in the database,
-or store them in a mapping that is.  This chain of containment must
-eventually reach back to the root object of the database.
-
-As an example, we'll create a simple database of users that allows
-retrieving a \class{User} object given the user's ID.  First, we
-retrieve the primary root object of the ZODB using the \method{root()}
-method of the \class{Connection} instance.  The root object behaves
-like a Python dictionary, so you can just add a new key/value pair for
-your application's root object.  We'll insert an \class{OOBTree} object
-that will contain all the \class{User} objects.  (The
-\module{BTrees} package is also included as part of Zope.)
-
-\begin{verbatim}
-dbroot = conn.root()
-
-# Ensure that a 'userdb' key is present 
-# in the root
-if not dbroot.has_key('userdb'):
-    from BTrees.OOBTree import OOBTree
-    dbroot['userdb'] = OOBTree()
-
-userdb = dbroot['userdb']
-\end{verbatim}
-
-Inserting a new user is simple: create the \class{User} object, fill
-it with data, insert it into the \class{BTree} instance, and commit
-this transaction.
-
-\begin{verbatim}
-# Create new User instance
-import transaction
-
-newuser = User() 
-
-# Add whatever attributes you want to track
-newuser.id = 'amk' 
-newuser.first_name = 'Andrew' ; newuser.last_name = 'Kuchling'
-...
-
-# Add object to the BTree, keyed on the ID
-userdb[newuser.id] = newuser
-
-# Commit the change
-transaction.commit()
-\end{verbatim}
-
-The \module{transaction} module defines a few top-level functions for
-working with transactions.  \function{commit()} writes any modified
-objects to disk, making the changes permanent.  \function{abort()} rolls
-back any changes that have been made, restoring the original state of
-the objects.  If you're familiar with database transactional
-semantics, this is all just as you'd expect.  \function{get()} returns a
-\class{Transaction} object that has additional methods like
-\method{note()}, to add a note to the transaction metadata.
-
-More precisely, the \module{transaction} module exposes an instance of
-the \class{ThreadTransactionManager} transaction manager class as
-\code{transaction.manager}, and the \module{transaction} functions
-\function{get()} and \function{begin()} redirect to the same-named
-methods of \code{transaction.manager}.  The \function{commit()} and
-\function{abort()} functions apply the methods of the same names to
-the \class{Transaction} object returned by \code{transaction.manager.get()}.
-This is for convenience.  It's also possible to create your own transaction
-manager instances, and to tell \code{DB.open()} to use your transaction
-manager instead.
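-
-For example, attaching a note to the current transaction before committing
-(a minimal sketch):
-
-\begin{verbatim}
-import transaction
-
-txn = transaction.get()       # the current Transaction object
-txn.note('added user amk')    # recorded in the transaction metadata
-transaction.commit()
-\end{verbatim}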
-
-Because the integration with Python is so complete, it's a lot like
-having transactional semantics for your program's variables, and you
-can experiment with transactions at the Python interpreter's prompt:
-
-\begin{verbatim}
->>> newuser
-<User instance at 81b1f40>
->>> newuser.first_name           # Print initial value
-'Andrew'         
->>> newuser.first_name = 'Bob'   # Change first name
->>> newuser.first_name           # Verify the change
-'Bob'
->>> transaction.abort()          # Abort transaction
->>> newuser.first_name           # The value has changed back
-'Andrew'
-\end{verbatim}
-
-\subsection{Rules for Writing Persistent Classes}
-
-Practically all persistent languages impose some restrictions on
-programming style, warning against constructs they can't handle or
-adding subtle semantic changes, and the ZODB is no exception.
-Happily, the ZODB's restrictions are fairly simple to understand, and
-in practice it isn't too painful to work around them.
-
-The summary of rules is as follows:
-
-\begin{itemize}
-
-\item If you modify a mutable object that's the value of an object's
-attribute, the ZODB can't catch that, and won't mark the object as
-dirty.  The solution is to either set the dirty bit yourself when you
-modify mutable objects, or use a wrapper for Python's lists and
-dictionaries (\class{PersistentList},
-\class{PersistentMapping})
-that will set the dirty bit properly.
-
-\item Recent versions of the ZODB allow writing a class with 
-\method{__setattr__}, \method{__getattr__}, or \method{__delattr__}
-methods.  (Older versions didn't support this at all.)  If you write
-such a \method{__setattr__} or \method{__delattr__} method, its code
-has to set the dirty bit manually.
-
-\item A persistent class should not have a \method{__del__} method.
-The database moves objects freely between memory and storage.  If an
-object has not been used in a while, it may be released and its
-contents loaded from storage the next time it is used.  Since the
-Python interpreter is unaware of persistence, it would call
-\method{__del__} each time the object was freed.
-
-\end{itemize}
-
-Let's look at each of these rules in detail.
-
-\subsubsection{Modifying Mutable Objects}
-
-The ZODB uses various Python hooks to catch attribute accesses, and
-can trap most of the ways of modifying an object, but not all of them.
-If you modify a \class{User} object by assigning to one of its
-attributes, as in \code{userobj.first_name = 'Andrew'}, the ZODB will
-mark the object as having been changed, and it'll be written out on
-the following \method{commit()}.
-
-The most common idiom that \emph{isn't} caught by the ZODB is
-mutating a list or dictionary.  If \class{User} objects have an
-attribute named \code{friends} containing a list, calling
-\code{userobj.friends.append(otherUser)} doesn't mark
-\code{userobj} as modified; from the ZODB's point of
-view, \code{userobj.friends} was only read, and its value, which
-happened to be an ordinary Python list, was returned.  The ZODB isn't
-aware that the object returned was subsequently modified.
-
-This is one of the few quirks you'll have to remember when using the
-ZODB; if you modify a mutable attribute of an object in place, you
-have to manually mark the object as having been modified by setting
-its dirty bit to true.  This is done by setting the
-\member{_p_changed} attribute of the object to true:
-
-\begin{verbatim}
-userobj.friends.append(otherUser)
-userobj._p_changed = True
-\end{verbatim}
-
-You can hide the implementation detail of having to mark objects as
-dirty by designing your class's API to not use direct attribute
-access; instead, you can use the Java-style approach of accessor
-methods for everything, and then set the dirty bit within the accessor
-method.  For example, you might forbid accessing the \code{friends}
-attribute directly, and add a \method{get_friend_list()} accessor and
-an \method{add_friend()} modifier method to the class.  \method{add_friend()}
-would then look like this:
-
-\begin{verbatim}
-    def add_friend(self, friend):
-        self.friends.append(friend)
-        self._p_changed = True
-\end{verbatim}
-
-Alternatively, you could use a ZODB-aware list or mapping type that
-handles the dirty bit for you.  The ZODB comes with a
-\class{PersistentMapping} class, and I've contributed a
-\class{PersistentList} class that's included in my ZODB distribution, 
-and may make it into a future upstream release of Zope.
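-
-As a quick sketch of the difference, assuming \class{PersistentList}
-is importable (in recent ZODB releases it lives in the
-\module{persistent.list} module), the \code{friends} attribute can be
-declared so that in-place mutations are tracked automatically:
-
-\begin{verbatim}
-from persistent import Persistent
-from persistent.list import PersistentList
-
-class User(Persistent):
-    def __init__(self):
-        # A PersistentList sets its own dirty bit when mutated.
-        self.friends = PersistentList()
-
-# No manual _p_changed assignment is needed after this:
-userobj.friends.append(otherUser)
-\end{verbatim}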
-
-% XXX It'd be nice to discuss what happens when an object is ``ghosted'' (e.g.
-% you set an object's _p_changed = None).  The __p_deactivate__ method should
-% not be used (it's also obsolete). 
-
-\subsubsection{\method{__getattr__}, \method{__delattr__}, and \method{__setattr__}}
-
-ZODB allows persistent classes to have hook methods like
-\method{__getattr__} and \method{__setattr__}.  There are four special
-methods that control attribute access; the rules for each are a little
-different.
-
-The \method{__getattr__} method works pretty much the same for
-persistent classes as it does for other classes.  No special handling
-is needed.  If an object is a ghost, then it will be activated before
-\method{__getattr__} is called.
-
-The other methods are more delicate.  They will override the hooks
-provided by \class{Persistent}, so user code must call special methods
-to invoke those hooks anyway.
-
-The \method{__getattribute__} method will be called for all attribute
-access; it overrides the attribute access support inherited from
-\class{Persistent}.  A user-defined
-\method{__getattribute__} must always give the \class{Persistent} base
-class a chance to handle special attributes, as well as
-\member{__dict__} or \member{__class__}.  The user code should
-call \method{_p_getattr}, passing the name of the attribute as the
-only argument.  If it returns True, the user code should call
-\class{Persistent}'s \method{__getattribute__} to get the value.  If
-not, the custom user code can run.
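-
-A minimal sketch of such a hook, following the rules above (the
-tracing behavior is only illustrative):
-
-\begin{verbatim}
-from persistent import Persistent
-
-class Traced(Persistent):
-    def __getattribute__(self, name):
-        if Persistent._p_getattr(self, name):
-            # A special attribute; Persistent must handle it.
-            return Persistent.__getattribute__(self, name)
-        # Custom user code may run here.
-        print 'getting', name
-        return Persistent.__getattribute__(self, name)
-\end{verbatim}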
-
-A \method{__setattr__} hook will also override the \class{Persistent}
-\method{__setattr__} hook.  User code must treat it much like 
-\method{__getattribute__}.  The user-defined code must call
-\method{_p_setattr} first to allow \class{Persistent} to handle special
-attributes; \method{_p_setattr} takes the attribute name and value.
-If it returns True, \class{Persistent} handled the attribute.  If not,
-the user code can run.  If the user code modifies the object's state,
-it must set \member{_p_changed} to true.
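-
-For example, a sketch of a \method{__setattr__} hook obeying these
-rules might look like this (the custom behavior shown is only
-illustrative):
-
-\begin{verbatim}
-from persistent import Persistent
-
-class Guarded(Persistent):
-    def __setattr__(self, name, value):
-        if not self._p_setattr(name, value):
-            # Not a special attribute; run the custom user code,
-            # then set the dirty bit by hand.
-            self.__dict__[name] = value
-            self._p_changed = True
-\end{verbatim}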
-
-A \method{__delattr__} hook must be implemented the same way as the
-last two hooks.  The user code must call \method{_p_delattr}, passing
-the name of the attribute as an argument.  If the call returns True,
-\class{Persistent} handled the attribute; if not, the user code can
-run.
-
-\subsubsection{\method{__del__} methods}
-
-A \method{__del__} method is invoked just before the memory occupied by an
-unreferenced Python object is freed.  Because ZODB may materialize, and
-dematerialize, a given persistent object in memory any number of times,
-there isn't a meaningful relationship between when a persistent object's
-\method{__del__} method gets invoked and any natural aspect of a
-persistent object's life cycle.  For example, it is emphatically not the
-case that a persistent object's \method{__del__} method gets invoked only
-when the object is no longer referenced by other objects in the database.
-\method{__del__} is only concerned with reachability from objects in
-memory.
-
-Worse, a \method{__del__} method can interfere with the persistence
-machinery's goals.  For example, some number of persistent objects reside
-in a \class{Connection}'s memory cache.  At various times, to reduce memory
-burden, objects that haven't been referenced recently are removed from the
-cache.  If a persistent object with a \method{__del__} method is so
-removed, and the cache was holding the last memory reference to the object,
-the object's \method{__del__} method will be invoked.  If the
-\method{__del__} method then references any attribute of the object, ZODB
-needs to load the object from the database again, in order to satisfy the
-attribute reference.  This puts the object back into the cache again:  such
-an object is effectively immortal, occupying space in the memory cache
-forever, as every attempt to remove it from cache puts it back into the
-cache.  In ZODB versions prior to 3.2.2, this could even cause the cache
-reduction code to fall into an infinite loop.  The infinite loop no longer
-occurs, but such objects continue to live in the memory cache forever.
-
-Because \method{__del__} methods don't make good sense for persistent
-objects, and can create problems, persistent classes should not define
-\method{__del__} methods.
-
-\subsection{Writing Persistent Classes}
-
-Now that we've looked at the basics of programming using the ZODB,
-we'll turn to some more subtle tasks that are likely to come up for
-anyone using the ZODB in a production system.
-
-\subsubsection{Changing Instance Attributes}
-
-Ideally, before making a class persistent you would get its interface
-right the first time, so that no attributes would ever need to be
-added, removed, or have their interpretation change over time.  It's a
-worthy goal, but also an impractical one unless you're gifted with
-perfect knowledge of the future.  Such unnatural foresight can't be
-required of any person, so you have to be prepared to handle
-such structural changes gracefully.  In object-oriented database
-terminology, this is a schema update.  The ZODB doesn't have an actual
-schema specification, but you're changing the software's expectations
-of the data contained by an object, so you're implicitly changing the
-schema.
-
-One way to handle such a change is to write a one-time conversion
-program that will loop over every single object in the database and
-update them to match the new schema.  This can be easy if your network
-of object references is quite structured, making it easy to find all
-the instances of the class being modified.  For example, if all
-\class{User} objects can be found inside a single dictionary or
-BTree, then it would be a simple matter to loop over every
-\class{User} instance with a \keyword{for} statement.
-This is more difficult if your object graph is less structured; if
-\class{User} objects can be found as attributes of any number of
-different class instances, then there's no longer any easy way to find
-them all, short of writing a generalized object traversal function
-that would walk over every single object in a ZODB, checking each one
-to see if it's an instance of \class{User}.  
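-
-In the structured case, the one-time update can be a simple loop such
-as this sketch (the \code{root['users']} container and the new
-\code{email} attribute are hypothetical):
-
-\begin{verbatim}
-import transaction
-
-# One-time schema update: walk every User in a known container
-# and supply a default for the newly added attribute.
-for name, user in root['users'].items():
-    if not hasattr(user, 'email'):
-        user.email = ''
-transaction.commit()
-\end{verbatim}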
-
-Some OODBs support a feature called extents, which allow quickly
-finding all the instances of a given class, no matter where they are
-in the object graph; unfortunately the ZODB doesn't offer extents as a
-feature.
-
-% XXX Rest of section not written yet: __getstate__/__setstate__
-
diff --git a/branches/bug1734/doc/guide/storages.tex b/branches/bug1734/doc/guide/storages.tex
deleted file mode 100644
index 24cebfc7..00000000
--- a/branches/bug1734/doc/guide/storages.tex
+++ /dev/null
@@ -1,22 +0,0 @@
-
-% Storages
-%    FileStorage
-%    BerkeleyStorage
-%    OracleStorage
-
-\section{Storages}
-
-This chapter will examine the different \class{Storage} subclasses
-that are considered stable, discuss their varying characteristics, and
-explain how to administer them.
-
-\subsection{Using Multiple Storages}
-
-XXX explain mounting substorages
-
-\subsection{FileStorage}
-
-\subsection{BDBFullStorage}
-
-\subsection{OracleStorage}
-
diff --git a/branches/bug1734/doc/guide/transactions.tex b/branches/bug1734/doc/guide/transactions.tex
deleted file mode 100644
index cbcf367e..00000000
--- a/branches/bug1734/doc/guide/transactions.tex
+++ /dev/null
@@ -1,202 +0,0 @@
-%Transactions and Versioning
-%   Committing and Aborting
-%   Subtransactions
-%   Undoing
-%   Versions
-%   Multithreaded ZODB Programs
-
-
-\section{Transactions and Versioning}
-
-\subsection{Committing and Aborting}
-
-Changes made during a transaction don't appear in the database until
-the transaction commits.  This is done by calling the \method{commit()}
-method of the current \class{Transaction} object, where the latter is
-obtained from the \method{get()} method of the current transaction
-manager.  If the default thread transaction manager is being used, then
-\code{transaction.commit()} suffices.
-
-Similarly, a transaction can be explicitly aborted (all changes within
-the transaction thrown away) by invoking the \method{abort()} method
-of the current \class{Transaction} object, or simply
-\code{transaction.abort()} if using the default thread transaction manager.
-
-Prior to ZODB 3.3, if a commit failed (meaning the \code{commit()} call
-raised an exception), the transaction was implicitly aborted and a new
-transaction was implicitly started.  This could be very surprising if the
-exception was suppressed, and especially if the failing commit was one
-in a sequence of subtransaction commits.
-
-So, starting with ZODB 3.3, if a commit fails, all further attempts to
-commit, join, or register with the transaction raise
-\exception{ZODB.POSException.TransactionFailedError}.  You must explicitly
-start a new transaction then, either by calling the \method{abort()} method
-of the current transaction, or by calling the \method{begin()} method of the
-current transaction's transaction manager.
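-
-A sketch of the recovery pattern, using \exception{ConflictError} as
-an example of a failing commit:
-
-\begin{verbatim}
-import transaction
-from ZODB.POSException import ConflictError
-
-try:
-    transaction.commit()
-except ConflictError:
-    # Until the transaction is explicitly aborted, further
-    # commit/join/register attempts raise TransactionFailedError.
-    transaction.abort()
-\end{verbatim}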
-
-\subsection{Subtransactions}
-
-Subtransactions can be created within a transaction.  Each
-subtransaction can be individually committed and aborted, but the
-changes within a subtransaction are not truly committed until the
-containing transaction is committed.
-
-The primary purpose of subtransactions is to decrease the memory usage
-of transactions that touch a very large number of objects.  Consider a
-transaction during which 200,000 objects are modified.  All the
-objects that are modified in a single transaction have to remain in
-memory until the transaction is committed, because the ZODB can't
-discard them from the object cache.  This can potentially make the
-memory usage quite large.  With subtransactions, a commit can be
-performed at intervals, say, every 10,000 objects.  Those 10,000
-objects are then written to permanent storage and can be purged from
-the cache to free more space.
-
-To commit a subtransaction instead of a full transaction,
-pass a true value to the \method{commit()}
-or \method{abort()} method of the \class{Transaction} object.
-
-\begin{verbatim}
-# Commit a subtransaction
-transaction.commit(True)
-
-# Abort a subtransaction
-transaction.abort(True)
-\end{verbatim}
-
-A new subtransaction is automatically started upon successfully committing
-or aborting the previous subtransaction.
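-
-For example, a large update might be batched like this sketch (the
-\code{records} sequence and \code{container} mapping are
-hypothetical):
-
-\begin{verbatim}
-import transaction
-
-for i, record in enumerate(records):
-    container[record.key] = record
-    if (i + 1) % 10000 == 0:
-        # Flush modified objects to storage so the object
-        # cache can release them.
-        transaction.commit(True)
-transaction.commit()   # the real, final commit
-\end{verbatim}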
-
-
-\subsection{Undoing Changes}
-
-Some types of \class{Storage} support undoing a transaction even after
-it's been committed.  You can tell if this is the case by calling the
-\method{supportsUndo()} method of the \class{DB} instance, which
-returns true if the underlying storage supports undo.  Alternatively
-you can call the \method{supportsUndo()} method on the underlying
-storage instance.
-
-If a database supports undo, then the \method{undoLog(\var{start},
-\var{end}\optional{, func})} method on the \class{DB} instance returns
-the log of past transactions, returning transactions between the times
-\var{start} and \var{end}, measured in seconds from the epoch.
-If present, \var{func} is a function that acts as a filter on the
-transactions to be returned; it's passed a dictionary representing
-each transaction, and only transactions for which \var{func} returns
-true will be included in the list of transactions returned to the
-caller of \method{undoLog()}.  The dictionary contains keys for
-various properties of the transaction.  The most important keys are
-\samp{id}, for the transaction ID, and \samp{time}, for the time at
-which the transaction was committed.
-
-\begin{verbatim}
->>> print storage.undoLog(0, sys.maxint)
-[{'description': '',
-  'id': 'AzpGEGqU/0QAAAAAAAAGMA',
-  'time': 981126744.98,
-  'user_name': ''},
- {'description': '',
-  'id': 'AzpGC/hUOKoAAAAAAAAFDQ',
-  'time': 981126478.202,
-  'user_name': ''}
-  ...
-\end{verbatim}
-
-To store a description and a user name on a commit, get the current
-transaction and call the \method{note(\var{text})} method to store a
-description, and the
-\method{setUser(\var{user_name})} method to store the user name.
-While \method{setUser()} overwrites the current user name and replaces
-it with the new value, the \method{note()} method always adds the text
-to the transaction's description, so it can be called several times to
-log several different changes made in the course of a single
-transaction.
-
-\begin{verbatim}
-transaction.get().setUser('amk')
-transaction.get().note('Change ownership')
-\end{verbatim}
-
-To undo a transaction, call the \method{DB.undo(\var{id})} method,
-passing it the ID of the transaction to undo.  If the transaction
-can't be undone, a \exception{ZODB.POSException.UndoError} exception
-will be raised, with the message ``non-undoable
-transaction''.  Usually this will happen because later transactions
-modified the objects affected by the transaction you're trying to
-undo.
-
-After you call \method{undo()} you must commit the transaction for the
-undo to actually be applied.
-\footnote{There are actually two different ways a storage can
-implement the undo feature.  Most of the storages that ship with ZODB
-use the transactional form of undo described in the main text.  Some
-storages may use a non-transactional undo that makes changes visible
-immediately.}  There is one glitch in the undo process.  The thread
-that calls undo may not see the changes to the object until it calls
-\method{Connection.sync()} or commits another transaction.
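-
-Putting the pieces together, undoing the most recent transaction
-might look like this sketch:
-
-\begin{verbatim}
-import sys
-import transaction
-
-log = db.undoLog(0, sys.maxint)
-db.undo(log[0]['id'])    # most recent transaction comes first
-transaction.commit()     # the undo takes effect here
-\end{verbatim}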
-
-\subsection{Versions}
-
-\begin{notice}[warning]
-  Versions should be avoided.  They're going to be deprecated,
-  replaced by better approaches to long-running transactions.
-\end{notice}
-
-While many subtransactions can be contained within a single regular
-transaction, it's also possible to contain many regular transactions
-within a long-running transaction, called a version in ZODB
-terminology.  Inside a version, any number of transactions can be
-created and committed or rolled back, but the changes within a version
-are not made visible to other connections to the same ZODB.
-
-Not all storages support versions, but you can test for versioning
-ability by calling the \method{supportsVersions()} method of the
-\class{DB} instance, which returns true if the underlying storage
-supports versioning.
-
-A version can be selected when creating the \class{Connection}
-instance using the \method{DB.open(\optional{\var{version}})} method.
-The \var{version} argument must be a string that will be used as the
-name of the version.
-
-\begin{verbatim}
-vers_conn = db.open(version='Working version')
-\end{verbatim}
-
-Transactions can then be committed and aborted using this versioned
-connection.  Other connections that don't specify a version, or
-provide a different version name, will not see changes committed
-within the version named \samp{Working~version}.  To commit or abort a
-version, which will either make the changes visible to all clients or
-roll them back, call the \method{DB.commitVersion()} or
-\method{DB.abortVersion()} methods.
-XXX what are the source and dest arguments for?
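-
-A sketch of publishing a version's changes, assuming that, like undo,
-the operation takes effect when the enclosing transaction commits:
-
-\begin{verbatim}
-import transaction
-
-db.commitVersion('Working version')
-transaction.commit()
-\end{verbatim}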
-
-The ZODB makes no attempt to reconcile changes between different
-versions.  Instead, the first version which modifies an object will
-gain a lock on that object.  Attempting to modify the object from a
-different version or from an unversioned connection will cause a
-\exception{ZODB.POSException.VersionLockError} to be raised:
-
-\begin{verbatim}
-from ZODB.POSException import VersionLockError
-
-try:
-    transaction.commit()
-except VersionLockError, (obj_id, version):
-    print ('Cannot commit; object %s '
-           'locked by version %s' % (obj_id, version))
-\end{verbatim}
-
-The exception provides the ID of the locked object, and the name of
-the version having a lock on it.
-
-\subsection{Multithreaded ZODB Programs}
-
-ZODB databases can be accessed from multithreaded Python programs.
-The \class{Storage} and \class{DB} instances can be shared among
-several threads, as long as individual \class{Connection} instances
-are created for each thread.
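-
-A minimal sketch of the pattern (the \code{work} function is
-hypothetical):
-
-\begin{verbatim}
-import threading
-import transaction
-
-def worker(db):
-    conn = db.open()        # one Connection per thread
-    try:
-        work(conn.root())   # modify objects as usual
-        # The default thread transaction manager commits only
-        # this thread's changes.
-        transaction.commit()
-    finally:
-        conn.close()
-
-for i in range(4):
-    threading.Thread(target=worker, args=(db,)).start()
-\end{verbatim}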
-
diff --git a/branches/bug1734/doc/guide/zeo.tex b/branches/bug1734/doc/guide/zeo.tex
deleted file mode 100644
index e199751a..00000000
--- a/branches/bug1734/doc/guide/zeo.tex
+++ /dev/null
@@ -1,273 +0,0 @@
-
-% ZEO
-%    Installing ZEO
-%    How ZEO works (ClientStorage)
-%    Configuring ZEO
-   
-\section{ZEO}
-\label{zeo}
-
-\subsection{How ZEO Works}
-
-The ZODB, as I've described it so far, can only be used within a
-single Python process (though perhaps with multiple threads).  ZEO,
-Zope Enterprise Objects, extends the ZODB machinery to provide access
-to objects over a network.  The name ``Zope Enterprise Objects'' is a
-bit misleading; ZEO can be used to store Python objects and access
-them in a distributed fashion without Zope ever entering the picture.
-The combination of ZEO and ZODB is essentially a Python-specific
-object database.
-
-ZEO consists of about 12,000 lines of Python code, excluding tests.  The
-code is relatively small because it contains only code for a TCP/IP
-server, and for a new type of Storage, \class{ClientStorage}.
-\class{ClientStorage} simply makes remote procedure calls to the
-server, which then passes them on to a regular \class{Storage} class such
-as \class{FileStorage}.  The following diagram lays out the system:
-
-XXX insert diagram here later
-
-Any number of processes can create a \class{ClientStorage}
-instance, and any number of threads in each process can be using that
-instance.  \class{ClientStorage} aggressively caches objects
-locally, so in order to avoid using stale data the ZEO server sends
-an invalidation message to all the connected \class{ClientStorage}
-instances on every write operation.  The invalidation message contains
-the object ID for each object that's been modified, letting the
-\class{ClientStorage} instances delete the old data for the
-given object from their caches.
-
-This design decision has some consequences you should be aware of.
-First, while ZEO isn't tied to Zope, it was first written for use with
-Zope, which stores HTML, images, and program code in the database.  As
-a result, reads from the database are \emph{far} more frequent than
-writes, and ZEO is therefore better suited for read-intensive
-applications.  If every \class{ClientStorage} is writing to the
-database all the time, this will result in a storm of invalidation
-messages being sent, and this might take up more processing time than
-the actual database operations themselves. These messages are
-small and sent in batches, so there would need to be a lot of writes
-before it became a problem.
-
-On the other hand, for applications that have few writes in comparison
-to the number of read accesses, this aggressive caching can be a major
-win.  Consider a Slashdot-like discussion forum that divides the load
-among several Web servers.  If news items and postings are represented
-by objects and accessed through ZEO, then the most heavily accessed
-objects -- the most recent or most popular postings -- will very
-quickly wind up in the caches of the
-\class{ClientStorage} instances on the front-end servers.  The
-back-end ZEO server will do relatively little work, only being called
-upon to return the occasional older posting that's requested, and to
-send the occasional invalidate message when a new posting is added.
-The ZEO server isn't going to be contacted for every single request,
-so its workload will remain manageable.
-
-\subsection{Installing ZEO}
-
-This section covers how to install the ZEO package, and how to 
-configure and run a ZEO Storage Server on a machine. 
-
-\subsubsection{Requirements}
-
-The ZEO server software is included in ZODB3.  As with the rest of
-ZODB3, you'll need Python 2.3 or higher.
-
-\subsubsection{Running a server}
-
-The runzeo.py script in the ZEO directory can be used to start a
-server.  Run it with the -h option to see the various options.  If
-you're just experimenting, a good choice is to use
-\code{python ZEO/runzeo.py -a /tmp/zeosocket -f /tmp/test.fs} to run
-ZEO with a Unix domain socket and a \class{FileStorage}.
-
-\subsection{Testing the ZEO Installation}
-
-Once a ZEO server is up and running, using it is just like using ZODB
-with a more conventional disk-based storage; no new programming
-details are introduced by using a remote server.  The only difference
-is that programs must create a \class{ClientStorage} instance instead
-of a \class{FileStorage} instance.  From that point onward, ZODB-based
-code is happily unaware that objects are being retrieved from a ZEO
-server, and not from the local disk.
-
-As an example, and to test whether ZEO is working correctly, try
-running the following lines of code, which will connect to the server,
-add some bits of data to the root of the ZODB, and commits the
-transaction:
-
-\begin{verbatim}
-from ZEO import ClientStorage
-from ZODB import DB
-import transaction
-
-# Change next line to connect to your ZEO server
-addr = 'kronos.example.com', 1975
-storage = ClientStorage.ClientStorage(addr)
-db = DB(storage)
-conn = db.open()
-root = conn.root()
-
-# Store some things in the root
-root['list'] = ['a', 'b', 1.0, 3]
-root['dict'] = {'a':1, 'b':4}
-
-# Commit the transaction
-transaction.commit()
-\end{verbatim}
-
-If this code runs properly, then your ZEO server is working correctly.
-
-You can also use a configuration file.
-
-\begin{verbatim}
-<zodb>
-    <zeoclient>
-    server localhost:9100
-    </zeoclient>
-</zodb>
-\end{verbatim}
-
-One nice feature of the configuration file is that you don't need to
-specify imports for a specific storage.  That makes the code a little
-shorter and allows you to change storages without changing the code.
-
-\begin{verbatim}
-import ZODB.config
-
-db = ZODB.config.databaseFromURL('/tmp/zeo.conf')
-\end{verbatim}
-
-\subsection{ZEO Programming Notes}
-
-ZEO is written using \module{asyncore}, from the Python standard
-library.  It assumes that some part of the user application is running
-an \module{asyncore} mainloop.  For example, Zope runs the loop in a
-separate thread and ZEO uses that.  If your application does not have
-a mainloop, ZEO will not process incoming invalidation messages until
-you make some call into ZEO.  The \method{Connection.sync} method can
-be used to process pending invalidation messages.  You can call it
-when you want to make sure the \class{Connection} has the most recent
-version of every object, but you don't have any other work for ZEO to do.
-
-\subsection{Sample Application: chatter.py}
-
-For an example application, we'll build a little chat application.
-What's interesting is that none of the application's code deals with
-network programming at all; instead, an object will hold chat
-messages, and be magically shared between all the clients through ZEO.
-I won't present the complete script here; it's included in my ZODB
-distribution, and you can download it from
-\url{http://www.amk.ca/zodb/demos/}.  Only the interesting portions of
-the code will be covered here.
-
-The basic data structure is the \class{ChatSession} object,
-which provides an \method{add_message()} method that adds a
-message, and a \method{new_messages()} method that returns a list
-of new messages that have accumulated since the last call to
-\method{new_messages()}.  Internally, \class{ChatSession}
-maintains a B-tree that uses the time as the key, and stores the
-message as the corresponding value.
-
-The constructor for \class{ChatSession} is pretty simple; it simply
-creates an attribute containing a B-tree:
-
-\begin{verbatim}
-class ChatSession(Persistent):
-    def __init__(self, name):
-        self.name = name
-        # Internal attribute: _messages holds all the chat messages.        
-        self._messages = BTrees.OOBTree.OOBTree()        
-\end{verbatim}
-
-\method{add_message()} has to add a message to the
-\code{_messages} B-tree.  A complication is that it's possible
-that some other client is trying to add a message at the same time;
-when this happens, the client that commits first wins, and the second
-client will get a \exception{ConflictError} exception when it tries to
-commit.  For this application, \exception{ConflictError} isn't serious
-but simply means that the operation has to be retried; other
-applications might treat it as a fatal error.  The code uses
-\code{try...except...else} inside a \code{while} loop,
-breaking out of the loop when the commit works without raising an
-exception.
-
-\begin{verbatim}
-    def add_message(self, message):
-        """Add a message to the channel.
-        message -- text of the message to be added
-        """
-
-        while 1:
-            try:
-                now = time.time()
-                self._messages[now] = message
-                transaction.commit()
-            except ConflictError:
-                # Conflict occurred; abort the failed transaction,
-                # then pause for a little bit and try again.
-                transaction.abort()
-                time.sleep(.2)
-            else:
-                # No ConflictError exception raised, so break
-                # out of the enclosing while loop.
-                break
-        # end while
-\end{verbatim}
-
-\method{new_messages()} introduces the use of \textit{volatile}
-attributes.  Attributes of a persistent object that begin with
-\code{_v_} are considered volatile and are never stored in the
-database.  \method{new_messages()} needs to store the last time
-the method was called, but if the time was stored as a regular
-attribute, its value would be committed to the database and shared
-with all the other clients.  \method{new_messages()} would then
-return the new messages accumulated since any other client called
-\method{new_messages()}, which isn't what we want.
-
-\begin{verbatim}
-    def new_messages(self):
-        "Return new messages."
-
-        # self._v_last_time is the time of the most recent message
-        # returned to the user of this class. 
-        if not hasattr(self, '_v_last_time'):
-            self._v_last_time = 0
-
-        new = []
-        T = self._v_last_time
-
-        for T2, message in self._messages.items():
-            if T2 > T:
-                new.append(message)
-                self._v_last_time = T2
-
-        return new
-\end{verbatim}
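-
-Using the class from a client is then ordinary method calling; a
-polling loop might look like this sketch (the \code{root['chat']}
-location is hypothetical):
-
-\begin{verbatim}
-session = root['chat']
-session.add_message('hello')
-for message in session.new_messages():
-    print message
-\end{verbatim}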
-
-This application is interesting because it uses ZEO to easily share a
-data structure; ZEO and ZODB are being used for their networking
-ability, not primarily for their data storage ability.  I can foresee
-many interesting applications using ZEO in this way:
-
-\begin{itemize}
-  \item With a Tkinter front-end, and a cleverer, more scalable data
-  structure, you could build a shared whiteboard using the same
-  technique.
-
-  \item A shared chessboard object would make writing a networked chess
-  game easy.  
-
-  \item You could create a Python class containing a CD's title and
-  track information.  To make a CD database, a read-only ZEO server
-  could be opened to the world, or an HTTP or XML-RPC interface could
-  be written on top of the ZODB.
-
-  \item A program like Quicken could use a ZODB on the local disk to
-  store its data.  This avoids the need to write and maintain
-  specialized I/O code that reads in your objects and writes them out;
-  instead you can concentrate on the problem domain, writing objects
-  that represent cheques, stock portfolios, or whatever.
-
-\end{itemize}
-
diff --git a/branches/bug1734/doc/guide/zodb.tex b/branches/bug1734/doc/guide/zodb.tex
deleted file mode 100644
index a7e8d163..00000000
--- a/branches/bug1734/doc/guide/zodb.tex
+++ /dev/null
@@ -1,32 +0,0 @@
-\documentclass{howto}
-
-\title{ZODB/ZEO Programming Guide}
-\release{3.4a0}
-\date{\today}
-
-\author{A.M.\ Kuchling}
-\authoraddress{\email{amk@amk.ca}}
-
-\begin{document}
-\maketitle
-\tableofcontents
-
-\copyright{Copyright 2002 A.M. Kuchling.
-      Permission is granted to copy, distribute and/or modify this document
-      under the terms of the GNU Free Documentation License, Version 1.1
-      or any later version published by the Free Software Foundation;
-      with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
-      A copy of the license is included in the appendix entitled ``GNU
-      Free Documentation License''.}
-
-\input{introduction}
-\input{prog-zodb}
-\input{zeo}
-\input{transactions}
-\input{modules}
-
-\appendix
-\input links.tex
-\input gfdl.tex
-
-\end{document}
diff --git a/branches/bug1734/doc/storage.pdf b/branches/bug1734/doc/storage.pdf
deleted file mode 100644
index bead99c5b770f3c0c5f8be64de54d3435b417714..0000000000000000000000000000000000000000
GIT binary patch

diff --git a/branches/bug1734/doc/storage.tex b/branches/bug1734/doc/storage.tex
deleted file mode 100644
index c48cb52b..00000000
--- a/branches/bug1734/doc/storage.tex
+++ /dev/null
@@ -1,425 +0,0 @@
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%
-% Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-% All Rights Reserved.
-%
-% This software is subject to the provisions of the Zope Public License,
-% Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-% THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-% WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-% WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-% FOR A PARTICULAR PURPOSE.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\documentclass{howto}
-
-\title{ZODB Storage API}
-
-\release{1.00}
-
-\author{Zope Corporation}
-\authoraddress{
-    Lafayette Technology Center\\
-    513 Prince Edward Street\\
-    Fredericksburg, VA 22401\\
-    \url{http://www.zope.com/}
-}
-
-\begin{document}
-\maketitle
-
-\begin{abstract}
-\noindent
-A ZODB storage provides the low-level storage for ZODB transactions.
-Examples include FileStorage, OracleStorage, and bsddb3Storage.  The
-storage API handles storing and retrieving individual objects in a
-transaction-specifc way.  It also handles operations like pack and
-undo.  This document describes the interface implemented by storages.
-\end{abstract}
-
-\tableofcontents
-
-
-\section{Concepts}
-
-\subsection{Versions}
-
-Versions provide support for long-running transactions.  They extend
-transaction semantics, such as atomicity and serializability, to
-computation that involves many basic transactions, spread over long
-periods of time, which may be minutes or years.
-
-Versions were motivated by a common problem in website management,
-but may be useful in other domains as well.  Often, a website must be
-changed in a way that requires many operations over a period of
-time, and the changes must not be visible until completed and
-approved.
-\dfn{staging servers}.  Essentially, two copies of a website are
-maintained.  Work is performed on a staging server.  When work is
-completed, the entire site is copied from the staging server to the
-production server.  This process is too resource intensive and too
-monolithic.  It is not uncommon for separate, unrelated changes to be
-made to a website, and these changes need to be copied to the
-production server independently.  This requires an unreasonable amount
-of coordination, or multiple staging servers.
-
-ZODB addresses this problem through long-running transactions, called
-\dfn{versions}.  Changes made to a website can be made to a version
-(of the website).  The author sees the version of the site that
-reflects the changes, but people working outside of the version cannot
-see the changes.  When the changes are completed and approved, they
-can be saved, making them visible to others, almost instantaneously.
-
-Versions require support from the storage manager.  Version support
-is optional, so whether a particular database supports versions
-depends on its underlying storage manager.
-
-
-\section{Storage Interface}
-
-General issues:
-
-\begin{itemize}
-  \item Objects are stored as Python pickles.  The pickle format is
-    important, because various parts of ZODB depend on it, e.g. pack.
-  \item Conflict resolution.
-  \item The various versions of the interface.
-  \item Concurrency and transactions.
-  \item The various exceptions that can be raised.
-\end{itemize}
-
-An object that implements the \class{Storage} interface must support
-the following methods:
-
-\begin{methoddesc}{tpc_begin}{transaction\optional{, tid\optional{,
-        status}}}
-  Begin the two-phase commit for \var{transaction}.
-
-  This method blocks until the storage is no longer committing
-  another transaction, and then places the storage in the committing
-  state.  If the given transaction is the transaction that is already
-  being committed, the call does not block and returns immediately
-  without any effect.
-
-  The optional \var{tid} argument specifies the timestamp to be used
-  for the transaction ID and the new object serial numbers.  If it is
-  not specified, the implementation chooses the timestamp.
-
-  The optional \var{status} argument, which has a default value of
-  \code{'~'}, is the transaction status character; it is used when
-  copying transactions from another storage (see
-  \method{copyTransactionsFrom()}).
-\end{methoddesc}
-
-\begin{methoddesc}{store}{oid, serial, data, version, transaction}
-  Store \var{data}, a Python pickle, for the object identified by
-  \var{oid}.  A Storage need not and often will not write data
-  immediately.  If data are written, then the storage should be
-  prepared to undo the write if a transaction is aborted.
-
-  The value of \var{serial} is opaque; it should be the value returned
-  by the \method{load()} call that read the object.  \var{version} is
-  a string that identifies the version or the empty string.
-  \var{transaction}, an instance of
-  \class{ZODB.Transaction.Transaction}, is the current transaction.
-  The current transaction is the transaction passed to the most recent
-  \method{tpc_begin()} call.
-
-  There are several possible return values, depending in part on
-  whether the storage writes the data immediately.  The return value
-  will be one of:
-
-  \begin{itemize}
-        \item \code{None}, indicating the data has not been stored yet
-        \item a string, containing the new serial number for the
-          object
-        \item a sequence of object ID, serial number pairs, containing the
-          new serial numbers for objects updated by earlier
-          \method{store()} calls that are part of this transaction.
-          If the serial number is not a string, it is an exception
-          object that should be raised by the caller.
-          \note{This explanation is confusing; how to tell the
-          sequence of pairs from the exception?  Barry, Jeremy, please
-          clarify here.}
-  \end{itemize}
-
-  Several different exceptions can be raised when an error occurs.
-
-  \begin{itemize}
-        \item \exception{ConflictError} is raised when \var{serial}
-          does not match the most recent serial number for object
-          \var{oid}.
-
-        \item \exception{VersionLockError} is raised when object
-          \var{oid} is locked in a version and the \var{version}
-          argument contains a different version name or is empty.
-
-        \item \exception{StorageTransactionError} is raised when
-          \var{transaction} does not match the current transaction.
-
-        \item \exception{StorageError} or, more often, a subclass of
-          it, is raised when an internal error occurs while the
-          storage is handling the \method{store()} call.
-  \end{itemize}
-\end{methoddesc}
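-
-The following sketch (illustrative only; the helper name and the
-normalization it performs are not part of the interface) shows one
-way a caller might handle the return values described above:
-
-\begin{verbatim}
-def handle_store_result(result, oid):
-    # Normalize the possible return values of store() into a
-    # dict mapping oid -> new serial number.
-    if result is None:
-        return {}                  # data not stored yet
-    if isinstance(result, str):
-        return {oid: result}       # new serial for this object
-    serials = {}
-    for o, serial in result:       # sequence of (oid, serial) pairs
-        if not isinstance(serial, str):
-            raise serial           # non-string "serials" are exceptions
-        serials[o] = serial
-    return serials
-\end{verbatim}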
-
-\begin{methoddesc}{restore}{oid, serial, data, version, transaction}
-  A lot like \method{store()} but without all the consistency checks.
-  This should only be used when we \emph{know} the data is good, hence
-  the method name.  While the signature looks like \method{store()},
-  there are some differences:
-
-  \begin{itemize}
-        \item \var{serial} is the serial number of this revision, not
-          of the previous revision.  It is used instead of
-          \code{self._serial}, which is ignored.
-
-        \item Nothing is returned.
-
-        \item \var{data} can be \code{None}, which indicates a George
-          Bailey object (one whose creation has been transactionally
-          undone).
-  \end{itemize}
-\end{methoddesc}
-
-\begin{methoddesc}{new_oid}{}
-  XXX
-\end{methoddesc}
-
-\begin{methoddesc}{tpc_vote}{transaction}
-  XXX
-\end{methoddesc}
-
-\begin{methoddesc}{tpc_finish}{transaction, func}
-  Finish the transaction, making any transaction changes
-  permanent.  Changes must be made permanent at this point.
-
-  If \var{transaction} is not the current transaction, nothing
-  happens.
-
-  \var{func} is called with no arguments while the storage lock is
-  held, but possibly before the updated data is made durable.  This
-  argument exists to support the \class{Connection} object's
-  invalidation protocol.
-\end{methoddesc}
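-
-The following is a minimal sketch of how these methods fit together.
-It assumes a \var{storage}, a current transaction \var{t}, and
-\var{oid}, \var{serial}, \var{data} and \var{version} values supplied
-by the caller; it also assumes a \method{tpc_abort()} method, which
-this document does not describe.
-
-\begin{verbatim}
-storage.tpc_begin(t)
-try:
-    storage.store(oid, serial, data, version, t)
-    storage.tpc_vote(t)
-    storage.tpc_finish(t, lambda: None)  # no-op invalidation callback
-except:
-    storage.tpc_abort(t)                 # assumed; not described here
-    raise
-\end{verbatim}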
-
-\begin{methoddesc}{abortVersion}{version, transaction}
-  Clear any changes made by the given version.  \var{version} is the
-  version to be aborted; it may not be the empty string.
-  \var{transaction} is the current transaction.
-
-  This method is state dependent. It is an error to call this method
-  if the storage is not committing, or if the given transaction is not
-  the transaction given in the most recent \method{tpc_begin()}.
-
-  If undo is not supported, then version data may be simply
-  discarded.  If undo is supported, however, then the
-  \method{abortVersion()} operation must be undoable, which implies
-  that version data must be retained.  Use the \method{supportsUndo()}
-  method to determine if the storage supports the undo operation.
-\end{methoddesc}
-
-\begin{methoddesc}{commitVersion}{source, destination, transaction}
-  Store changes made in the \var{source} version into the
-  \var{destination} version.  A \exception{VersionCommitError} is
-  raised if the \var{source} and \var{destination} are equal or if
-  \var{source} is an empty string.  The \var{destination} may be an
-  empty string, in which case the data are saved to non-version
-  storage.
-
-  This method is state dependent.  It is an error to call this method
-  if the storage is not committing, or if the given transaction is not
-  the transaction given in the most recent \method{tpc_begin()}.
-
-  If the storage doesn't support undo, then the old version data may
-  be discarded.  If undo is supported, then this operation must be
-  undoable and old transaction data may not be discarded.  Use the
-  \method{supportsUndo()} method to determine if the storage supports
-  the undo operation.
-\end{methoddesc}
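-
-As an illustrative sketch (the version name is hypothetical, and the
-two-phase commit calls follow the earlier example), publishing the
-changes made in a version to non-version storage is an ordinary
-commit that calls \method{commitVersion()}:
-
-\begin{verbatim}
-storage.tpc_begin(t)
-storage.commitVersion('redesign', '', t)  # '' = non-version storage
-storage.tpc_vote(t)
-storage.tpc_finish(t, lambda: None)
-\end{verbatim}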
-
-\begin{methoddesc}{close}{}
-  Finalize the storage, releasing any external resources.  The storage
-  should not be used after this method is called.
-\end{methoddesc}
-
-\begin{methoddesc}{lastSerial}{oid}
-  Returns the serial number for the last committed transaction for the
-  object identified by \var{oid}.  If there is no serial number for
-  \var{oid} --- which can only occur if it represents a new object ---
-  returns \code{None}.
-  \note{This is not defined for \class{ZODB.BaseStorage}.}
-\end{methoddesc}
-
-\begin{methoddesc}{lastTransaction}{}
-  Return the transaction ID of the last committed transaction.
-  \note{This is not defined for \class{ZODB.BaseStorage}.}
-\end{methoddesc}
-
-\begin{methoddesc}{getName}{}
-  Returns the name of the storage.  The format and interpretation of
-  this name is storage dependent.  It could be a file name, a database
-  name, etc.
-\end{methoddesc}
-
-\begin{methoddesc}{getSize}{}
-  Returns the approximate size of the database, in bytes.
-\end{methoddesc}
-
-\begin{methoddesc}{getSerial}{oid}
-  Return the serial number of the most recent revision of the object
-  identified by \var{oid}.
-\end{methoddesc}
-
-\begin{methoddesc}{load}{oid, version}
-  Returns the pickle data and serial number for the object identified
-  by \var{oid} in the version \var{version}.
-\end{methoddesc}
-
-\begin{methoddesc}{loadSerial}{oid, serial}
-  Load a historical version of the object identified by \var{oid}
-  having serial number \var{serial}.
-\end{methoddesc}
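-
-For illustration (assuming a \var{storage} and an existing \var{oid}
-as before), the serial number returned by \method{load()} can be fed
-back to \method{loadSerial()} to re-fetch the same revision:
-
-\begin{verbatim}
-data, serial = storage.load(oid, '')    # current non-version data
-same = storage.loadSerial(oid, serial)  # the same revision, by serial
-assert same == data
-\end{verbatim}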
-
-\begin{methoddesc}{modifiedInVersion}{oid}
-  Returns the version that the object with identifier \var{oid} was
-  modified in, or an empty string if the object was not modified in a
-  version.
-\end{methoddesc}
-
-\begin{methoddesc}{isReadOnly}{}
-  Returns true if the storage is read-only, otherwise returns false.
-\end{methoddesc}
-
-\begin{methoddesc}{supportsTransactionalUndo}{}
-  Returns true if the storage implementation supports transactional
-  undo, or false if it does not.
-  \note{This is not defined for \class{ZODB.BaseStorage}.}
-\end{methoddesc}
-
-\begin{methoddesc}{supportsUndo}{}
-  Returns true if the storage implementation supports undo, or false
-  if it does not.
-\end{methoddesc}
-
-\begin{methoddesc}{supportsVersions}{}
-  Returns true if the storage implementation supports versions, or
-  false if it does not.
-\end{methoddesc}
-
-\begin{methoddesc}{transactionalUndo}{transaction_id, transaction}
-  Undo a transaction specified by \var{transaction_id}.  This may need
-  to do conflict resolution.
-  \note{This is not defined for \class{ZODB.BaseStorage}.}
-\end{methoddesc}
-
-\begin{methoddesc}{undo}{transaction_id}
-   Undo the transaction corresponding to the transaction ID given by
-   \var{transaction_id}.  If the transaction cannot be undone, then
-   \exception{UndoError} is raised.  On success, returns a sequence of
-   object IDs that were affected.
-\end{methoddesc}
-
-\begin{methoddesc}{undoInfo}{XXX}
-  XXX
-\end{methoddesc}
-
-\begin{methoddesc}{undoLog}{\optional{first\optional{,
-                            last\optional{, filter}}}}
-  Returns a sequence of \class{TransactionDescription} objects for
-  undoable transactions.  \var{first} gives the index of the first
-  transaction to be returned, with \code{0} (the default) being the
-  most recent.
-
-  \note{\var{last} is confusing; can Barry or Jeremy try to explain
-  this?}
-
-  If \var{filter} is provided and not \code{None}, it must be a
-  function which accepts a \class{TransactionDescription} object as a
-  parameter and returns true if the entry should be reported.  If
-  omitted or \code{None}, all entries are reported.
-\end{methoddesc}
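-
-An illustrative filter follows; the \code{user} attribute is an
-assumption, since this document does not define the attributes of
-\class{TransactionDescription}:
-
-\begin{verbatim}
-def by_alice(desc):
-    # Report only transactions committed by a particular user.
-    return desc.user == 'alice'
-
-entries = storage.undoLog(0, 20, by_alice)
-\end{verbatim}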
-
-\begin{methoddesc}{versionEmpty}{version}
-  Return true if there are no transactions for the specified version.
-\end{methoddesc}
-
-\begin{methoddesc}{versions}{\optional{max}}
-  Return a sequence of the versions stored in the storage.  If
-  \var{max} is given, the implementation may choose not to return more
-  than \var{max} version names.
-\end{methoddesc}
-
-\begin{methoddesc}{history}{oid\optional{, version\optional{,
-                            size\optional{, filter}}}}
-  Return a sequence of \class{HistoryEntry} objects.  The information
-  provides a log of the changes made to the object.  Data are reported
-  in reverse chronological order.  If \var{version} is given, history
-  information is given with respect to the specified version, or only
-  the non-versioned changes if the empty string is given.  By default,
-  all changes are reported.  The number of history entries reported is
-  constrained by \var{size}, which defaults to \code{1}.  If
-  \var{filter} is provided and not \code{None}, it must be a function
-  which accepts a \class{HistoryEntry} object as a parameter and
-  returns true if the entry should be reported.  If omitted or
-  \code{None}, all entries are reported.
-\end{methoddesc}
-
-\begin{methoddesc}{pack}{t, referencesf}
-  Remove transactions from the database that are no longer needed to
-  maintain the current state of the database contents.  Undo will not
-restore objects to states from before the most recent call to
-  \method{pack()}.
-\end{methoddesc}
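-
-A sketch of a typical call (assuming \code{referencesf} is importable
-as shown; its location may vary between releases):
-
-\begin{verbatim}
-import time
-from ZODB.referencesf import referencesf
-
-# Pack away whatever is no longer needed to reconstruct object
-# states as of one week ago.
-storage.pack(time.time() - 7 * 86400, referencesf)
-\end{verbatim}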
-
-\begin{methoddesc}{copyTransactionsFrom}{other\optional{, verbose}}
-  Copy transactions from another storage, given by \var{other}.  This
-  is typically used when converting a database from one storage
-  implementation to another.  This will use \method{restore()} if
-  available, but will use \method{store()} if \method{restore()} is
-  not available.  When \method{store()} is needed, this may fail with
-  \exception{ConflictError} or \exception{VersionLockError}.
-\end{methoddesc}
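-
-A minimal conversion sketch (FileStorage is used on both sides purely
-for concreteness; any pair of storages would do):
-
-\begin{verbatim}
-from ZODB.FileStorage import FileStorage
-
-src = FileStorage('Old.fs', read_only=1)
-dst = FileStorage('New.fs')
-dst.copyTransactionsFrom(src)
-dst.close()
-src.close()
-\end{verbatim}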
-
-\begin{methoddesc}{iterator}{\optional{start\optional{, stop}}}
-  Return an iterable object which produces all the transactions in a
-  range.  If \var{start} is given and not \code{None}, transactions
-  which occurred before the identified transaction are ignored.  If
-  \var{stop} is given and not \code{None}, transactions which occurred
-  after the identified transaction are ignored; the specific
-  transaction identified by \var{stop} will be included in the series
-  of transactions produced by the iterator.
-  \note{This is not defined for \class{ZODB.BaseStorage}.}
-\end{methoddesc}
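-
-An illustrative walk over the transaction history follows; the
-transaction and record attribute names are assumptions as far as this
-document is concerned:
-
-\begin{verbatim}
-for txn in storage.iterator():
-    print repr(txn.tid), txn.user, txn.description
-    for record in txn:
-        print '  oid:', repr(record.oid)
-\end{verbatim}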
-
-\begin{methoddesc}{registerDB}{db, limit}
-  Register a database \var{db} for distributed storage invalidation
-  messages.  The maximum number of objects to invalidate is given by
-  \var{limit}.  If more objects need to be invalidated than this
-  limit, then all objects are invalidated.  This argument may be
-  \code{None}, in which case no limit is set.  Non-distributed
-  storages should treat this as a null operation.  Storages should
-  work correctly even if this method is not called.
-\end{methoddesc}
-
-
-\section{ZODB.BaseStorage Implementation}
-
-\section{Notes for Storage Implementors}
-
-
-\section{Distributed Storage Interface}
-
-Distributed storages support use with multiple application processes.
-
-Distributed storages have a storage instance per application and some
-sort of central storage server that manages data on behalf of the
-individual storage instances.
-
-When a process changes an object, the object must be invalidated in all
-other processes using the storage.  The central storage sends a
-notification message to the other storage instances, which, in turn,
-send invalidation messages to their respective databases.
-
-\end{document}
diff --git a/branches/bug1734/doc/zdctl.txt b/branches/bug1734/doc/zdctl.txt
deleted file mode 100644
index f7f1cdd4..00000000
--- a/branches/bug1734/doc/zdctl.txt
+++ /dev/null
@@ -1,335 +0,0 @@
-Using zdctl and zdrun to manage server processes
-================================================
-
-
-Summary
--------
-
-Starting with Zope 2.7 and ZODB 3.2, Zope has a new way to configure
-and control server processes.  This file documents the new approach to
-server process management; the new approach to configuration is
-documented elsewhere, although some examples will be given here.  We
-use the ZEO server as a running example, although this isn't a
-complete manual for configuring or running ZEO.
-
-This documentation applies to Unix/Linux systems; zdctl and zdrun do
-not work on Windows.
-
-
-Prerequisites
--------------
-
-This document assumes that you have installed the ZODB3 software
-(version 3.2 or higher) using a variation on the following command,
-given from the root directory of the ZODB3 distribution::
-
-  $ python setup.py install
-
-This installs the packages ZConfig, ZEO, zdaemon, zLOG, ZODB and
-various other needed packages and extension modules in the Python
-interpreter's site-packages directory, and installs scripts including
-zdctl.py, zdrun.py, runzeo.py and mkzeoinst.py in /usr/local/bin
-(actually the bin directory from which the Python interpreter was
-loaded).
-
-When you receive ZODB as a part of Zope (version 2.7 or higher), the
-installation instructions will explain how to reach a similar state.
-
-
-Introduction
-------------
-
-The most basic way to run a ZEO server is using the following
-command::
-
-  $ runzeo.py -a 9999 -f Data.fs
-
-Here 9999 is the ZEO port (you can pick your own unused TCP port
-number in the range 1024 through 65535, inclusive); Data.fs is the
-storage file.  Again, you can pick any filename you want; the
-ZODB.FileStorage module code creates this file and various other files
-with additional extensions, like Data.fs.index, Data.fs.lock, and
-Data.fs.tmp.
-
-If something's wrong, for example if you picked a bad port number or
-filename, you'll get an error message or an exception right away and
-runzeo.py will exit with a non-zero exit status.  The exit status is 2
-for command line syntax errors, 1 for other errors.
-
-If all's well, runzeo.py will emit a few logging messages to stderr
-and start serving, until you hit ^C.  For example::
-
-  $ runzeo.py -a 9999 -f Data.fs
-  ------
-  2003-01-24T11:49:27 INFO(0) RUNSVR opening storage '1' using FileStorage
-  ------
-  2003-01-24T11:49:27 INFO(0) ZSS:23531 StorageServer created RW with
-  storages: 1:RW:Data.fs
-  ------
-  2003-01-24T11:49:27 INFO(0) zrpc:23531 listening on ('', 9999)
-
-At this point you can hit ^C to stop it; runzeo.py will catch the
-interrupt signal, emit a few more log messages and exit::
-
-  ^C
-  ------
-  2003-01-24T12:11:15 INFO(0) RUNSVR terminated by SIGINT
-  ------
-  2003-01-24T12:11:15 INFO(0) RUNSVR closing storage '1'
-  $ 
-
-This may be fine for testing, but a bad idea for running a ZEO server
-in a production environment.  In production, you want the ZEO server
-to be run as a daemon process, you want the log output to go to a
-file, you want the ZEO server to be started when the system is
-rebooted, and (usually) you want the ZEO server to be automatically
-restarted when it crashes.  You should also have a log rotation policy
-in place so that your disk doesn't fill up with log messages.
-
-The zdctl/zdrun combo can take care of running a server as a daemon
-process and restarting it when it crashes.  It can also be used to
-start it when the system is rebooted.  Sending log output to a file is
-done by adjusting the ZEO server configuration.  There are many fine
-existing tools to rotate log files, so we don't provide this
-functionality; zdctl has a command to send the server process a
-SIGUSR2 signal to tell it to reopen its log file after log rotation
-has taken place (the ZEO server has a signal handler that catches
-SIGUSR2 for this purpose).
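-
-For example, a log rotation hook could ask the server to reopen its
-log file with a small Python sketch like the following; the zdctl
-invocation mirrors the command line examples below, so adjust it to
-your setup::
-
-  import os
-
-  # "logreopen" makes zdctl send SIGUSR2 to the server process.
-  os.system('zdctl.py -p "runzeo.py -a 9999 -f Data.fs" logreopen')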
-
-In addition, zdctl lets a system administrator or developer control
-the server process.  This is useful to deal with typical problems like
-restarting a hanging server or adjusting a server's configuration.
-
-The zdctl program can be used in two ways: in one-shot mode it
-executes a single command (such as "start", "stop" or "restart"); in
-interactive mode it acts much like a typical Unix shell or the Python
-interpreter, printing a prompt to standard output and reading commands
-from standard input.  It currently cannot be used to read commands
-from a file; if you need to script it, you can use a shell script
-containing repeated one-shot invocations.
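-
-For example, a small Python script with the same effect might look
-like this (a sketch only; the server command line follows the
-examples below)::
-
-  import os
-
-  base = 'zdctl.py -p "runzeo.py -a 9999 -f Data.fs" '
-  for cmd in ('stop', 'start', 'status'):
-      os.system(base + cmd)   # one one-shot invocation per command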
-
-zdctl can be configured using command line options or a configuration
-file.  In practice, you'll want to use a configuration file; but first
-we'll show some examples using command line options only.  Here's a
-one-shot zdctl command to start the ZEO server::
-
-  $ zdctl.py -p "runzeo.py -a 9999 -f Data.fs" start
-
-The -p option specifies the server program; it is the runzeo
-invocation that we showed before.  The start argument tells it to
-start the process.  What actually happens is that zdctl starts zdrun,
-and zdrun now manages the ZEO server process.  The zdctl process exits
-once zdrun has started the ZEO server process; the zdrun process stays
-around, and when the ZEO server process crashes it will restart it.
-
-To check that the ZEO server is now running, use the zdctl status
-command::
-
-  $ zdctl.py -p "runzeo.py -a 9999 -f Data.fs" status
-
-This prints a one-line message telling you that the program is
-running.  To stop the ZEO server, use the zdctl stop command::
-
-  $ zdctl.py -p "runzeo.py -a 9999 -f Data.fs" stop
-
-To check that it is no longer running, use the zdctl status command
-again.
-
-
-Daemon mode
------------
-
-If you are playing along on your computer, you cannot have missed that
-some log output has been spewing to your terminal window.  While this
-may give you a warm and fuzzy feeling that something is actually
-happening, after a while it can get quite annoying (especially if
-clients are actually connecting to the server).  This can be avoided
-by using the -d flag, which enables "daemon mode"::
-
-  $ zdctl.py -d -p "runzeo.py -a 9999 -f Data.fs" start
-
-Daemon mode does several subtle things; see for example section 13.3
-of "Advanced Programming in the UNIX Environment" by Richard Stevens
-for a good explanation of daemon mode.  For now, the most important
-effect is that the standard input, output and error streams are
-redirected to /dev/null, and that the process is "detached" from your
-controlling tty, which implies that it won't receive a SIGHUP signal
-when you log out.
-
-
-Using a configuration file
---------------------------
-
-I hope you are using a Unix shell with command line history, otherwise
-entering the examples above would have been quite a pain.  But a
-better way to control zdctl and zdrun's many options without having to
-type them over and over again is to use a configuration file.  Here's
-a small configuration file; place this in the file "zeoctl.conf" (the
-name is just a convention; you can call it "foo" if you prefer)::
-
-  # Sample zdctl/zdrun configuration
-  <runner>
-    program       runzeo.py -a 9999 -f Data.fs
-    daemon	  true
-    directory     /tmp/zeohome
-    socket-name   /tmp/zeohome/zdsock
-  </runner>
-
-The "program" and "daemon" lines correspond to the -p and -d command
-line options discussed above.  The "directory" line is new.  It
-specifies a directory into which zdrun (but not zdctl!) chdirs.  This
-directory should exist; zdctl won't create it for you.  The Data.fs
-filename passed to runzeo.py is interpreted relative to this
-directory.  Finally, the "socket-name" line names the Unix domain
-socket that is used for communication between zdctl and zdrun.  It
-defaults to zdsock in the current directory, a default you definitely
-want to override for production usage.
-
-To invoke zdctl with a configuration file, use its -C option to name
-the configuration file, for example::
-
-  $ zdctl.py -C zeoctl.conf start
-
-  $ zdctl.py -C zeoctl.conf status
-
-  $ zdctl.py -C zeoctl.conf stop
-
-
-Interactive mode
-----------------
-
-Using a configuration file makes it a little easier to repeatedly
-start, stop and request status of a particular server, but it still
-requires typing the configuration file name on each command.
-Fortunately, zdctl.py can be used as an interactive "shell" which lets
-you execute repeated commands for the same server.  Simply invoke
-zdctl.py without the final argument ("start", "status" or "stop" in
-the above examples)::
-
-  $ zdctl.py -C zeoctl.conf
-  program: runzeo.py -a 9999 -f Data.fs
-  daemon manager not running
-  zdctl> 
-
-The first two lines of output are status messages (and could be
-different in your case); the final line is the interactive command
-prompt.  At this prompt, you can type commands::
-
-  zdctl> help
-
-  Documented commands (type help <topic>):
-  ========================================
-  EOF             fg              foreground      help            kill
-  logreopen       logtail         quit            reload          restart
-  shell           show            start           status          stop
-  wait            
-
-  zdctl> help start
-  start -- Start the daemon process.
-	   If it is already running, do nothing.
-  zdctl> start
-  daemon process started, pid=31580
-  zdctl> status
-  program running; pid=31580
-  zdctl> stop
-  daemon process stopped
-  zdctl> quit
-  daemon manager not running
-  $ 
-
-In short, the commands you can type at the interactive prompt are the
-same commands (with optional arguments) that you can use as positional
-arguments on the zdctl.py command line.
-
-The interactive shell has some additional features:
-
-- Line editing and command line history using the standard GNU
-  readline module.
-
-- A blank line repeats the last command (especially useful for status).
-
-- Command and argument completion using the TAB key.
-
-One final note: some people don't like that an invocation without
-arguments enters interactive mode.  If this describes you, there's an
-easy way to disable this feature: add a line saying::
-
-  default-to-interactive false
-
-to the zeoctl.conf file.  You can still enter interactive mode by
-using the -i option.
-
-
-Using mkzeoinst.py
-------------------
-
-If you still think that all of the above is a lot of typing, you're
-right.  Fortunately, there's a simple utility that helps you create
-and configure a ZEO server instance.  mkzeoinst.py requires one
-argument, the ZEO server's "home directory".  After that, you can
-optionally specify a service port number; the port defaults to 9999.
-
-mkzeoinst.py creates the server home directory (and its ancestor
-directories if necessary), and then creates the following directory
-substructure:
-
-  bin/ - directory for scripts (zeoctl)
-  etc/ - directory for configuration files (zeo.conf, zeoctl.conf)
-  log/ - directory for log files (zeo.log, zeoctl.log)
-  var/ - directory for data files (Data.fs and friends)
-
-If the server home directory or any of its subdirectories already
-exist, mkzeoinst.py will note this and assume you are rebuilding an
-existing instance.  (In fact, it prints a message for each directory
-it creates but is silent about existing directories.)
-
-It then creates the following files:
-
-  bin/zeoctl      - executable shell script to run zdctl.py
-  etc/zeo.conf    - configuration file for ZEO
-  etc/zeoctl.conf - configuration file for zdrun.py and zdctl.py
-
-If any of the files it wants to create already exists and is
-non-empty, it does not write the file.  (An empty file will be
-overwritten though.)  If the existing contents differ from what it
-would have written if the file didn't exist, it prints a warning
-message; otherwise the skipping is silent.
-
-Other errors (e.g. permission errors creating or reading files or
-directories) cause mkzeoinst.py to bail with an error message; it does
-not clean up the work already done.
-
-The created files contain absolute path references to all of the
-programs, files, directories used.  They also contain default values
-for most configuration settings that one might normally want to
-configure.  Most configured settings are the same as the defaults;
-however, daemon mode is on while the regular default is off.  Log
-files are configured to go into the log directory.  It configures
-separate log files for zdrun.py/zdctl.py (log/zeoctl.log) and for the
-ZEO server itself (log/zeo.log).  Once created, the files are yours;
-feel free to edit them to suit your taste.
-
-The bin/zeoctl script should be invoked with the positional arguments
-(e.g. "start", "stop" or "status") that you would pass to zdctl.py;
-the script hardcodes the configuration file so you don't have to pass
-that.  It can also be invoked without arguments to enter interactive
-mode.
-
-One final detail: if you want the ZEO server to be started
-automatically when the machine is rebooted, and you're lucky enough to
-be using a recent Red Hat (or similar) system, you can copy the
-bin/zeoctl script into the /etc/rc.d/init.d/ directory and use
-chkconfig(8) to create the correct symlinks to it; the bin/zeoctl
-script already has the appropriate magical comments for chkconfig.
-
-
-zdctl reference
----------------
-
-TBD
-
-
-zdrun reference
----------------
-
-TBD
diff --git a/branches/bug1734/doc/zodb.pdf b/branches/bug1734/doc/zodb.pdf
deleted file mode 100644
index f4a495bff6eb1e88f74d7d8fc78fdfc23995e87b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 121233
zX(5Iz=Q;OOL<HNB7`J?ctF>)f#rC$K4G2Sd$>^a0dgXPpoz02J^l27Be@dBj?ea?z
z6kr5F1|9OZWLvpBxBK0olXH<a1r#D$7ePDT%!Aw&Q|uHslDUu1a5|rNIQgh)125q^
zz~ICyLij{3T_;6q{?(g&FlkL1gdLnV^Ml<$z3FPL7xo5e=bBb0*!N0yVZXtN%O$qb
zTCGwMG};8UVP;LM)6CCJuNOk}gJ`WcHzE<`{YWwlDL_$;c{clu+m^QHwFzBOB&yt6
zPUO};J}!X(o=1xvLbzbXJI)*~ntWyy`;+$mI7gZSc23L$l+Bo!Y#vKiJjo9}%1|2F
z{{`^=yZrxefcFpAz<*~X|I3{4AB^Px3*h}LzWxL7{?nT9p8@Y*z0-dm@c!RZl7AeL
z{U1_06FURP-=!ocG_7O5&USosbsKd5fXzZxAikf?#dEL*CN|5kY!jIw70GQpXvETV
zAm&(mfAkJvN+OXuEo=dv#6!zBi^$sP;9=VD@@#oMlQO=j$B(>!?|a~l4jfN=6%H0C
zO?(ZVc#OmoOs4*Pzq&WoJ-OnJ!_LFL)~=;3w`q;Yx4RxwToIXRFxGCIFF?Cb@jZ3{
zb$vTs<D*^J#qHjI_3n&1+f4`OVKY|!E_Z|d`18@7%8_TUuWk*I!`0WLMF}6jL-kR^
zA1Cw9s2bjtZ_E4h^2)~%ebd()9gI`zGG_)3INA6ylnXnJII;PCW9#B$RSp$gKOy%X
zP^r=>NWGy|7xu_qjO*vUJ8<K7f0ezTHj%WKZ6?2T@_Vy8F&`8Oi8AzUbMZK30*Ix>
z&N_e4ryS!q%uLXy82d-mojNe(LGIFHGdwWUu^A%OxKgcSR`hB+Xj=^yZ_{=mU|Z4Y
z%qx8$A!8tK!30iAVIS)v;&E$XoFE2f8t{?vVi)Pj`BFd3!W&Il06)w-KO0|d={w+=
znj;j)?_J0{wR_BgjU}rvbhN^DZ`ekdv<577*ez)kMas06ZBox1xe0r6emfP_Y15|Y
zb2#?@grhInUFlwRxRmnV1$({?=r)r({o%!Q2ySrJK=umu#t-z#if=jyyNr)@?)ch<
z92VV%x;YF@O!-6K<_L6`oPQd#_Ob1G3(9o!tvW(x+;@1TI_*=;;Ibh7lcm@F7Cd$T
zWa_sZ?|Mj&EDhV@eew1pZE983Z?ZcK*hEM@p{Zi4Hgp@_6kIkqcjtifZfe7J2#jxV
z_WKc<R6r*2@6qfx(9%E32jbrp)))%=xvuc^asvI_EM4!-yLVhv#^SWQ&T7dX_ZNz|
zdleA`(d==AcgU6&hc(0zXWaOwgf$B=C~?VnCh~tGPa@f+kg)Y>SO6!dVJ?C>#BSg0
zM2c3{mJ`aS%hP?!dMKLi>KMbj{l4!}DltrLJ`?tG%{S1Nm};$r@``T5hqmFUpv*my
z$38y+ZH;SgjHBjOm8=M-Hn;13kTgoYT4i%n5-==3D#Cm+vB9i?w#Ung%Pb5J`<>co
zy^O}Ft1bca>AHjJ5%eydnsE-=R1U*;SoaO(%__n>{xK24S%Vsb=d_IwIlkHZhb<Il
zo%Kmy80wG{`TAgtW|MURx{842^H$7O+s*b1Q86?R%H^@do1T<Y8tupT@0RrV$ahSg
zzW#TV&rl64L&ZlBrdcK7XeE>-_i;nPksa}L+96@4woj)4E<<VAP;5G9->NO$zGcUL
z&j}&{S)bn=B8AGA_I{}e^20{XV4q_`PN@SpyJPM)IARr6Ocrkbm7&5ZF2mZl+B1z<
z9{>^<Qi=NPF_dCP7)X&HPZEK&dWGZfQ6ZG+EEa&4{z|&tN{<TKt&#-7da(U?nD1HN
z;6hOF==_e{?2taL@Jq_-l|@s%k>gxq>cG%+2nE*vcC@pf8)PMWE{@Un4B7Eorjw$X
zPHRkjc7Tn{jR8vWHGr$jI0a$4Sb)K4-0Tc%VxOBp3k_9vQ(-KDrD^>U8ddJ5xKeI*
zXCf}-yr<ep$*JU!^D@lP0_lhXSEeN9eC57sNhI=k1gI|v7k;8~k}ADyBzB#1-g<~;
zl^bi`T~S?}?)=L1rRN+Ht-#v8Vx*9IyO~IC;PnWyKPG6`tWk=qCDc0<c@6NE6Fs?R
zNwj)Q$c(#s{70+RQ_lxV5|d2yc3gL}2otJg5I-pPHbJMLLDvv^UYYe+Nd$=vh+JNK
zRRlxX*{|b3e*m|>of(Jc+Den*aTL2BNLaZbutyM0#=sm}ZBWGF4Bz;pdvx(k4s41S
z+{?pX1y8^McF~De%irX~g0Y3VVMShE4=CaK1~Tc&Wt6zP(`_0>K9c)?kv@nJrEo0F
zvLaGVqe*^GK$GA4T5os1^NmY|tu~b0p`;xN3t!??l{Ic2EGX5+H)hQ$xk5eyiad;Q
z*QLuRBCSpOYUdc`wYB9MXuiIX(r_XkNwVW!Ot$gjjK<tJrK-xYf<GIrp8B=$9&1tr
z@x}w}mB@!DD@K~QAYx_e+DL{XO>dmv#H0#jcr3DvC-*YlavE!Ps#}oXCw=4hDnP<)
zo|Kd0QlDH2&g|Z1QFoX>*anyZt0uqSj63agJCl;L+qeqNbl6CCPG(E60pP%_yB_fo
z9gVBfVKKxuqQpwCMR1H4&VqDUAqaaY9`&s9XPQ-xHWIVwPA7nVTECobqKLF$+Pl*y
zTYXUuo7UEeV5*HT0D*62iNdAn%T;d_3@I(K2|$Ot*+Z0@t!`V37i4QtxNR#gMcSp<
znipA14GF_Cbxs`kY2TPNvpbiL_#9ubY82HgffVkr`4UPW6)|!6ZGp>Jrh}2>0%b=V
zZ|&NrQYw|6)M3}}kj}VQ3|c;;nQ0Scn0hqf**arHxq$z%>;yAt+9P?4ecB>oy=YZ}
zI*dnQ_aYnzz5KC6P!u28Zkvc;fhZ5<29yvSIHMozdYOY8ig^SyYJQ^F$z^BL1432}
zy!_??iGiR3oScsZHPGFO&n=8(P=eo-+!!j=u;+q4VMwkXVuG$0I_65l)59t<M^0Sp
zLGV_6Q2_5fRA;;uDwFmtv9D|bq^2l4$f=g*O#sQ~9W%q5GBZcR2NdCmha<YP#yO+-
zx2Y1ol9!V^beE?Kn&S0X8LB4^wdC4Et9;A((r(N*r`#V>=)FCib^zT0D^m!1<LIjN
zwA1q46B2`dDB?FQQylt|hU(aSI`nA}flo!%>n%DlS@U!$$*_y!A_Mgb`-MIdPKj3d
zd_KNO;cNNGQ?iA1u!Y_f;b1Ahz>&d#GLelpu<YdfWp$gF+G`L=5Y7ffsMKYWZh+t@
zpb-itIu%%_Px=7jb1YfXtboFkf<2WELBr4Xg38$i%QL)j%8jZ{l?Tjocddkz`E#EA
z3OoC-^%o=tnePF+GQ<VnwJ$xD#&NkGt5Fn5u$3AiRx*5O6bZQ0vI-X1l{tza@=#We
zPVr*-JW>FdlEpIN@r_7f^xb8W@%dG0?zequlPg%`0-3WO&5F?59hB_Q3%Hm^`|ROe
z6%fP9j4?_=-#j}H4k27;LFivIG6S^CYlaB#b`hv11|I~qV{g_`yfXxd^sb}jB@g#`
zi4}*?FS~aceo+6OC9}}0{yko9^|HEH)S)z%PC!zbqm+Q!``PfLzg;@=P{O#+?5!+W
z5seB0OE*#}pS{JRJ+(9E+0DvgvR`SFMJ!-U7#FM`gJW<vr)k|1ZSr7@04)H|MPxWW
zAYDqL2tX|OoMO&_hTudSIpZvY5O%(cczB1~x^T)6ym04dwWdYsTB>l=XK<t?M_D+R
zh>TGt{ZgJ$Zj{3I@7i@3(-bNMZ|~Ea699SjjCU}8%+-C=SK|-@8`>SN6UAmr7#k(Z
zb1h|Ywj@;lyL1q&QqDSFLr+T5<I;dWiP?RfP}UlSH~nnc-x{!_nI3K8pom`4c#fN8
z=4Rpn#G|}$=8Q|uvj*Ff^w{yDX6N@w@HcRt?m5m=BfpBu;}Kivbtq(O;;Jp;o@n9W
zX6HBdpD-$4TdV41ReUS*G0Qq8J5&VBxRH5Q5gu0V+`zL^Z@)|8P7;7I_3+IOBi*0A
z3YG-niK)65S9lu4sg)AHc0YiuC>`-)tsd5hPwKa}QF~sD<(SFC?~S>TvSw0?8{(x$
z&*wMeh}5d{WrtfvzCX}>)>xA(kHL>lL|EbcniLht=t`RqK0ZCY-tgvxyV$7)c@!on
z)r7pz^7t^=LX`=2^M=DQg*S@^zjW#%<T59YQ|J99uql(~%OVjOMW>|~bo-f-|9VYc
zgYxq1K>cKz!-vRN%IaA~|H^KD|J<|i1gwg8QwiOGVzJ$W_vV(<j<^tN0LQ>L_p?<6
zr9%Di2LNz5OUEU=Za3m;iHQF+h=Q(;ov3Ig`l}Kp^$n`tF#c;;GL=~t@bqsL4Zn2Y
z{Z3?y53MGqMlv<@x5+r9>@?OEI=Jo5%Ei~YLAujl9ND4hK5gfFHDaJ|!&<rd57HcZ
zo0>WPwr>(GQ%1LW_Q?(rI^VxR9Xc4<3%XDT%8HF^?}O*CwG(wSw0)?0QwK<aCky)r
zBP+5xY(FFC0F#&_2pLrXav;;CV)b~>DR~_%W<?m#o7x6*fF_3>vrdz^_7&h2#jCEE
z^>6Ms6#(H!A`IFFGZfaYCZ|?~$*4o)n2AMoI_QcTQ!Gc?2J-;msaCgBvh~o{l+f)D
zCdX1JMlsj@W^cuWi-w9~SOUdJXCKaI2(4aM4zvm2ab!9(ewr!8d;kLk2r2VZBAj3s
znGD%I@~ZAQc-$!>^l<IXkHf?Ly0@mq?8pPhsZcS+cU&~2kQjN?BTag4{<4og6J1eQ
z1$H;CB|OxehkL#^XNf*T<ZAo-`*J~#5ZedonYNCwszv{1lyxROoKvZ7FiQa;|Ke!?
z6D6dg-yVT!wYFe%?aRa!nqfuae%mrkji*H-qN87DtppmpR4OGq^g>kE2x+r2gds|*
z@x|IsaLI#O<(L<p_|IyRXgDSu(Vq;jo4QEp^V}TIA8+|rD_IX8DPcW#I}-^X%fyco
zYD?#@70H@6ifj4yhcFvYF#UIAt>`!7zj~&frH;Da_ix5`dyMYR%F=&@AtZ=%cP{nd
z(I@yfZi1PD#KYV~LR$7&4GRj?n+<2uNeZuSY3(_Zcg+4^PhHLPwnN{|ThA9ivbYUw
zP>XH&wiPnqiPv%zOcy!K!_hH^cs}r0WySk4k4kyIOV?OQk+LHlid1gpg*#SM`o1Db
z;h}>%P|PR%X5nd1DN$rqo)y!L`0l&9@IJHOD!xO9c3TAJ+BCP8HMLk~Gd@!AipecY
zM4tt%9SlIk0P-?Iq>NIC(Z!j_6x|(W@N|o!`|6ud4x^a70b%Rpmt%6B)SQRDG`Nre
z*5PiPI|l9Wn`_eQ7)Su)GsL%<!Tv>(7YdYDjuG(vX$5~m)oV5I%%)XQAA`0FnGWu%
zyQrOZGT`b4p3-_H_;X13u0vO&5o49)<zsryxLmqpnA`9ZxIH(x;orFg#{cYY`eVEP
z@1b<YKWgp%zldjlH4puROZemczhQ6s<Hdii_<wQ<e{IG7Pl0bbQRc4>|1SgnKk^iR
zPgVO8;r^8D{!3M3X82O<{-)PyLQ^X7i&*HIs-B)uP9BH@$T>*-v79_XaGJ!HOgY41
zM{Lm_B}_@2JhH!Zcnbc6(8!dzTDIC5rUz{01pE$m*B9Ep^==fXa&Z{0?Y&&BSTEVv
z18>##QuEsWxFcW1Hn^`dhc}8QgY8yQ$H&|*yDg$g*<kK`RK@*$EuZfFeh@50A*l9|
zx-YwYPk{mHZtG5|$+}UAxij->nmP7I&*aajf!rTXn|#%u*E@2#>oG}d^w-g_s({EJ
zjw>!IOR3J)L}c89tvSWvaBnHU!#b)E$5SV3TdiFrM~`Rkp{V$Iu70wz%6w&pR5R)C
zZrM$gH(J-fgVv(UsY23u6HS%thykXt`Y&-;_4QCSegeRC%5LGDnxU~&Zy-n0WG_ja
zJ#+i|g)8wf%+yHvZN5M4#5}=&-}!2hVrQ%%{;Kz${Ju7~(S7mp5v$kO4*lJ?4caTy
zw1TVwm~Z@c;h6TJfO)BTZ-dU&7n9z9idp28N#4p~8tZq(L{Ju<+70O|I@lt--)uC&
z;J4APqt~Xk?mK9>J{07PJ4=0r@tR!$r@*1`{a}t4E5<zq5YRRxl5$*D$<9|rd}8|p
zS-sa{RJ|QhRG$8j#ZcmiMe*D<>lNp%9g^6Na`f%SHs^qddl`A{9SM}!hW4@N^@dIe
zUksD+LFK0kEU^3&b)mlLwZut}lA?^k^c>UQp9_l1NQotzj{#Z9^Ff;?svl?|#e;>a
zzw~$K*@!<SLbPjjHQ1#MyvLyEXY1tw0=tq>_=h@*0a&5l<50Fww-E*RvA_FSek04Y
z*c+B(;@VS~pn5=ULUxgj##QHM{#M>-A3#;$QR{L$Fh<BH%_mZsFg-P^2EK{@gK*;;
zbQq_Us}Um}afBTicU7)Hd@~QqeNz%^rV}^dDlRH3CqTzIN<;??NYhc_Wf^yy!iJ)g
zp$SPia@y71SfmYv4B-s!aVtSvcus~@Zu+8?s`V?Vt({cJ+1A=siZ`w8l@x1~%@r%^
z3jU4bS7~7&LD%;|dK2?!NtX$%K#a+*4$H+tADj$K)4*@zX@M*gw`UdlwBddvKE`Va
z>#;kT#pAW;Bh012H|8~B88p4$j<51saa^Gm>}=c66f&OkypiwM*|q2{6qD3J<aVOs
zZv!Yn(DYNfU{xYcjmvMA<q}%Im)2{nYO$A7*9rB|d}j^c0lZO%u@5;@xd%Q^Sda#Y
zZezC}bDQ`mKczI&mv?0;E{mPIai6K`8~1F<+Qz_#il69ojfl@D_GmKpH9wUd^i^A1
zwxl!a&<7EXqGUBITh3j;MYt&4mk*VkT*wCvLo{Fc?A|`ibjI<7E+R(PkIq@9tBQ>?
z5Ufj&tB*s6u*WF@9;gO;v<py2>qZUDIVBfl7cejy*=24C9T@4oO%h9e@>fkgHcxo8
zg|dv;3!XFT=b#SvTax)aUzk465ZvtI7^#TR<t{9p7t9}yeo&MO5$8ZxV46Vg?$-G|
zjz8;<)aMSmgC2{|R0u{ub0a<}7;^)GF~QGJvQe;-9v&ytll88V22BKfVMEWBoXak(
zgF4kxU;wa^L%;zeZdIx!P-Bc3{9-FQOaNoeDct3_$K?1x>ff`kJJzOdri4XICl%ai
zSEznNVW1!HL6*egO>$-&mc?P|AwWx=^)uZnJQ3Q#!2IMr85Sr!5Ca4WP@)`SvDtVA
zx<+yOxQ@pRb4+cJTgD|OIi)y-q{vT^t$eOucb&MeqnFZfL)5`0DM>H9z<+m%N;2#&
z@$DzOS7d}D0tuU1RtC(loYgL7EJ7r)EXjDIMCCz(o(8Rci=<QnyE(yfblj3g8VM8|
zudg;M+*0JLiGJHHdlrMUP6{7_VjN*N=pUa&{yMx~c(JlG9;`cq=I<z)pB|^*(*%Ju
zZr2bEf`Yb{fcpDi6w*%kLlX?-Da6FXtbrk>#VqC*wUWu(F40~PdRavn0%oQ|DZPtd
zk+m`*>x#Mt!4}D}NIP0D^`(vL-mFU%wdj<Bo#qOkqsmhsc*M2K=g<u#xYTXR8g;7M
z{fI|uQIHq80A+~!m$70H%mFm*@8DaZI-^=g1&gEnhsF}+kSUSuFv){|Ziu&`4e2HA
znvp6ox#+-~qF-Ak=?+vg!Qg$y$f{>(i>#`+s0L=f`lhVuFp-5gE!QfVmoa1UNf`Y!
zr4i@FTV_9rpCG)Ii|-WRoLh(I<GMxv$HbZGd_Nnp9l36&9N?BVgD3~?8C4)ef8pmS
zhy>4om;LIFdw!+84b6<6ph^$FPo&ly5brR!JzzpE+=Ildj?XeS3BaI<0E=h^(fDR-
zN~(u5xkIH-8+0p$>=@XtZedWn#&mI)Hv2m-5^s9fv0~kA;TVQ3=o<zWE1_Tbdta_>
z6t0HisVa5ft;JKV-LmJmtzqaXc*FQral;>4u^}x*Gq4EJ^BWO12vPl4XZ8u+I*37N
zcK!7T@Z{Xyry=X}2h;&wL$-o3`5y7a;?7nDm=eh0-nBu^4CeF{OC%m~;;=MJc>@P+
z=1b14G5kMxExji#omeOP5v6CYXdNu6mv#<F(9;fmOn51(7`;lp4uCf90->eP7650G
z>M!R<yz-%gQoJf7ZrgMCBX1i#Iq#bhmrTrM_Xkd8FhKkt+3=w$=;JoTnJy1mYND(d
z&mNUm>Do!A_$>+Xc4!LT9fj<Nkh@00%tvu$Qat1i*eq!^j|FfzW21EpmPZgUPgtS+
zUh^xOegLRr0B|)=2x(@?s=#*Pfw(9zWca+EzVf|Y{p>NbknYQj`L^nSyADxkDJz+<
zDM+w<(8WjDK*!l7FMwyb=ug(xv1_9gwG%bw+iJz#+EJUVZuUBPvk&|{!V+El?E!rb
zLT=8QEIT9;|0a*ShP-{OsTvE^b!xeJjvQ$2XC2rJZPG{f`bEv*r9}|C18;cqr9`wk
zCI2YGUU$>;r3*<?BwfbUKpF;^H=U)0{&=KVI_!)Y0X_$v!7%2Hs1;W{@KI~)Fn#Ts
z;x}-u(L6s@?-JOSaF?k~KaQT<CJ*)_dUb)G5b21#g)(Kz=NCA5YNrzof?FK17nAw8
z2UxQ4b6EFMDa0!+VgmT(k(v7C^WmZMCqDoU&1}+p_JCg(Zq!5=UgTk63ce1z<+HLT
zpX-X>2?YIYAj=DVH4f)ud$|{Ur7t?M_q3c)Zi26tA3__~I|ZkNlyH)jwGrL}#=^(r
zWMMBi(Bj0@R#yVTY7H5h&Cj$g9<IH_h)JHiYog5>U-MQI+iG8jPL)6nT0kXMQk54j
z4&H0gGJ!Q;bGK&C<2#v2;P?%F{ZEkRBuqMWOGAp3=RrBy%VdZYN9&Mh@^qHxZHJ5Q
zW<9H0R&R7u%TZ?uS!F>~c)AEch}UH%T(}X<BZ*Ki(<H2ZWcSb9e;=w{jZ-5AC#qRE
z%um+pyO+H{nzmu|1v!5~YZAaH{1zW6%c(EbeDIOac`DWPP?Zv&0AiidL^-nhVAG{N
zgYI>&KSI`zP~6JF3SWSTU-4D3F+3OXqFG&_&7DN%-I?Wjv70k!1fo(OTJ0*c0=p)l
ztx6^0QnU!!!{`X3){I-S4Fs0%&l2odNF%vNK8wMED&k?v2ySS8zCCfc?mD3(D1A1;
z?7EK~_1Qa~p95tb0^{to)6g)fJ@2gm`@H{tyG=<7Sq=vFqb&_{WuFv5M0Ks)wJNMR
z8|LL#<1o?$9XOQS<q8GK;6_C}I$CRg3?X=0gcji-Zfq|QN!#@kKQ9Pt+ZhWsY<&$u
zMd22!$5OoYce3xWTR8%qK>!hQ)U5hR6?tX^HSAQAtsB5b7L_@mpd#=w;OgH=TDwTL
zI}B!oA+J)z7lL~Tf6WdqCps}L`DI09FOf&!5`uITL8k#YzXHJKMuK%%ctuyIe0Yjz
zqixhX*{5OD-dh!^q-Gp8#4;An5CA(OV%G3j!L4^r;xi3=8`tnvYFLN}(+%i~n$(Kz
zzE&=;v|-BDqTei&y!lXbZIazGAI=l?KhAo9pYaFX>$4J?F)2yeC0mt!NE8M>)ev|t
zP}DdddQM&PVQLGDGw+`iBZ$Npnx4>e<&Z{GC|l)pbi54k)Tmqeb)E@L$De}O?x28q
zF8D3*EKi0l8~+e=jAS#N=nDf6OF1YqO1X<tDN{}AWY496R6j1w+3`+Ts|haObSa-9
zftbpgn{8I;5wt62U|Wi#{!(3{$E~7OHY}AWtT#lrKyP7F;7_;pXe@3vf$#!3<pfqe
z)k4DPmrbsOYQq)u&NSF~?8gg7!Ar*T^p9L8maU)0LV>VO%e~c)xlwaj#cB*rXeB4s
z8mmNtbVBQ|j?ghk%|Da|;;2&0mYwEz&oitFQ01tr54WwRtikD)uc^3(?w(Jb<3Sht
z%BVS&Ej!%-DdINb1XXY<C1He87JUc0njvDm5zQU~`<CCy(4K~LYVawj>+wFhf-2&p
z2s2b5yMcSubRFe#!)_(ywjbScmscqzEV1DGV+yw^liQdJl4W&Ldu~hjL}GIwyJMQJ
z80%Oka<3pi`xIjc!f{Agb_Fw(N~5ZdFZHA@kghpa=&SV?2^efMPQA^jfod#ow1)X%
zq4MTjFeZFa@~$rKX#PndyNh~yajMTwXJ!JK52{XG;1OLSCHi_Zuw6Fe0*Rv=<z_k6
zYLfORoGbcf8BLYn>_`mV^sjjwN*$z^4>uf2R8r#ZX77FW5sYM7bummz*mu|PbPn1A
z^fA$lV%Hc;lE{XjaB-4G5SU(mUnWB@LTgN5=J!V_oTF}IQY1TPoxIdwjf7vf0;l4s
zSikGK@kcPFkj5f@aW|yAqsruj)O4g=v>1{G?eR)e$|+-rYi$rb%Ft$7HeTGF#$MX0
zmcjH7h>Og;<i&gtosK6aksT@7yjfcVKO`M_dNuMAaEK(qa|RY0?r(JPqt5Wfx6z!D
zMI!60EbNZVux2aTm{CZ-yJ+|crRrb_pX%6abDF9QZ>Vrq$w$e~%zQkz@-eS^+h9LK
zgL*$|B7onB<%|WEv($|W*<ToaHEo4`*g2h5++LgbxR{L@ECH&Duka<|P?aTz=w>=n
z><q*z%9e6^`_jdPT3KSm<oEc>C71~_gs~r>7+Lies?}))MseqSph|JB`wABain68B
z1;lGXqpUEG-rLesqI$de@L-ChJ!LY)Ux{B)#Lic6&*FEU&mLP-9CugWp^cZF#Ha~b
zVZMui9g~a^!tR`x5Ik~*T5;-rSb@ym<G9GrSu;DsQ{M(P`Y^xwlTK?H9M`MGIgFGz
z5)Z4eT|arGEAKaOg=LPyN=Z7-P*vD09hSx4&Fn~3uvtvF`QK|<HzpvdyW(<|B%M3y
zE>r;}TXN@|0x=}x<RS=z4oIFQIWAu~<C0dk_?1f2JgR)RW-7d1D|+7>kCN6SYcjLf
z*aIIS!*LQ#LiKtp9VFN>j5m))-H5;Llb3zz^Ggq_ybhcm@K6PdgLuP>^S$)_UnrUX
zTr}|qFY!Mpng6ml|KHm?{`%Y>l*}KF=l|TE@O6FfPjcjc`5^<_*R9omKY_-;_*V|-
zPb%j>(KmmOv0?fvpY>mi4HF~t-}XWO(2)3IY*4$tj-5>?FNF1ZOUP~(GgcK2UOQ1%
z*?akki|1z&8zDKh-=5yG#?3RbXty@-Zv(oyoj&?~{J!2fZ)aal@<|`a9O@&DWp&4R
z63DXscB<PUx8=Khk)MA>ITt%&CiX_j<G8KU$d-6eMel(O0_fT-Q(aB&>s3oO=csFJ
z@eY<@ymM@w=JAyzRc3pSr}!CS{dP_p&UkrUxbe{Vdt_pmC(tI@x=G>W-QV))A$Vv=
zwvi_B%(<v|W}^O9pwi;S{`GVZK2xtATBuuEiL>(9Jl<-PcC0fHvqkE9YOk5E%lnfk
zHrivmGt?T%%`Kn%Mp>8X;9z|Hl!fti-235x8avJ}_ppE^kTXEbk4|2qqm)<@iciW>
z4=w$z$=b;_SR-udW$c}sJb1BNoBFoT{RvtM)gSM$Y&u59)n)d0evTD}dxCMDM1LRs
zIG7lG1G7SX;ILP@EgS3w<RH*P^jmF6`<aBeEeKX<-lLaAd7?D?adt`80{e;m#d%9A
zOr=qH-y#2^bCw3nIus#X&uFAz%%eI1aky@f?1KvQ8dL=`lZuB$hc<Vxx;c6MCd<!{
zvlsP~L-USBbO;nqrRiCX{4+>%0F#BL5bAIqu(lIXT=0c!u?ihgn{?}=cvr!-w&T4q
zGv~+Dl@~|8NVm`&-;Z>O9*#S`OKR`gK$lE4H7$7bG2({tB_1eT8WM?4pOXiVZB|Uq
zw*;QBhwXaBPG+#5!zQ^uZA7q&Yw?I@DrDHQ4&V=K%d5%viPPE1ZEi?p&mq;ygQrB7
z=Oa+MarRFzJSQZhc9-GDo&>|^#5p4a5f;G^6~}5_htwm7V<XNM#B^|UabtmNTu{H+
zH$yFp%^0qo`OYOL$1diYc3IUY;nH1;?qQ(Z<cUHeJ~9?IDF9&i;LXjiH*ykXRV=Jm
zCaYX*6Xw!KzSVU^VQ1OwMov?1@lX(ozT2bMBv1d&9cl81#UdiY#lbZe#zQgMPOS%W
ziKFTfjDz%G@2<KGUZ0(c*k4G1ue%vt7Tp}LEku`rq3d|RomShK$-3u%lL81UNXQ3@
zB-I4<V{GGQbK7xn!qAG*36Ux<APBFMJzszR;QBZ)0u40SoNkP(ZS-lBQIcC+wS?=>
zTS{+xmO~J36Rm(GtX^1jFiPODC9=hQqQ7_kwZU=Hm|htiB_P>X{6aaM-jt#cp)x8m
zwG(mC;*g+I+?Kc()a!H`B}!V4{Op>)wHJOR5hEQvfrvgXX{I_c%oz}$h?Fqnl%bhR
zv28!!EvFF!`h(5{sVy=6h<SR`Qb&=XZz?Th0Y5SgIJX#MaFeCa3^JxLjYkvH^e{k_
z=|R_8Xt@2&9GfOGu^OKQgLpy?gH-_CmLx0hmwqG{0#x*BKF*$eP&7prKgI($+e1Ii
z3L#h=8RR!lF?!$#+3An9J-!cGgxq8KKrx<C7^od_V1l$cWJMKz;g;xP2>3$Nvr++r
zY!y!y3y@U3EZs5D2w%Ouvc|eV5FbuZjh>gnv)El(l3-RXmE0kG(X5~f;+$9W3z!f3
zg81b0(C{2kI54L{|DryAQhnTM1t4E?mncG9S+nUeC|Xb=CPu6~eI0A~DhWW;)xCK&
zMbG0nN<dPT%ZzU*EwZ>kj+2K?W#sb+oN`Q;@BV$Lg=O?|v}fCln;U~m%XE=Tgc?>H
z8l37$i2&@Qes#lEMCdDdwX-a)c2~OMda`rz;X@dOZYnkXgEO@Zc67!*T0y?k{|{;J
z7#(@PV2#GMZQJgSZKq>((y?vZwr$(Cla6iM$<2A*Id}TJb7svm_kOLlYW@CIAFB4=
zzYP&?Ji=^*b#dX^5FvwYm#P?pNe@5>f({hSd=6W6wGdXUq{s#g1}8h7AC~Gqn{TWc
z0p1HZ=x|BZ&e5I6hC(nDj?vhyN<YaLY~6m2T(OcD%t$Y;7i*|crSjp(`5YXn21OY2
zHbcQza!9UYBF5BuAYbimcGP0%Yz}j@-sAY<0u?xu=plZ0t0?#x;7IR}^rS(Y7CmVV
zXiF$cE#oVtwxn8O*&w2jtNU1S@y5ONEI8Fku?^P9uiXOguPv#TLY#X`Hoi%smnoGT
zUC2<a|Jax5SB6I!4s|J+-DkL&F>@AV#1e^V*P|pxkYvN{kxOxfzOMbd6k-M_1iHc8
zheagW)=9Puy$8rtL?Rjnm@oi=WpJY-oa*9o-ijnL-4v67f=_d!$$6@q!!G9y!eOa|
z&2fos^3JB-a?liuvxW#$%<05gOh{ZvgJTfC`rs(g(ADhdH-o%GU<#;rU8Lp~MpM!I
zV<Q0E0|x3Gs!TyP#M9@yk7)iH{xkI5f;_X$Hr5F^vpx(ml+5ll#vSG!j7&~|<!5gf
zvnIK16I9vwOfG*07&ETy8!Y8^M7Y>_dh2+Cz%T}FlT0e?MW!P*6)wRkJ@@vK0uKR!
z_pc$^7)ye%As4Zq&8jg;yX2?tgx)lklS+AGL2;w|y*Oz#4c))qUOY@o$%lx3$5aRD
zAvzI1s@<}p^KlLVu{+1*(G^^Uo>0?RngIy)8)o_dSepan9-Ye|mtDF16im@{+n&}|
zmW3wVcgpmM{Eja6n{>xV#p3C40~8a`p6^#h`INl9GZ9J54JbyD{r4$#p!80HI8gf_
zK{RjzTQ20v2$CBSi2O5_=I^zv1kB0>(~6W!LLgI+p+a?oDHpl3*#iIo5s9N=!~9(?
z?8yu#EkSl})-CAKYqC>4nl$UaZN`p?$){jXKj<!ZSo;`yA+aD%)tBN?kuMQD<m|fQ
z>$jD~v7w;oAp-giXNF^KS&ml5kd%g$ap1S&wy{_&LnXu#tHr42gzWuo6-p0ca%+04
z=e`B;pYUV1!UFhR#7j<Oobc;I&1P$=3Ox_T#E$ic;G?-a;k?eEMav*KU(OJ&B*G`7
zlf+@8`;N%(42ieU<qNc<l;?NC+981AEww-+r-#o;Q#gg{O@0?sBc@45MazZ3`0i&g
zV?mAK3tc$vVNBV|IOuygteVV6x);DM^^JXg;f78|?sBMMXL~h6n9L!bTJ=U#k_7CR
zRxOq%kONybG=$)8QUUpJ04Gs=gFaB#L{H_YD9HES@ZRpdl@iC`(c_P~pv%q>4-o<#
zXhw8<bWm23D(YfkWY7tI`BzlSQ{8L;c0ScxXuvxWPZ~*xHgfN-Zm#|S`Z1G}*X3E1
z&u|<vPOzhG<()7;4AeIri#KN5w_Ys+Ru<zi&q;QJny|B)uxZHbNI-cdP{*s@Z{fb)
zM)dW_MG6=@tMwR{V_;*_q~F38kB-_vY8PY`=~tm^lop_gLz?2??2I;Z_?sfoa#Jke
zA_NTz-KL0;ILN`?z0V<c409!CIjBnRJk!K(e-KP@8A~X_s?$(_z!HQ#v66UO8=Gb-
zXGk%RLK*s{0AG`8;jFc!L%$I@u=f}<rqK|Xd7Uaf^lEVrbXnC9olRQlnX@d!HQGv2
zn^jD;aSx;94=quAS`TeYR5#DBBedrF0r#$4qJhT<?CE&i7HLYQkX9<hC^9xTsDjRR
zj9n4k#Z|w3;$c+!vK80g0iH;n(UCX~A+-@E2|q0w7-#9rPH8p!1@=322EF-Oc>w78
z*D*azti6JW9tKoadieO$&oR;2+9kdFSnHCDrcn_&9fkwD>okm`r1>u@a1*Takmg<W
z)5<UQrlBlwYN0U)XS)fUPJ@Q?q2AQ@)wUd5#;{&5ac9nBPqIU&0tgX1sbyqS7pv7P
z!*;|7SJG^0KvZw_%qt3W^P{7J@$F6`6}XGW&|X@}yJ8C>JlXgLD?A=xZgcAT0W&#z
zQ>8Vagb)y`%IisDByz_^g5pfTb2JTg8gz3eUzS-d)iHGlEILED7T02#ZKQTVPJ%n(
z7K**IcxH5=cxGx^h(2l+JVsvl+JHa00;?h5HXZMe-Nh=FY{3)P(i-E45x|eRi;^Qk
z#A?~NpxP@OGAC#qeJFZ)ETwUQC61lM*d6g)baM$CbF+MX1RWjovME7Omql&Syp6Rs
zOsB7yP-&inRnYESM;lVB)BYp-%l?<o&rXuP_eoy{6~40@^O;o2hnDZawk|e5)-ujN
zJYEEJ?q(hM`&fz|z%|XKI%YgZe5~H$Ry2?W{|Ew>Dt!5G($6V`u~h<kO>WEech(Cp
zeEu=XxqZ-u8xl9wjfOV7T~Q#pi_Dx?D7EXdm33ioJCAKv_#A0%vj!#v@3kA|0%3%_
z7kNaWBGKKTOMLukJ3r~*PwH4Q&*=TMQn#n~?IayP>30NkPiKg?W*WAsX*htygtQC7
z5h#-;czpa1PWl=@R>v`-&8@Dy@*G?kKQDj1yVg+sxcn3y5jbrB9D`@CV$vT7|CKa;
zv8VqeWB<OT<S%OUf04%jhzb1fN#mav<Nx4(f1Uq#?(zT3ss4}T?~e=n|8;Xa^PjPW
z|6qcQoL|+4fAj7>;;X0R>+ar@w@ZKzp9a`W{c-G=jgNf<2peU&-Yt}1`2v`BX6PhF
zNHu@g+3)Rf(psd}eYuccv~Sy*BmMbc=l0-O@0~5OLm>Xg!8eNz1_+^aLv58rqG9OE
zl8+-R@O4_%9|%vpUay(Kg{!AMNbe4-#rsruZm&Hj{NKy=i;6DWyEOuQDCV$HFS}gq
z%7C431q--Y(5wB<BFfG4UgepfgfU}Zcxy<xm;3G`FBYoY>n&Uley}JaYaMs0E)-{^
zkb7WMYMahqg<)N&k)Nji{1nI<7|V9E@PAQubZakB&YfM~LA40_nYkfv@Y~o<`bhqU
ztkg%#=AipThaYR}{0jE+S2;WSXs3(cjS-jYB?Q;*j$8+#Yn3a{a{#g5;=>Na3Bqv4
z_r0C?sa^leEp+Vlh@NHE_d0&AyEM<e>%zA4PDFfk3xl(rxd29d^w>ANm#_&GKtu*q
zub14~){@GEcd$`BMkGRf@Pw$5%TM5-)>ACEdk_(ZlaxGXqi&d82pl;^C(=EWt%T?z
z#)D%jD$UHw9ie{VqsgvSI#u}-9+Tiw`-)&JxNo-)z(S6{WXEXkbt1q~ICvx^WB~SB
zK=^==bukM&cOv8;Z7{()@w5p{cYpQ)U<#)W+;wUi%AcIQnqcn40EdlbLM&rR&-YX}
z*v>;hz~mnG5m?mu&+j7-`|s&jjh8MRp*$4|pV4`||5!?^N@qnVW@){8I-QbGrEBPH
zO-hY_Q4+EzUCm&7sPRW~5q-LUkytvJbod%t=8ZgXD_57#eDzRdY*3!{#fAf}x=-wc
zGZ53{7<YkAp46}o|Ec-5Mkses90v`F&~Sl1R(*xb>xWconz(Ss>ZZ)PicD?0zZ`vr
zW`=6JFNI`P=`4LON~3Cn`6N5Hp5e}qL&@BNQcF2Of$w;NH3$>lSkw43aoGoznB%7o
za`eyDlb`V~q9Ugz{uw{ZmDi}#p_nW|91WaabdB|q`{`+3KCjrV^nbq+)V*5Vx)(WX
zE}W-S7Z;dm*%C*%LRZ{neLi6)1`ez^y7gzoX{1vN$Gc5N9fLM2@ac;<4G;ka^!z@`
zZNKuuxML2~@8(4+jdEi#cdw!iI7F)Z#golS_$w}zhRC_+k%KQ>^T!B44Om0OMXC}?
zinrGE-b)GRJ?cY1T3W0o$_zyyDf9_6EQ&MY07x)Cz%Dt2maL=!AdGFX$pTv%Kb&SN
z!35K12I*=(V~|XWd<rlbo>K&5Ne52y;SFr0l&U}vR&eaA$EuQT=~)?q0w)C#7z-+&
zL<dHg&D2E%U~N;RxFTYJ*BJ)HuT&D6#gRg*hK=3Z&~C3V*0flr!1*B1NYV;{&;5c=
zW)`0j45H>xlJ98HazbH&PNW{;Ku%(1gT#KZtdPCj0$C<t%HeQKdByeSsi%tFwiAKr
zsQE<T<P&vX7c<HzMRFPGl^9USk$A%^BR^4|<}-td#r5sMI@#!c<Nf~5X5m%<{GwL?
zV`kYshuesSjyt4A<_sK<w9Ixtj?*2!oeNkdXobBfRRBiX+=!S42<m=BaxUCrV1HN1
zhDw@B9qhVNqG4~CBQ~1e_rW?*!@*1-W67@|0uXBs65<O|BF@gLOThL^HRK$<O-MRy
zCb7=VI_D+T8#;`>8O%{+shc>qIQjAYxnUAEe}oA}VK{}P^J+B4D2~anD-lr|uePb*
zF%Zb<z+wx_G?sKB2pe7BF;L!qw@>5oXUuRYBdXKvRRQ<nMvoXDO|l2dG72LRxFDX7
zv?`~rU``W8mGz#GX2&zTCcIjN-|!|uKK#SU#O)%?8}*hSKq0<v6z&hBTnIYclAePv
z4IvM;;o^Ci{b5)=jQgF$+CV~WZ%tg`f&^dFBPa8E$?oR6ev5Q<jJo+;w#r$O4PW7g
z1*PGoQukI6a_6LnaX>ixB)VQDc#D@3v^$<YH0W@Kv_>6|#~>ik@#w-E^-tgS7veM8
zM3WQjSZ^F<Kc+=eAA{zIaBOf}*Y!Gm7%op-waUAxgx2eWM!b0_GK121j#vWJ9w{T^
z89&<Lsl)BH${ab3SpTyZ=0fyDYl9pqyM3qQq^NA{`!*#I)1uYX2kOk39m)(Hd2^FN
zppU>dKJqMVsRLwO7H-aB@qWC7lgswV*v~a7!K^@IW?OI%=M8x~Vco#rO*Yq&Aj8my
z`2dS}M1qvgIV!~JG*wcpEN*B%?s7-DFufeYnC#im^+78ggn9UR&NfcDtnc8!C0L%$
zaIoYw#0-+Z`7wW9o{tsbe`l30o-WpeAJ|mkacDgTllkucdgt9hekDLJB^g7mjTgf7
z)sgJxZ35@Yi{eCn#ROTYX6u0F;rDsqL-PGI?NA_v+G6nnf9Z7hx_O*H_~#=LRm;a&
zZ1qOD8EAr~hlL^tvL*()U7oYC(Ikbv#T3tN6-o|22Mu5F<|CZy?e;4X<hrbSa`o{e
zVbqnYo3S>2ksCNqHrpKD_gKls9Wes7{(HOI;Xb&8{incB9``pCXV&&7ZU|vbv<*ZZ
z`rg{R)*uiti`>;9cz7>fH(>knNr}O3sYvg05+pqgPJI8EK3ODDA_zrGfopSHW8hEd
zOfy#2e*=g9?ArKm;Lu-2z<-HIGXJA|?F%9OO*lmV*KV&Jfd&C1^Iz)wzmDzv_1Zrz
zi~r{t{(1uxy};K1?@wC-jelS^hOZGFg|9iBFAT-X`8BFT@THn_e7Pc-IN0bIzvg4v
zzg}ej8%sN`R)+fYhCeN=?EZCC=l>Szzs!sO@IU^K;myCki+?`-A3*JoDV;wQ`~PKI
z$=B!lPqg|U*o=vp^KbhqD=^0*SBI}Fs}Rom6uOJ&*4?>>d*B>>&Ir@G?QXGvL(L;F
z6!QfXGBdhll~q~skyyEhpBUyQJm+5*3S=M4y54ri9H?f8bv7@TYP5$WppJ)5Uw6Ws
zycp6d@Xf4}9Dd5o5uGxpI(WEEzS@l$kHU#=o^H=Bz3Ko~^8%@GA@a8-qMdER|M*<C
z*IJ%;w-peG4<%4=uU&p0+IwF&!WP?f<BEUwvCddA$`iPB`;i=}QtJv>A|)Ja8km3z
ziudE)y-VQAwqnsU6@tKJi<7tVqwVEJxiTz4^RqP;2(CO!mo7kJ?%oD9ltAXWOx;8K
z-3_3@p}G-SH>=#890d@Y2=YB8I*K6VZ03zl5Yp>Jok)*H0Fx12bPhk&*ZG1!a@<3n
zgLBIxn@l_azwop|fe5ybk|?@R1_?pU2U(o`z)<A7E)1rDMJC;TVK2ZiJils4bcko%
z8cz8+$nS(TrQ}e#2ng~s`zBCy6GV}bfUW4CNH~+~z+jqAGDr**RaD#?77r|oMtOpl
zA>VN}B;|t4^9mku&u0;`X{M&I_i?9`7t?$fTWcpH+D7B3fc;f0s6g7p-cnX>%1gI2
zvD`fDDyXigF-7Q$B}j>+avkD@1df|hn*ACw0NPX%z&u2VdzVU}?y!d<VF5qzs4npx
ztX8m^y*7k$oSY`OA*i9WQ5BI8sW&kNR2ILzIgreU2g<H?vIg!;aRn^5FlT7krR~QU
zFYhllMZ{*iF#r@gpN_@j!Zx;bngFIN*FT_&aimkGZu$EK%mfV}joPqVsRP*g0vs&}
z|KOcHajKs#=fc)Q6!XRj6ALoHDQaUCJ})c?_$F1khb0J+_>$ne;G@Vt5}(;!@?xCo
zw12IX(lsl(92LgrF<^YN=<Rj&GkBNf!9(L>I&bn_jOWBcmov|Y<xx+U7;uM(1kMne
zAHgs}<us1orwlMxY?M;xSK{Z2d4ll@ocredH>{F?=0ypAEIx5LMN>T#BUuDIJn!U>
zZNMt0!j;V#+l(9^3M)9l0%ihd?P>A$+oloaqR#6pw-tvEJ{=MQgh}u>m5n}(B<t5{
zB~V$5x9$*D!t}kxZkq$T5YYO>LFj6!qawFc8idJytFC2ucJ1b;{!#))C|<z0M_`PF
ziu7~|Ku9JS0s;^#XjKpsZa@dr@>wx4RsuIIwnzfMHi-l8jblmd)bn7N8IU0VZl8)g
z_T47#A>rA08`!U%lDnYl3*C<4_Q)Iz@mbP`HqcH|97hMtXPiM3F%$fdhIYa#y4&(0
zT|IP3a(pUfU6lu3i{9zodqrTp=Cbkql1e>U0Q9A3EF+&<z~kj7R3E{ltxA@nCt$f1
z<4DAq1olW3`sS3GN0P6UAn+Cf^zRCT71H1bHrE0<&(u^z90+BQ7v2o1bv<!ZvWt&&
zDjBlo6`ZJ_Q$inf1(w_!E>z$c@wCYphjGNG89o%j<OFjtY6K%Z35{aj3!-y#hB=jz
z4Oc@{?In-d<sXNQxUt^pq&^%D198%(UV8^Zslrz@aNSx=0>Pqep}X}twE92fK3+k6
ze)j`&(+K9@+S{_#MenlC<CuyudI>P2Y%3fo)eIBKNZ!k0gRTJD1Crp+B~G;D!OJkT
z`?Z{;9Eip(xCf$QG^4=89T=*!*}m1wcjNIj0`H$c;gASKi~E3~JoznyF=sxX)VT9-
zuq4BqsNWnR7_ywzOnW=231<yT0H;et->%W8%50#BB`zv0Xo_=nL+sXme~L!vRZG~-
zKN)XmPJj|l=fDQUro#;cUA=7x7C`6SqEw{?@Mkuq;3+NS%uzh*U-c2{jFAPt8l-|D
zcf%5=XXv43F*wCx_n;5jGMtF{o+TGZz_#yk>RjND-kU;gQ$dLMax2k3F=|=<y;&}$
zC9t_tfS{R-Ax(u)5EM}USTEL^Ot^n0k=XLk5%>CHy)6o3g{J<8#9{nq^Sl)x+;LZ>
zbVK~DaZ-~;r}@X0ZO-x(6=XxWxiQ#-RE;-Cuwz-bLTPM>^zX2yXVA8G<)np)xpI-7
zY1*qzmL&Li5@^y%bbxCIFh43z{p(bChz58MXdj|6H#%a(sNv;tb6Aet4edE8xIxfk
zn#P`ejquK_EaQo84de|_3^OtQf`ar;8H+e<)h9>YJ=C;uN<X#k(bBNhf<r|WCOmfr
zsfr4!*NqYj)dwd-v<?SeNwL@QL;M^^9VX7QqDrO%|5>CL31#~;|AbdsMgiv+WRv&0
zAMs3dKS;CdDROs_Tw5uBqjMWCVgy)@4F?v0k^_P`XtSblmNOtO{W`;_Puye5T?Olm
z_T;KtdxT>#v&E9PLRt3-w{n0iRvOgA7JU&#Sx1&*54CjKS*XCN*d?61vrtU8)35-k
zwDb#MHA?q}q6y}UAc&P4&e6K+C?R;=80PQ1G|gY8u18jy^Gv6OgE&u~vBuJlZMS$$
zj$8>h=1$|r8%zDp_^C-n7SG7T?ip-^F&|b6nLzKJF&TgR9!ln{Qgj23I8a?#lKTA{
zi`<1Mb7^hjCOALF%7jaafmzHV+j=fY8u3<Qxs95QH0Eys4o87+-%U3g?1$ORpVE~T
z;%h5<fF63Bei-))OhDa5F1Bu2c7_VWM3$5CknLl?PQsXR13DI{>bwfPUVC3H$e_cJ
zYFNOqp+q}q9B@5f&H8|9Y@020n#rlnQ>R_nVJD`}>slbrM=35W*N!K)iR)@)vB8`w
z$(r7i+>L(A*upd2lIt-C(~~0{Uso3ql-zKttW8`prX}D1(cUR4%k~>z-5i-GUgFwr
zXe|ECEogq!RHC>Jd+^6?%SCiUoW2PWLO7IKqyoMuwXxr?*|%J8R#zBD{bN-Gh>;oS
z#bO0g5{z_-da+sd#$e1vN1(G-VHivTfA$XUTM09e6qsu_DtFqJ4L(jeK`*s}ff}zW
z<9D~;y?F1xvzKgtP48+;cdVt9-(Ou7MtT6XgPfEa$hNizL&r>qxR$8GZ<O4r4@4jO
zv452J{^M^Ry2I5XYwTkDbEd2-PZi;l&A6>S?y+%wWG(!)c`w_6p6)s79?J_RR9nv%
z@T0P>pS}BcBp=NXLP!y>HQ3d#Qhyn}K=A-cylfWpnIf<U<KE-7%?BXU_x80KofW?a
zA7`7Dr`Z*uRI*XUPJ<mU*<U~Z&FBg9pE}=vgQNdy1Nm1R{g;E}f5y>Ywv2z^?f>l7
zV`TnQ9{fvJ{Ko_Q9k}|Zi{%fv%0Td!#7Xegt@j6V{r{sYviwOc{)t^#zJUAR)D<g!
zXhf~Dp?&Ro=+(o=_|#CzKcZ=6iz}LkX&J6yxJmkYCmz>uwKgLqEFV{0w#YXQPLhP3
zj|KCPOhs(AOy9bD+%Le(*AKrFNu!^x6Dj3n^)WyL^9I)rxV{dqrmov2fcY$?zCK1^
zf_az)Ht{3D6mImezqvkJzRv;4km=Q^^^6cv9_^ubVQ@d3+oys@27H2MK%-G}sQc5L
zNpAR+zdzz{EV))+S}b6YsRDZnPF)08Yn}*=n3NoYjpc2`7d9|&%iM(2>a<Q2*Dgaf
zT_7&mFnl|;Mj2KI)rMSw`v6@`u+t@1KfBMaIL|<?N_jE=^1CbMWqmtdk9s=NP@&H@
zpmvJXOrL8xChFXuN^?S2(w=He598|KG-^9_-Dp=?veUU;Xh5cBLH;exQCw~UA&1Y%
zp2cZX@$`zGf(Kum0;*eB1rI)F8PfnE2#H?qF`7UU<BlncfNZDp5Cs7DN|5a<3<!v-
z$5&B19d%<^6RQ72^*~D~GLQrQvY=dI(E!5JAZ2H!Y$k05rqR$O2GJft$=WTe^0j5p
zaCPIQ-KmeHkJcpuul(K})~Eewt8i;vR*}fM8bz9{*V*IZZkWGXQSEvEZb!&Ths}Hp
zN@boBj9@(picboe0$rmB$fA_!9d`E4<ga^v`Q)7F>Z-Vq;GtF+0^Tg&vcu4G9@J@S
zKPA!~j}Ry-!x4(fhhx5kyb|r+JFioj%X90w7ogD8j+=~m^`Z+ig{=mGEGsmT5vAUm
z=4<AwD1`&A$=j6tEt%oydW;iD)Uea@DK!RZP&D7aMyH=Kn>uy+o(-bTPVAZ>8~0PQ
z7gWg+e!MTd67Bmbw?Y<qTc_DMR4zb98Bxpp?m;b^WP&sPv@=7CXka~SAdEy4Y+9hg
zlB?pFlzXH-IIiBubVP3hwpmQw52|i4<*5NAf&lxjAeic7d-_|urb0NevwnVdrrN@_
zrUk3K>nYsUof)d}caD+ob2*zKAVqlLW8e1RitxtUCaMbauc)8622|8c?{JZXT3$Pm
z0D6RO4@rQ29hSi0ZO{RBPJR#81H7I)@SxNF=ztN|+k(?vr`@h*U%|pN`&i@o{f(4V
zGjN=uxyY_!L3ykZ7ry=JW0^^OjZ9;Ch0$C%^xVN^sIN2t!ki+TL125$HO0@g-F0VJ
z@#AgXmkgjM0uRE;WSh+!c6A}Z`Al7gFhz(ACOaH@&qdp@JH7+3E@ImXe7_GOVwTGD
zv6kXdP0?__=CBsoIC=c_)YXI7TxhuK<D+STo}J!VfUz~tUJk3s|5vmO7C~S+Vx-oo
z5+{V-qf)Z&%7@}Ek#G3g`a^3^$j!+~@Wb${OSwd5Q~O!D4=bGp!*n|dC@6#>k~?8Q
zK&0Y2f9v*kBi6+eL<>Ge^1)A?pWxbk{{93|cB&fUD0><3Peo;o0(At2^hFv5Lfl3b
z!%H%<d}l4j*Z|*v@E?Tatl<jF?X})z+=(a{bvkwuYFDwyV)!gve6Mhv!Ec8d^d6+P
z%b6HbfWeCMV-c>k1*^8j*;lMIvl!HL$6q-C`?!`kXA1@{nhuX5L4*)dXP|R{&|zFU
z^#Clm>z`f(*iPB9Llu*ua4=JJKi$21#orrA*oG(luDr+co`SY>PR}dL$|fu1qqEb$
zm2xGJM9&$heEHO2pnsywnSq`?h7tH3P(3O-4`u^0+9v_BS&Si6b{0LEf@|sL=G10{
zIq6-&k8^y9g5p$g&<IXv#eD%3a_QOzyeJ3st%W|#@fTbEyH40F#DhU02F_UWfF(Es
zYYv^iPan{%*uhw_q)Z{O)6Iu6op&(=$R?qrGA3b`=NhIGy`9HSf1jLWCLw6q8(lt$
zw;JC3UMW2YFafUv$CV+8HzaNUr6MdZX1M)qn#|G+k^{T9!Z8Zy>oB3xEi^4sH8Fx)
z0;JOn(>J@uytU>Oa?{6?I1%BqQvY-D3Bis??J79?7}3v|C~O{HiAr;k{QI}KBv4O?
zD20uY8J7r0a&Ni0w3PAjvh8m{4Im+V_)6w}BfY_?)4{B=Pe2l4TSd#`aZLx1UP3Wh
zgUiHTLYgRBX|f}5L;J%Y*|5wMgn+I(N9m;FDkeGY_)fJ6n1*-60D-e=8rz+hl&3r+
z(PT%F!J$++fbhF<fEkzwBCF6F2&YC4=M)2<C~5))Zv-kk8G37a0xsAfr7SlBIxVzi
z47ZA?bAwCDiVLk(>t-YpC5FOJrK!shAfwfY-vXY@7<AszvmmFRc^ISxzgGu)L)Ze9
zLNgK$X<hB^;i*4K{H__J7Y9jl`UTE{ml@468qW^u;>(J3+|A_38jY_h)1A_HNGN6M
zwbo{HcF6w(;aaMDw4J1ZfI)CkU9AH%&>iJ>W+%Wn@lK-@CCCVLCpSZpjix#Q=<Z1d
zti4HP!z2TE2MOKBa341uQr=JFnLJLL4g@@2PAf=FCBO?EDq5u*iC&sm#6ETz7zbdF
ziK;hZZ1n;K3mcZ~noup4?<XL!C#uHp=Re=iIsuJCETxZ2cp7xK$A|PumD>_VzL;d4
z1GG<G@-mKti|z?#g}5xGCOX+s2}X{V2WEmpTfmu(9-W54A2|@gOuh{n_}q=$ZUxEm
zkpVp_m_Nxc)qzREQaR1h4e#iMBQBJ-364ParL!Ok-}ad9o!adr1=evqv~_2+IuVQ@
z_u=Ba3hHpeQ2Hd;s)ZhWZ^<5Ez^iV_{rb6aS{8C=kua8k+O0z1UdkWo>(1f#?4=|t
z#_`ID?u4U$J4=Fb8HD@2&`?0*NTiFwZahu42*Ay)De!O#2h0h%T#}wkG#SLBpS~Mx
z6X`aCm#=FSiNd?C=9SO23$_A_E`y1VK8VLNJ@$iQX##LnHC<0>JL#Q3-}AiqaX^o}
z$h9?R)wDWtN?vhtb%w_(YZGapH-JO2GxY@Z+nS|zhv#wimh5qiC&e4!IP+?f$5dC-
zgn^uJ06tiyJWxCt0^N|47s`+}^N#GUe7a`w4@MnOdb2{Q9^ZP=pCLCQZnW~rf%9ou
zSmsm@t)q%mQdCos6@*Ci2N^h(+%_I7MPu^nJQx1a%-c*(BUliM*g{GyAZ?bl`>D|4
zkGy83zBpLIO9PhU77HIKrMCP&8w^G4uj2fi8+~_mV*@L`WRqiK3uE`FiI=*FbW9Cm
z`n=wgb>@|xZLYU;YUWkG1G1_<_^8i2c!Sdr=mWVep>vvp_Y0?GS>$NcTQyuvE|H%~
zzQ*%45M{zJ6K=y@o<WQq^MD+mn~JazPr#vb)JOkSRA%{eo8#Yu;lIG)zuvKbVa>lK
zD*qQS%=&+0;lIxR8(8?S6aTh!`)7pye<|a#{0X}M35D6&{&w$P!k?wn)iC9#3iEZK
zK<1gvZD>IR*f=Yi)3dSdDrVx5c+2KtAD0>1F!$B_BwD&XL4keSJYM-aYnwc7yTa6z
z>m(2=kFUym6-1PpIk^iS4>Q+VR&@yi5QiRJr>PMrk(KHhj>1|EX|j*Jyl#2@R5o#P
z+om-uuAOCX{WNTnWP2L)u`;F9Cug>qziij{F=6q-F60<|3XOVu*Ugpl)0eouO*c6<
z_9eiEQc<&MZ@E*}YU4-AN~t9MoPotY9d&ec0Iq9t=HQ7^hD|qay7r%Q`kH|&;@+DV
zhi`Qq*1&?f5!6wfi`_{tNq%bBzE`&3Wa~hLbCL1))6;PnxOQJ{J+YTZT>uV$?@u`e
zga~~IPvXX*v=y9U7j0!f%eR-2mOnWN0r#*g)wmVrxX&b8g*NLo?6FW)Oxyo8nV1q=
z-JLf+w!(C&@!q{jdn(zv&hM0Hm*x8a$1*$bzC7GLLOYZP_C6T7uYSKzXJzyNJ)t+@
zD84n{=5=7Q<jel-x!3M59R?e^v2{3o2B>5g(CfbV6|~%DaOXi8g!P$6-a*s%KZxO;
z?g1O0#K0;Z<)ZdP?-i8np3Jdx;z5EuHi-`hCBUVr%Z49Z(AB1QHiM4Ev&rCcf<xs7
zkLZ*#HH=MNA){~5gZ5bUeclo><i}8V>Q@fh$3?n<`E6p|rE#YiKC1L+;o10R)z#_R
zbJwb#$IAT@jcQSvW==qlF4Jmtg5<}p>nT4Rnm>XvD;!xu9NE1Yp=s1eG^@D2Z%vSf
z$v5(%lYE$rTn8h9A2`xE;O(g!JLNIMII|VnzG;PxPgKJ_y;O%F5Ydmf-!e@B^<K71
zINTKNtBX&%H=TK7q{B}7;)m2-ns=Oga&rJp_RXW;BK|Hz<x86E-j=sy+CdO$7OO=z
z@IoTWfmdf<-hx7e4Uf_XM^TAkiGsz#o9s8RXGCkAN2SOJv`}R)9vhoeP#DB1<SaSo
zc+GHKWJa!r9!JV&bMX@$?10sqWY(3wXdurxxIGhl-L%pl_TodKdN4RKGo>qqA4gSQ
z-RH{l5$7O&?_>vr?-*gS_eE%zYdT8KHn$LeNq03D<vvp*;Gsa(Wku877gr4cOb6a4
zPw_(c@%D20Q3<^L(mB-cU^fC6&8Od$*6W;+blLV%ZIj{@<f;T@>UioFL+!IV-yLQg
z-H8pCenAe^`I|XhYwEG$Z4i6iM+ZiUP{9It6IK@xBn`WN6yZ~^4i<2}-7rsIN6{bx
zs(J|Xku?%<6ar#digBdaP}3IPs1AzB8Ul`b_c+t3cxZr@mFX#l7F=;{H*_S@?taTB
z0l%^lis1(hY#KKhF4+e@ImAAc>-4v$H2?J9sFX!wnlr~nLgeIa#piP@RvK5gn0MK+
zkh`sVg(9Ghi86i<^K$xBC!JbIkrUj0<-Is^gN(!~1Ht!#2@CatDBSdz1s?FViv=+t
z5+idVd|JbJJWjI&faD2hu9rhFj)*8wdqky;ycyxgF&?}S=m<LylZfEseslsdvpji^
z89tPJTK1=~EHt}x<gvP;kBv}^pAN)`1NTBhDJX1X(oDW3`cuqG*fwAEw>%t$Fe5~t
z(p}ZN)<_t#gGdI)9)U{{hd#&=p~VJg;(jJKqw+9;!+-03>K5wEDE4$Ag5U*tpbS7=
zvpRqTnL2HS!Q_-`f)Snk+WSmjNKk7NLk^3ajZ_f+dMA?{o(CLlB5fC$ngDY&6>&B*
zA^u2QOjDjik%$XK9md?j34~Y`!Nwq>(dSXfF<q%&$&Y4e<Xo393oJTy;j_#EQ}|GQ
z2%&P4$kEX}4(r4SKr~rQt%(j_OB{Wiv!a&wToeaHx{$jgj~XN#Y#;@ZY6L@^VZ3`}
zUr^8j`Wtq3^TpJ?pZILxd{P_%Xb}#Dw2_7`dRr&0cvBM1Q!M|c9UrYq&MZ4-a+$jB
z`_t||Tqm~hb+um!^lPUCI!DQHfC2Myk(FJmz7{)H?JR}@9_C}Jh#&xTA7+)m63e`W
znWH`jB=M9;_#w_IxGfZ_JXq$S;KVg(cd<d3qASQ@?a>dP*kD#L60Pz6=dXOq+08}F
z%`QT<SWuXR26ExS-5W-2Th|s++%D0q0$XW)!}B<Gx13(>7AkWwa=0g`{`ODFCXYC~
z#5`;~e`PrIkMOC!Urw&cZ%Rer{-@gEfWoGz*)msWCF(idjo$6(?ii-Uhs)7fEgQ~;
z+fY{3){-UCihk71wki4M7nM&b7j?hW=)2v{VoEW?2LsX>5%JS8naIwd&m%*IpIjcu
z?*e|QBW)84m@4d68>?Vg-c9Q*)_rI2z{~(tx%1@|n>cO}??;{za1wMI38ts4cGF6s
z`Ig7*+c&<!hfk2ZVtY&l=`4WbsrbqS2>c_KF?Zavkb&^EHy)^m%D1t2)*x%2CeMak
zZviI5K(7~iU{eQq!!+fn8%(nwg>O}o2mXf+76kwoQ8`f(A_|9MZFF{(rgA(sDVuQ$
z*YWi(YV{O?qzp|M(DW^apIGO#AgPDxF*hbjw0oPWNVkirX>+!t@CID1EcM2KUw(WZ
z!B2~N$c|i|oH&+f5bm&a6z?p<2O<Z$rM(|l_|Qq*6aq5{o|4Cg>Vb<*?z;`eZWPPt
zx}Qa=Qqb8z4KC6El-CMBC1q9Pp`(`stD-lPawuAgo}cG(D}79l<!@)&`RF^%Gqqj}
zkNlW+mMbowZx5Az$V$s!<A&>{uI?(E-U;-uHhY+^D|mt499LM{(!V}sIV3ijD5lGe
z-s77ryGEQHAz}u@E@Cg0hnW+!zdjzHpEhOj@*rPcTwGW$;C^(xkFCEr>b0>_P14Ee
zQ+6=;YKS1wBRyJdymid(hTgw)HKiYI8TIzt+qBxk5oO8I<8SZR^#$p8l@y6O-=3^T
z;q^oTAV6EqFT<xB_)I#i2WV7FXi^?D>AqI`RVf?+yxA9+80CU{s+66*V$I}mu=p^^
zNBsiF+e<sBp|!@liPY(aYgb%hhvoHlv7;|~r+|Sa9?$RF8A}J47MIXyENdN0;;&5N
zyQz7u8@s-LG;4Z#VET@)1so;r{xR8C<<`d?<?G5YVOMTIpL19tB<*1Coj&D&LXmjQ
zV~yQtaeD!ryoGJOk(iL^3DNB0;pN8(smtQs)_?!40jS9iIGUN(>ieeq8;Qztgxb41
zu_ma)0pd$<f_N>J)z$+FDG%31`<F$&dPQA(6|xTfDQ!jS-kTDM(ef8e!B~JGP(DoV
zp4$rg2~I2d8x_Q8+OJk_UC_esE;J9;#<#D2)}GubLjz^VnjcE;C8yL0oc6?4&i;Ms
zgHxkVZ)r`7lmbh)d7#zV{TMJDy5c#aH38pjVM!6Iwk=eP3OT4fX;tsqu<yYtcaf8j
zcCo3@NE`-+cyyA7T3u*pv#?BSU}EhAv&%(k<duq~<cC^l%%<{Br4~L%cjlt#ZEswp
zAYn`(?Sw<9b6c<uWA-Tb>sc8YN3W4QsjrZF{qerd@6lLszmxLehX63!kFBSwx0h?_
zM5DT;uOFI=tr<RPp@0=kI$OO+kC*}-pLCkESXC$HX36Mry!r2wPCgTFvE&n5fijZy
z9=-OQ1zp=x07sn<?}2_YeE*I8{T0tp{c^|?O{Yg0^Fo^qexy6}jo0Et<?-JzhCic@
z{|#gK53T%<X&RRQaHjlEHkALu7{2CS{?k_R5A~eo|JYLgN3+LYC;nH)@c+_*e_5va
zZ-n%}m(OJRlhpi^GH@{b{e0#>D8n%9KXz)mP^f{8m2am{Lj`R{3c7Ju3WeEPvXi!`
z#&#R%68B8^%-=VYaBS~t*(opjDi3?Mt&PM!#7+kK*6}}c=c%A?Nq&4gQzZ%&D2P*z
zJ=BXgDhpbt=ifw)pdIZrO}m=8!_VdL<waDl+X&9~GLsH6cocp=#v@<E$M;khE{h26
z<@sO=D|Da<*Z91VV~Of!4#~~-FqJZfUohE%uoWr8*;Ar86fu&SjF`=dRA?IM<MInQ
zjgX4nT07IEhK0gP7Q2EC&zF}&8zP><g}KZg#PwII7K8)m-{5VBh#?&n&sPu6=YnOX
zOs-wgx*hEmySY2xHPp-H@mskIavT!Kgfc5Th>29V&ug!|-i5zgy$I1wPEJn*e`32r
z*mlj^;d!#@+G5l4mu*JG#5E@dX*WE}rw%*tq^$)ImUpRUy$f?sYk+_mtsq@{Ss=~l
z8vi*S^tNQyJmb~wd3(}Tz{%O*42`4hptY*C87ZceT<BTyBaGu}?AK$_=(mDVFqhWc
z+Ci;cG=_EjU&)hS2{|<l9xU}ceu;(p2{3qh^Qb@>cxF8=?$<qE-H<@Hco(Gh2I~uE
ziaF31#I^C9+TRi#YSUaOMR6v-Jk3UTU?ivAtG#Gk=Ma|-fsN#w+D-&f#On+~fvZfL
zx>9P5OZB@FZK{I+Wt(vQ8CA<owq`Rgmuyh%jjET9#p&}M__mb5J;fb(EU?I8e|2rv
zy)iX+*Q_u%WVmQi@~8V#Nk5Tx2(LFnf{rGcl65#Y-Y@5#2oq7PwmPB#=DADN@DxHk
zSg;evY_Jm8ND}k$wr3Hb_Rh>5tEFghh$Z@S4g<%IfcT^67xW5|$h2ec05`~OWr3vz
z`5h-a1Wd_JG;Y@n87w4$BjuQiq-4;QDtU7+*0JBv!KDi=CH`_-%r)_I<c${ZTgBy4
z%$DdceifF@dtymUHD*ztDx#yC|Fx32sK(5|#G=4OK2avl+Ww;lz@)%<T~#!-$_QJB
zGC+cln(6XWyHVNzg6gb*GD|f&LWjKr^>Hn`A|E%A3l!%_*EvubWb*uhpp!|<h0JZ<
z${Y2{Gm;np##;#_`wk8eOm`%OoTkm~-a1G@l)6S@pDIxt1DGt~e%<TB^BJSU(P`#{
z{OJ|;Bl99!hI|>Sab%&Cx$J#Ct<4J#=GUC&34KLTgOt;NKCpnfJzX~osjbWQw3du6
znJouCG-e?p;qBBZ30x?ghbb`G=lMOW8Q)5O-%?uxf_^hF-!@Bi(1W`W?if;S1`NNd
zNRRQ`k_vZyPDCOQfI0~HVJH2@J;%vVVmd5KhDcA9aXql@w3T(rw-!6=<_u2UuvwSZ
z@NXF!GjaH~t9?Y(p}9O5+{W(aX+yA>sQBUaLFV<`>=Q>%WE&isolY=E2>e@&Ef<^5
zzp&~tI4y!N9!=oCc_jMLJU`vVsrtJ_$n^8R<^ZLyQaFdT{#uKZoAOz9gcIU`YnF|m
zrKeskq%Q@_;)gtEXw;AO{m~=+BF*)97IjEmoSK(4WFX-)cgjyG2e4yPPun0v>(8g5
zpTEv6)2M^9jWu$BEjj~aU}K1Y%VWhaC?g5>Q;kN@CBSxaJOj_jW!ZQwEZh%_M+3$w
zIqLYN)FkSB7nByBmKd=oDgu<-B@*50>5VYWu7mZ$eRRP0sg0*-hulBVO#lbL`sf&<
z|C_Z1ZnmBMhh4HoirdnQAaGjl+nx|`o2Vl1WBYB-?W3Ac#&sept~_lO^dPtPF#rU%
zLroiMmV2cdX(8Zf|4<TeVD|H=d$t}<7?8&Nnr+TeMDxKV90;q}elEwa-<{`BK)et(
z=g;=0+=VGP1Fgl1QBJ<MC|$oiLCo(4Z*T%Ax@~&>(-AB?D+m~x<&52~r6NW1n?)4F
z!gAM!RB3WNhJ3%l7XaBdh>_I6_-BwjaT4y~7(DL&)D9uol^b#uOS3!!^)7u^Q8swZ
zrPNvhz{!G32=l?=xI3(@*+9*DZeXxqq2E9vBAmk=3QpJz;dY0dPt&m(o<SWRl&zx7
z7ta21R5Z@Yt1W}(54?!uF%6q13%(0EQ!A%Ljs&wN!-yD_&>re$2@(hKTi?dG{y62f
z?~qO0AlPs|d9X1$INXHB9O%*~rGCmF<t7Q+gpNcHQ`QSi64|;%(L_{)3JN)c>3GO2
zC0g+%!Jh+P!AL>$E}qLa=RCm>n|8+<5pOfrx*i#^P;fAEMivY~KCO_PcAErPU1Kq~
z*Gy$7k+XlUxFP;fdU$H_7=*4K`lkih&>Y>pbFS64wTYk!m;xlIID^Xf9jRC56`h$S
z`I$ZLmt~6~(G3Vu5S*^I&zAG=oX0W2qMuPWP$IvTwB@~b73F4Pr3C~Zmbv$b(6oU*
zCgnjyLkHO#bz@mdJHU|RMitE3q3emJbni~H;ZkOM%x<1<InloEyNVnu=+!j^L%yZd
zt{B|KAu$w~`gE-9;*aymh1g|5Rt%xGg(_Z6QqK57!fuT*bI)S1|7L2DY^Y|;C37TW
zNFPNMQiQLYvZ!yfnfKZR2$zGj3Te9A4g4t(p5pxJ83k#K5>>#C8s)-Sk6T;fzmXOQ
z^^&5)sG`??86>VVB@U#>SWF#$^F2ox`^1KlJkv0L*bxFII)8sX1Up(lVDrwAO5;ob
ztJg;}@HtvWK>(m_9;Fv{Ch}l3#}<<7V4Xdkt{&_YNG9pR-{xtI0q1aI2(F--(lA&~
z7<uqQziX0x*4yG1D9<6M2yUaxOBUSS_F%_X7pgCCx24fyO3NfUd;5q_7}EQh2Y3_n
zlBXy#VV$6#%PVt0ePv06;oBIe++d=Yh2(l+!6s06&iDKI<C#7=WQG1lN-YJyb$LoA
z8ifbhL4Ia?w8T)sxvhzUQcAGB4oZsMJ7t#aeg#eAs-zk3CEfVWbqh;1f`yyuM$!$U
z_m^DA#$`snQybVu1x9gxUQr&a=a8gpoR*dt#B_p0s;5l4P$UfT0lK$=&|Etw%aRr}
zy(l`mVq<Ljb!?c44!E16!25ga?qgIE%|`W8%z2l5ocBj0Wkj*iI^6816)KiQ)GS)s
zIuC%PuHN%+iiU}Z{s^<HnN#RI*^1wuS2c=hl#>TzZGN;Y*n=L4p!X8sHH9xKOCJL_
z{^(6{cD6#gkbaV-KyP}78notqbRg||X%)ycI)TDT8i3mEZ>eF2f_U{3R2&9V+Fdez
zoPa@%zPIa5ClELp+JB@E8`Q&ri?;Uaoh-KFxCWQ(dW<Xq8bUCxgBN=gMSy1oEn$F7
z)kmq}n>l}<eolS+a77d5K^$So#TqS#L8iJItAs%g<<VJida@DPqkN27T1bW@*NF(}
z!|5sk3?60hdmS}U^+{EYRwN+$wfF-n3f<_-Lp01p2v6H1D=o3au*RD+W?oWfZ_jsL
z7#8-EjD^EUl0gI$j)jjqst`{l7CgQ$i1pVw;Q*%-S-#M_RfSzxEU$?8r<TcD_sg~~
z<n=Aym@xh?-SBr_OT!8M(~wm3@ZagS<~Io0;ZjxSCoawtNP^JOC1<4-P8`a0!Cj~N
z7`ahWN0O+T%V~>4XK{7?ow_pDA2hPvRpNFw=($oeu;11IRFlD^!myQLYc!Sb4u<b@
zu^o(YMZ{kmA_i`@>sGj!Y(}Z(T#n^hiM$o(K`u$=xUPO)2Q@@*5oTCQ^Xe2}g1-Gu
zj-PC?_6d5zZ==R=|2K@{Pv`W1!zli8y!?wHhvhF9&Ho2O4%?q~Dt|GGzuZ3mvwi%p
z6aOot_>(*QVITkZew;7X!Sq$w`<DUe@3D)2Sx^5byI^DC_`BYmh98n~Un`jZ=<Bs8
zC`Cluf;KF$O<8X?ZQS`CQbeU(<FOSy9gG<|kCab7S|!Wg+7l#HWenFM#s~b1<>fjD
zyWOq%`B{rXHDx%~`o3D{ShpYng=xMlXy^82TbFH>AknN&{`!TN+wNqvkZiWZBKoO<
zCY)5_(0KDx;*&wIqYI-fb^TTZaLD(N=VSG(RpVsO!G^J8GHT6~j?<&>T-ppg1n!v(
z!1f0(BYPKdFkl)IdZvb>{_XvCQ`RP^YeD?*4n`Ec>M8>gRl%Gn(S~_KyzP5uRckGG
zJsExa8kg4RX<wDo{L61PS?63dubVv5PQ?nKRRScvc+SRpCaGG77^i0gOL?!~>-(&%
zc(Ct>Z5cIaPW>C57LoH@lLP#-Lv~KToH^AFN<jJhjnYU~Fk^(EL}PmSLWG70utM)j
z%vRn;G$ru{h~S)&unt3`Bm$&}G64@;ce3?k{6op(8_sWBo9uPfnpZ>=LRmB{I)rsY
zzU@^!CcOQ2URAGy)V~CB+1CPA*6&%{$SQa8=^;;6SQI|yh!=(*tC~a|7V7^oZ2LJo
z&+1I+y0QQm#|IM9`8|8J;-}8Kk8TF<V5dn?F%UL>VXtgE(l@dm+(!P`P!_<=aUlSv
zZIKP;PdzFln9Qe6%9m01A;5;=UPHWvvp$UR=L<|+Z-=_F7s67;&|Gt^$05<VNx>Ri
zwxG!v_QSrtbyO*YoF=2qx*i4q=bq=i#a@}lN2=?=R=3~&jAuq?QGRS_rqu@=<YL0*
z9jvktSdGjM<dM(EAkKu};qo7aS{AnxU7F=0!G-CBg>YQGoI!O+iDO_0>&tAzW(eaP
zjz0`o033DHx4hB#Qtb%yZvcZO)JDffKrBg{fBaCMuPMCU4OBt>xtbC3pynv##+mHO
z7U3u$QlEl6HBb&EfGEZat$r176Bh6D7%@~AwNi<_wJgf7M>Utw{Q}f<oXU5|X%*`{
zQ5ZFsqCrg?OYkD7C2K(pc&{|r`axtMZb!sA86sS$XcbX-aZ~%?`DvV5kqgOZ1fYgT
z_z10`VDMXxmLo9~2vPJ}%CC!~yW4_zdWhA1h-h@Di8Cd%BQ7Qj!4AKq#*X1Mr2T$N
zo5Lqb%S6ftDVC*kb~yn>kBTVail;fFmq2s#GUU1C*kmlX#A~J4enNpTHEX<bEvqDJ
zuG|V@!)4)i01>F&q>5jF6D9^!J|OlbA@&7%eR8eVf$n);mD4St%Qb^O=^}Ah!|^GW
z5teUC@O3_1qe2+Pu*!B<(yTOv5tGuwi;I<3&`Mhwoic7BDqH3P2X<KNH|IPZUg2G}
zHj5-c`%jkx2Ap=lK<cL~-7B5#OK^bkmU$*1V*%10WosHcY1ZtPPQFbN(z^ki@K)j%
zfuQLnQvK*|<-p>_w+-y(*ODP4z)i?H6tbO+gQw*HLt+J@(7q;r<|Fod4wc$rPI7rh
z0NY%n9)@7vF<y?=bL?z~-2M^3Sp)zA7uNDbhvl4+Z>6nWQ#w`8A~D0BAel%#qV#@6
zqi8+MNL;Wwr}&Kd+N#}<;g}rCTmVJHSh=v7KJzoo)<<m&Q0n2aX9}XKNTAX3@RPn2
zP$pa7RMo370V!Du`JlWU`Q(dGADsenQ>I6FK0g@4I~8WxvyYCj3+}~<cNU5K$&$aD
zI4(Uouu1#RzI;YzIfwIt$)Fj+r4trCMBkV4wbmy=Pe_AcpH~liXqPV@I2`a&1}TP6
zL-1P`Lq^4S0-l3HiJ8g=f-iR4ZYBN_<Sf|<GXoIKDaX`=Hs%8aEm;XG^P1v0449Q2
zVCRlC^+Q`5j5SqYcQ!W{&D%$0&01<V1$SVCm&>IzwuFQ%EvxX2$Hl`U0buIZUfSXX
z!Jlt5ulL5m2WzqBQH0Mxkh$%L@sRh<L9NA#nKT1O7Ibsm1h+S9E)m?nD84thTdYIA
z>2~YHNgdU^PR~s9ev9UQmvCyAyU_r=dwo&5$fSP6%YGsqh-i}jv_A?~U$jgSIQ+I#
zCpK1c&o-JbIbaP*aaRO8P@EQqW!U8-<LAkX0wf`0AJV~}bR^E-zmSd`LONxFes!&3
zeM>7;q67Uyp-tE9Qwj)&kn)zi;9YUwNep-h^MmZ5zY^TSyh{!jqzunxUIo^73;L8_
z@F#O(+V~<R&?3X<P<t;{SGG1da*6Olxxq@qd*$gn7u|q$$+jp}5i?rTp2XqH-mr{p
z?Uh{6si@|r6N>^se?4g&%nX@BTmk8N4=4YwDx(I)F?(@vVyoK1E-yX=ISZ&rq?;HJ
zKjkkU{OJtKXyq}x8S!)pU7Yvz8G`H(u(efRHYhFMX<Uf|>*1swH(xORLI&l>q+-(1
zBm%MGpRhqrfkAq9`i<U}xPhl#G4&i$myDR|{h+M%sjXa!W=LPLwTTEq-)dk{MG)HQ
z9s*Mr2TSltnR`Ky7XCoL)quE_gPm9(Z;JOe#ARPg1OxJbnX8qNNPl6Gq4d=CO5Pmg
z^mpL_eF1ET&837IcSf7#fZSdOr%va_6RhBh9Nnz%EziE0Cl8~V31@}?UTfEuWaLo$
zJfpoM$nmG+c>fP+?-(6<yRCc2wmP<L+wR!5S+R|d-LY-kPRB{dcG9ssI{iF*?X!E~
zto7{otq)bBMt!*F_}7Q~n%DfzIaA{k=C@dz^E#}{BMA<5ww@h0J-+rMjF?i6w2G$2
z7>CR?exrVbmyTRT$(){QbJcqwi;*cFN}{P5|KQFsnn8dHqxuLCJxEC$6*<Z7N>UeK
zeYg+~5-mOeK1z^w<QBDUF2epHD~MqdA+QllVG<!Xy*>=d!^XQzbyv||8dc&{K>1jj
z%y(7cv8f_wv~zVgj#*^w4P~V({OQEyi2fv0oO-@*h1O-gz$jRqI0t@}l(^|xMsV;e
zEqSMTPatHdgy^(ftg*9-D2yeGv=0Uxu2;|c#`0_&3}Z4A<gzazh^kV|r!M8$O^2Or
zim(f#oELC9rZAgRlnl<|PJZnkk%Sy!0>hiVvi^!0u~(z#3KSv{{rYvjR=4>V#Ok_C
zSc>-|;{`W%uU7ClGGYOU*lFRJJlTXs_Ut}*U1~m@<oK3Y(<P-{FhEX72RCV4fWk^A
z!Zq}RLjCe(zD#bDoAYS;b2OGkqK3=c^E|7p%XAW>!LcH~R**1YoCs_%^Cy%6Gzca(
zNl_{lX4ly>XzEL8vbb-1SjXo*|M{U&B=MOanvD3yKvEceHJBWlS5yQLqbn1<jwYl_
zuS31NVs(cJkQ@$Vu@8=*(VGt*Y<qop<-Sjp;=FVVW&!e|`(NJQ!BQCAKWNo&>d>wg
zk{SH0=Y-*ZWKXX6=4ghidhwRUmq=%67!@N#V{A)vCJUdMXjYAts%hiPP#XrE&b$dP
zjiuFiG%+jm&J3X=?74`L6r_GY=jlvgoJxHyPn=aTK|xS!(cJWi@E`A!gp;*}r=tM7
z(HnK92zA|omu`EW7=Z8Q8c*(@9&{unP1@+#o4n-6=^0mbBvJ3#kVG~+IR#&x6T7-5
zmsYY<=g6^p=dWD``J*a;&?741$oqzZ2MklXeV=1=e_x_8lO=zrW$73A0sQ%#J?@`X
zJlpTG`d_Q~Kc(#7lSFL)h0FiP_SAo_3;Yf){h{LjJpV6{M1P+6FI4>hQ1!o;2z~?X
z**FQA*#2wN#^0^o_}}mLWc$4X;lCw4E8|~gjH)zcocDf7`WN+MjB=}QK;A4Zj$~!&
za!L&ms$3@Kh-lE7;ZnjABg?D$Id*^`M5%{%D`$tYSs<KfUtfO$S2w>`YfARE9Eu$?
zx8=e<^5p(chV3h=0v=T>n_)uxeD+0G3u?ogZ69Ejlvha~`}TgDZ$MjNw~vmyC6<lI
z4`=2s7d;Ugy*c|uU#j}oKXKKp+k@^zgW7&i-_dfQd{g1>^HiARNgz2fR~)r|JNLnr
zws%t@eKVx#sw)J&4i(czzLw0K!I`31yfrCFJ0d-9o?9b)=svTewMs7pJ#L>nN3B|2
zD)zpxd@$hN^lpJSu2Ck$ZQNX+Abl(l1R;9kjj8Ha%OC_#_Mo!}5l5T@6MvIR2DOd7
zP`LNV7<3<Hgzucb+ZovP<{aL*ew<|*&nGiN999sG>Du+tkBKaqHY4rF29EFrEI80^
z8)K9{KZ-4}DKf%h5aC5QE0o^D!mCtaFCy^KydP`_-|UuS!dMg6)yoM0jJe_v9Cw>R
zPs>>3xuZ_=*T%njDOogg?m730`mq}8BEv(H=jY~(aj-Uu2W^FXQdl8;e6Qxh`|%ok
zv}2*Qss6Z6z3hx`&Ud5aK*vavPw+LP)-k@6$ShK*E-<|)U2TI=%yLyoAlginD&S6D
zEBT>QXq*TQj4)pvFBo=Ea1Hg#ks#t#U+&jSBM=O@ApoxdV!Pq#!q>!;D_A(sj=8IJ
z5(di=vl{MnR><9z@@tsTy03;@uv@SvKGj|+$~*&lIj;3i`0{E9VirD-m8ydS%iM^*
z1b8BkcN1q`3dL%jt{?i6R20)%=GT2vsnbnHhpar!G~H&CcfI<3*8oT_5g12Lal`I!
z?@g_y#kVRg&B8`iGg0@sm)1>^esIV;@VL!JQ*qhD44#}Qi&13CUTkpR<phn<t0w2!
zL+K1H3uLsFPqbOfPJZrm(|V>%U;8V4TUJ9m4~z}GmAEgKW?iWKO7j&Ie)2ec0RCew
zF~<r+h~C+&2e*l5#jpDv%jR7WG}405O1akcY!1FDG08kytB6|ldH$g+2P}SL(=}G`
zP+?4h6&EdaSjE`33eHB_aJyLyRLF6$t1#+~QmT#2=j~^$$XVxBE5GiSh*wrP_n#A&
z$i!LX`uyZzb1cpL-3b_y0;8xU^I?O}n7hw5^8ve25NyNbPqhU*Wy)K@P<pnt`q&-H
z6hj9n%W=WYC85lBd%W5qh+?LrKH#OjP3|W7^QG8gY8BRx&<^?+cP-{W-Z;9_hJ3Sm
zI26;a4j*XgY;nFx<H4c%u637+bDzHhL=q`)Sdc&71;_g>FhCZS;W-KmeKEXMM)&BT
zg)YoBo6+jpViT5QfuD7avd<dGeN(2m8VW!>x}<P_T4qo7p$oI}or;6P5CZN_@>#KR
zxkoa_Rf2Tq4p^g9cgiDg*GR>0m-bn!nYSE)#2PKae9KK<LmUqh&mfs+2UM&tly@jB
zzoAoPQ%^OCF_#h(h%VLTkd3WPffAU=&pqbjFM7FqqC<Y~Zm|~HK2p3Aib9wr$?}Z`
zK~-(OH^p!AI8A}xW5RT5_?9x6fJvWdsAw7<q7Eg)szA_7*7UB5T#y?QP88k;k5WRL
z7A$okxQ!tDvfZ9QSUe&f=mrZEtiS53AZE%S9vpg=FFFAP9;GV-(0v_W*L!0wZ&vB*
z&_eUR<}3v+n6mBtHpN=OBiRa8!AYwTcV?hgwILnf-%s1|>@G|w0qSfd>bBvGB^rxS
zNklshbzRgJ)`H~2IqxsTgQ?xpj)k*(!v>*y#qCTb#kl?*WK1@#6booG$QGDRE_wS?
zd;0!n3t@PBzRxbamnz@@Ig<Q_>DJl*x||kjwPdQO(AW=Fy?t2-ZAQ+02w(*Q_YE|N
zEBl1Hlig)avUP43(4Q<{!nR7(`kiT40kRQPEm<kZWhrj&4TNy+Lme1NdFx#PI1ZLI
zZj^(@Uosz2-Ji0x8pu%|9mrR`l;4#I9IVLK=nG|geo6e~J7<m)f~A)x;hfM;<^Zv4
z9xRx-Gp5<*o&w;ZzMqZoZh%r3JQ_ZdRH*S;5UsL~1F(i?gG+Xy7Y245zTyUo5jsO~
zU{iD9lk>;6qsyNvX0V1m2j5lba|zErQnZ80Gz#{&9Be^+q(#-M!Ic$?M68@pW++i?
zR0Magxs&*SxdmC6JHU?#6A#Z?t^{RLgB&i!7BS&}9706t`o^&Jna(PeBz7RdebeHD
zrlsdP!(i2yv^%&qS0MVL4$kxC2{e7zbA5hvm>fmr%<$YqaDTM~jueV@Z3zx{j_qYo
znB)szCFf7eZ2;2z_P%6$c1v3JrkR-{=yIGMqyny6sYk);vEldI_F8;}u6`?$kQW_h
zC=sLQ!qK$+Ju2#Ecgv~kotVv1;ge02NQlI4vrRoBSNX~t6ed^WTDRUJR4oA+{`L_f
zomrA8>nCgL;v%hvx&r{PZ*w`6uL!`Zryt?W-mYmzTMU=lnb$PT?xT3>vas{cI5odT
zk-0gzEDEFMQo4h7KR&H`+K}2-q~1QStN{v1Ssk&BcezMeUE-oYbuev}7ye~7gs1x&
ziz3pO0?bI_K{Y_e9`omn<>nwMhW$ZHRl?}d-p%t?={OI&o*tn_d4n`%v4n{C3y+JL
z{*N=3=!ZzP<B}9YP-fc`?DyJeq6W*K8USANY5Yj42$+pIFbP*K&RK>eCQ5HYdDA4H
zd#4a6kKmW>R624JuPrUDO650;EseR+jK%(VTbbw+k9u8R8ka#Q`+j7Qn&8r|(~S=M
z!7IA{kx@PM3zo@-<iIm8%bW<+gK<ZUt4RWkRvJ`3qvD->_1+(SwY-agJw#d1Dn%83
z68;V^2uTsJHdt-Ft?nWXRo1F6OrhgSS*<D4l<eb22?7PnPsE$&^{}Pi5UpieBW<B+
zu<}Lr3;5^>rmr$wJ;`HlpnaL=fMDVv1ZMi~GP#?zv6L(F-+&VI8#%qjFJ#a4=Fs$`
zuVw88*Vw8TZ8c-xb_q}rNLXf?9DTgTc6PPw+|ohN1ng)k@5EcywJzW5I|fe4*ik-|
z(Ix!Z=jB$`-<%%jr;U_1A|?^t`Von~>ic7Kf4h}){zQf~f4{M}AwYrrNVw;VapV61
z2RpYYFp%Gy%3Ld+^tH+6SMeXaZ574-vdEwKCxhy(4B9&Pd*3uv<@E>9^c!RBztFh9
z!{PrAjr%uS`ZxB4?N4Im|ASM_^h<L7J^aG<Ct35Kt?ECI{I??ach~y=3f{lwNdKLI
z{x|vh3(NV-@^wgK&2F6?$!DtuX&q9=e<&Qyg~bY#EdfX>fLpAY!6iyqPf>5N-3AcR
zK=tzyZ&PiqTCEuv)JG#6JAvibkRG_QE-m`aPB&c=(fCW8iG|*nAe~bjVAr@kh+*b7
zCI$uux-~akmZ}QjnCcWZWBB&&ZY?c#_8qbD1|#KP-R8Ga{Ei*37M!8*FeD7vzjjkF
zh~so~W`hdwu3}!;if)icXxBXuc+-bau<aQnB(glqH@@OGrf8j7o#<{d(G_Yf+w2Cj
zQGDP7EsJC~B&^xRRu(kqZN;rcJSzj=0_@CLBGtd(FE_7*$c*}~f@5Ty$yO~*YjAxX
z5oo56CV5${b1Irs-*}r;JLe4?7|T)J<!`LC5RWv7n$$2|dA{H@e45bS)!`?pkG_LW
z+$@u&id66CGo_x!UmorHyymK#>{)M_BRh@Xl8)oWEO0?<#mMT<Cn*NZTE&9EIn_V_
zW7VM+)HrdgS<0c&pn0pV{P4i#u60KTk$tJ*6kNkm!X_i1Y$hi9=9=n!g|`N65}nz-
z+<ga@VFGD_Nzo>6s$`met9_tHrgCmcgO0}6G2TJ`GMs&~`Xvu_NL-m8+K2vP0u~+s
zv*Mc@CUp3{7zB?$x)}*T!@%ki`Sb#Q9>iZsCHPZgF(OcZi)NY33wPvg`m9Ag(ocuO
z>4O19?q~7v51tu9Loqcy+OwE*DZ`!m6iPE1Lt?51N^LL*Khw2t<c=5wMnD<yl=Jty
z!8C=CoC<srdw~#T4DoM4e5j{mnPZLR$@NUoKS3o@`=5icgX3WOil?OE&ur!{4Us#_
zYbNX7DC=(x6?Zk?<((c?g$Q7e$fr)<!p(MRgHQtrveSi+_K1D(>|5^}NWLHz`3!^-
zZ@X-8*vuk@nbK1`*$41Y=U(G(Sc81lZPsdx$>_m1fu9+GDb-wCJa|f&7Y4;-wJ^P|
zwsukW*A}goK5(qUaZ@KJ#<Ntb-!C#{2|U4T>*sdHKE~%Uv-Hm?1hx-!KI~j#d*q+W
zFn$n(A;6A7{?+p_A;4;b5{U?(;dzC2wmDSMWU4H{SfRo{#1?@VNqyqvJAU(tQF%-e
zBx{|hDBGf3xaxC<)1sN`iUl8UpYUzyq63jKLy>pCMAX@JEN0UyEK#@ckB4ww;rL65
zZCuC<6U%WcUl)AenkXMQMi`w+_Dno0xHtk&t9BaT3rAJ7g1EUX>NOIF9NI*YEcOt8
zane9IgHeA9c0oh~mjO}9m^CQf-aQ(e89{H^$%AyfRYKsK{4276D<sE7&n#>Uh0R>R
zv&=Vw!Ol}!n`(SoIMda@8apJx2s5t43NWgt+(1RTmd02NhyZUYOm864$(de30d|1~
zHCJAoF#a1x)w&z-ZbT~m18bISssN2;mX_$uNNcvCy>W^QqIO&h$){!|Dj<eJ1pRAu
zyM()gx$N#RB62XUbR4nNmK9VmEXQbySg>*Q&mFrNTeSV>UtY@nl`0~zBp}xMV6`D)
zMCoCKd5OO|#09HxjY_Z}{6rJ8%e~^S|KYn)zdz969;q9-zOgw*-3v^-yfyzm;te+N
ziwu20E=+^^3H#aeJ4{b`GA=7!X3gsyZI`W@i))E1<L0Djt<H$VazTLs{GEpb?IKoe
zQb&{bv}oAHl`qU>;3z-UhqnhwVDd1{_uNTWV(DH&+>2G!Eh^~cne~0+6%>yK-n~Et
zQI&5d$`#5}MAbIQLv2Yyob7?h-d`0483^#%{3^7Xq#pFSP>89umBwYhc;((Fm0%#q
zUE_7Sx9B+V^FXkJ7`dZy`5IJ44lNB$RvDARjy$zG#KA@%BM&3=E1ICeo`zXvGVn9_
zZk5#vg>BHZ?fQAg8z=}2<{~l5ineac1`f<1e*iQcD9Z-|!1A8Q@^Kr$`@^2}vfUPL
z^Nvu^t#K`1KY-VPmI?p)BK$il^Iu<tf8G-RVD|j2iu<1+Q1(CPdHp^^f&Cxdo<9Tl
zD{jy4Z02t>6n^t{eyiF3Z!f~%<-mXYI9#0Uf4MY^Z5_KeeE#wtN%wQdWV_OW)l=i-
zz+W#B2pZ)_(@MS|!xtl)*rn5zgo60`qt~A|Y@Qn3U1w(l!YsH5V?<oI37Z^`9QxiB
z1#85#%syT{9g}-&I%0-}w`(uwzubJknEclIFQ%DfLLDVS*fytD^UL3ZrYX+cf&F!r
zSLhfjr{Va%SDQ-u)JI!Rv^y~%*TTq>wH~z_o3zw)3^aSzDO3o$vON$cot`?ASw4!p
zUQV8vymhCmFS%Yje7xBLI#j4>=aLZ4LOz`87A$dFo7X!b?{u!&pCX@NF;!!PVzVq|
z7qz%qzMXt)4IvE{g8njTmzUpbV*^#_jOVR>y*kF)Tga1U7B321PC6Y}ZdX+zWZs$!
zu?`tXJk*Sj2BGX$pW#H2kY<%Px4nKbae6&oHrak*fkS-@g_0c8m>Kl;W0Q85@~NrP
z3s_y+2I8sukU>&*&~iRp@1!TDC!dDbe<Pn-t*n$7{@wTS9)~<vgf7apHwA+(d;3?~
zhM<<RN}=L^<>%ZpbL{1h0adYi%m{Hd8N0zXQyK%f!e9+rl~G==D8S6jZd$|B5-(f&
zXvD$#5)-Ga8^EY#+Be_5{=;Jh_pI6q@AHgpT<#Accw~2mO23%7QP#71TUlqb1-8=;
z>o-p{zutm1x?E}q{PVhpWwQ?3N@umF#SR)s)1`=8AsK{VM30fTY#UVbzS&HyXaK?%
ziGc?uW*VgR#Io%?^kV()Wp&U*P|5+7j;6I>Z5;TU-G~7)dB!Hp$QNS@h@`#<!%6pd
z=hy`RANA&4V+*4tFGs6@akNQcJaoKiW+2=)ZLuS7-UNdFL+#<4OdnBA7%&fsmHE7q
z4hUvP1`?&)jqP(sRTa*yN4A`?$BZiflN|$?;EQ71J{1lmg!Kw^JHuOXJB%WzzBkU1
zNF1cgF{g@{@M)_)rGofII;dW%s>70vVt|8W+dRMY!1g%^`V8mI$9J_~@@(WYltOHx
z0tLA8(P$I)x_ukO2$HAvJT^X;Lm7N4k_4*;Fx{|aEs1u&_tJtyd;opr$ZBJK9vafR
z_3|T)ZpLtqCFClrg!X+HX;lWxvw1w2Te76)<<pvH-NCsq#E9}nBW92LX>}`xgbm&S
zCMp78l7)d(80SSsojrvDp9^-gfOTKpTnMlM-F9tazn(3T;rYc)Hg1%b6R7-9q1ikZ
zYzJb7GQl`JGC7v(c^z)hw$MAly)~jWs?G^tLxUQ?99RD6a9Q{!1%0ZUeV>E2eYI`!
zLw~>M74MAbhvuOnhL^hTeRkv;xoFJyC!FPrYo@pz5dBGsN3vOE$FxFhURK0;^dn9f
zZ^G#OZ3g!Z)~yStE22B3xKr)SCwD{mK`*CQQTQ1BK|?JIB&;&uH2tVxjoX`v=Lb?+
zwNs-O{1hS@nMVDO(~4)`8Rfhj!<-PY-Bg(x%J99)<Dxl_QWp@=W#k5)k$mh2EcgA>
zX8!C?ae_YGYAS{OluFYR$)n8e)0x5;)i<(?Q|kBF6kU1}OrB`DXVR|$TccD#vSEC0
zpDo`P(Vw5>y^Fm+2s%bSQar26xqdAz*7Xra()4{prsFjxV{DM-zwqVO3gMdhg+4+R
zP+UN^bl5x_8I|H>UfEy$j178cBlsp<#jieA^A1WD`}xt4Vu}GuM-s_L38dy@>k!2v
zH%ER_r<uQstbjm=uXIB~Vk)k<(h1K@`lF`t9$B-iBI?(J9U^6Rz5o*ICnb^SEmYDZ
zd+N3?t=u=I$5rz=fF_yRG1Y~kAt&X{W-PIM<UMhQ!{jM_!ac}qoabJd0YJ)_+T<BF
zY@K)p;<zizc$PaDwI4L?XPOQ>C5n7MF_0dALIN<xkrsgW&9aoTysH?451vpEscbmf
z;t&abEs)QH2nbT)#f71epF|F&LfBD%J+HvQ6gamo|1vj0O&LwWJ<_(+maCi~BfNN?
zoQR<H4NxOid9L_H!_it^=jSbvo*#ea0=gS;0zX$eml<w=x!8(WEv{F#bU**_{Ucy^
z%>$F_vc2a8EJTQGeR|YKKt!aK`VCobh>(9}I9tOSDqqR)*w^1I{kBzE=$@Php;*pA
zL=Cknd-)i8)5J@Z4kHPQMD)}b;=2wyk&co+bgre|3?Gid$QVKHW9R^zJW2dU9Q+JO
zI<oR4ZAvAYLrfMbwn{$Xs#ddGrYFcCXQ)nLFHoK{(6H$+_jG$vq$EQ*RGM;#kAKH{
zz=yyE7A1TN;9w6Zs<=8#vg>Sz`YRHIj?n?JfN)f<?16+NmDf*=1nDl}YLw?{;DEY<
z1=x`wMUXC-efLbkX@=C#!F4hYboTF|UK9W!RRW-&ap8hg;s&IS!GV3iHMk&IE6_>l
z*gz-Ve~dHf|J8vA3o_h!C>Xhy9u&tBW=F$15xG!!%gd?Iaz6W+Nuoxsr(LUhqGG35
z%!kth>O(78qd@YRS92E@7F}wsF0?f_Kx_IL8#@cCxLW7MroJu%;mbDv5oczb5D6B!
z-^(X+Fww030A3<Bny$g+_RQ6o&o>)a;T1+o!Tm}3!NTSNKQB3G1Rtz0qwDjfA1=X4
zE^~H7SBnKLpys9Rcn~NhZ)>Z6Hq|4~fZ#A_SSpc}z4oG~(mrVQMjTB|e+zaaqN5aZ
z^ZzL>;={yoB__id(TDDTnVwEBWpDZe5q)#iSOVzSAB5&CUC#k*;7s^IDl}3R5xoM{
z)u#mEx>i%ocU#1nF$K8vYYn?B0-MXJ5kgL_2=Ori62!E~6J~d&fTR?5h~gp3$R`55
zE9>n(<E+j=R^X)O6^`S2$Mgi{Dd$DbZ}q#Vu?Cr5Q3xI=hVBX{G{QOxas%Efn}N@0
zr;>FsFd?6DXiDP_)se?Qt&UL8EYnEevECN~1kpWme4Ja{Qfke7qE~d$1wvnWZ+7SR
z&NW0~-a^tlH`5FP!pON1Hi?&6bu2)*OS0Vuy8z)S-1NeDWS@H2FfAL*UnNy<gh~8g
z4m7Vc<S!ZgDal0X6bN`zFucea(BO_k0x!R-#5R}l5DoPhnBX=!I$Y}ki#lmw;t?Iu
zTyyPLIcT*NQf6E+y*e5w#kv;_q{18WciuSo@tr;?={jXUh_~D;6zen_a+wm7N&*6z
z@GLJAHmljS`3OF=!55{M@TUYk8!VC`DMoS1Q<OY^{LD%B^vNxR{XS#z#OGS2nY#QX
zD3d)v`%SdUQiu5!i^{jXsWNj2!Bd8zaQFL9i@jHL)rGbox=O=N05?&pfnsJ<DZ>`_
z=h#Hfb(@$6;}x)xvSEB#o*Q8T^y`XH#v>oMlo2AeXjJ|5+xHtu+a}7e1yqs}gA^3J
z9lOku%=Bs_|CPlJp|lH=PYQ8FFJMQhC~33!`?-|Ronn}-+B@d!I%e_K7B*zFcU51{
z&J?&mzCGHHbe?cssF!cgWt@D1BWTG*nPP^Ku27)BDtxCY<yt1u@9~`!dV+)lW(|K6
zlu0|B=DDxcK%qz<Li8;p_$eal5N;%$igV8C=^QJ75#r{y?I9ShRStE88$9CXCb06r
z<zz>UnrGZO&?DqPTJt7q=)MP#9s+ve8=%T&_Itj_RZ5XtULlOLB=4^*?$0fXYk=Ka
zYe3}+R!`{*BI|4wHmEMXC*eUthNe^{kEdpUq=v9tOF8mRK0Nh+9Bwm9GpF0@?{fSA
z8dG!-%$|_D*tg}KP5x9X5Yl1AevkV>u<JHEFqnB_q#3n*NmQQ#SaWFB(KGZQN@7W-
zRV!`&dD6opz}L-L>tiPAz4}R=69M5qxO4Iu=JMEMDVyN|fxp>-yW6w(w6BCoof7o!
zFyjU1N3~cB%m)F99Odj9uDx%Y$0O2)oqhh=rG37_r;0TSOo=?|$hAGWgenv7*5{MM
zX}$=u84U!{&FvoVC_nh%15$+K$hL!NiO^N*Z>TvSfmy1e0`Ug#L_HgC8un$hND&|4
ztCW{Z|04hS&rNXbe_C~ahpe*y#U{9a%6~Y2&tm>4cK=%W@Rvr&{dwZQ^#T8H%JUC3
z?f>xy|Dq}Qdx(F_g;*Hb|1zdJiM<lD-+b-Uogcuf0dGT-Upg;|aRtGIWRJpg)d|*I
zXV9`dKYna+wmkOgbDAG>v3X70(RUc3Ql~+S@x?{a{^HX2qYB}Vs<x-I-+JK=xoQBx
zF~C{>b-!yPw9dSR`XRfegHcIuu{n0{>q}OZ^qO;zZ7ZI<nFHUKtX{A6azTqbJ0n}^
zuiLBL=MdUZb=9?aZhi>6y-<;S#2f;ULhh8dBonx6)hU`1c8{jU+=eY%{jBqN^7Uzq
zj1SE~<KtTwfH+Tkunv4kmZV8gKf0&Qcc+gH^5q{%H@BSRW3Oyrm9id(oZHxVe#vt7
zJVf!}dR(3T@)vxX5NOQ=?HJGk^JXj*6ry&`gjJ8N67$KS>}hf+;9z|!n~z%`7U1GM
z<C#we0*bkv_Fxv6B@J&HT=H-Z+N!+1xM6ju>E~aYfnV%#zRH_RpUGGB)-UBVijp97
z)^WKtQz8bfsDIC4R2G0H>3@aThrjtT@}`hC+8FsAXIPO}!p1{Wl*^3{sS*pWwClW0
zKPrVOGzHjv?cP*i`Ou&TLi2ciO#qhK0x^7}mb6iA6LER~x<#^}D!Iw?%WLO|F{Gsm
z@2dv8SkFHF4T7zYxEgNAo0b&HY7JpL_h23_A;3zzr7(YnLkt)aY$CrIB^V8BHYaXR
z&Hu%3WniXYxO~vd7!e{VEo@!9c8hi;bx7;gD6Y~VolNl}buzMr^g)lSTITDpQoo)+
z_Vi+vbpZ7z%T|M2kWz(Lsg~B&HK%R+dE4u31Uwy;&M-K<RbX&@aUyM?wg`XU<JImX
zWG0%`itz&U{eoOa(AArnujwQ0b(*y^qhPBK>T8_j@dL*I!dU<zub{S?-tD5AX|{tO
z=hwr&`nT4U-VEf~xj~xP_3^Q5NNcbSA5qIzbYzj(n04M;Nc~DyGA}a1z7EFn)+k!@
zRuAW%MJ64ej<pO+cFde>LbvP`wgxnd)-EHtQFI7;HY8%2{H~J3ER7b9V8%->XcL^7
zGA(5!pJRfK=0p4(O29fc^O+%RYSuCm>9z3Hy-EVKNM*U57n*y1C#YsE@SzH-p7Isu
zGwmRONmWAT*sSXhDInGpp!~6-!WRg&=XbDh;_|Z%c6W)seBP*HP!=;fKLlD*z7>b#
zc%|beXQEzh13kw_6POE3JMv*9Rw@ebyQZ?ym~rfRtA_{Cq}Yr#-?(@Ma=1?mq4g=7
z+J+1MfSMv9Fr_<>O7pMoV&5xuSZT~B$IZmZ7Nz~4DifZKdi<$JVz$fe+geiD%rt=%
zOn|#uc4X-?7TDb=K)Nh_c2qI40q8&TD;TXX87)tn#%jI+>GUS#a!F+t9QwnMmAM=K
zpr}U<zMvC{(|?6f{3>oBGj_}?bisq=B;SDN1<`o4;zp8{?*f2@OtZms6s#`t+7}{G
zkwkp=39Yb()og5Ork@4j-!3BF*Jc7b_L=?F!2iCby?+n=CJs}F8DcbJOp(f!ljb>a
z`?U)eD$id=(<oPh0tl%=pOXprUUvJ4F)CZ$R?H25yNioElCH&o4>>}h8rS5|rk=L@
zsvUqE6kw3A&z!}5F7yy!9eQr+(+1$sA*o;<N1p<|SBJzr_?7|=gp7%L1#$dr(P)~B
z`$^9XEQoVv!ayAW;xN-lxZj3{B?t^6{Cx_92yF-m1`GiqVm}tx1Uq7`pA$zN7lLRJ
z?Tipjn3hD;vKDO<a%I8&)=H-~Pb9wJd%6!;AO?yAClidpK#pMRT4+EE4Q;tZ(9eK@
zu<<))P>aBJ7)`)d!{!;4LtdW~61#>7j>+@I^rIWh69T(c!TfUU9r^e1CBu5I1-G8f
zw>z0ys*Au$H@ASATbo<3w_bIR?y^fDp2>rX_Lu7m0@R{G#5p+696_TJmV*(Y<`udp
z50R6M?0JqXGT-bX)M>Qf??)3^VnRPgs9$unWdt0pktwAzM;PqsKr&DYXRWL;7ZhKi
zUbK(us2>KvQb-zC1G9~t&<5Tz`+aBpPYw&}106xDcY0G(n+e7Y0`M?~ND0P~+OKD(
z?AV=B*N?m$rX8~maRT=$n?QJ+&c27M9g2^ja0;0Z?)%#i37g&#gB70eGB5vJcg+MF
zIKqW7TCgP-R%UglDUOdrt#3~mDC3MnBO+vgl0!5rPgt-=1{`4ml(SLX{jl7h3`|b-
zuD;ZBq`|~?#qm3x(ONr@_z#hN0;ZjEl2T?Isg^9%y>5%rwTy7}=6ud)@k|VuA}wVx
zgc!-OA%wZ!lTAd^>+yxR<Aur$=)1`2&K^YfZ*>yB^Wk>F<R{s+4YocO-Rj(Rot`8x
zr7GF3q3_lwb_pG{L@Hl@HO#MKw(twfkxoFsEoItiO~ZBwItrxYLjM7mwVg5mPp)TD
zS-W^(_91~X_RbTbP%E18iaFbe*V6KU>uq4J?rL?EBrnlDB`n;Tik<#tMAoO+8Y5t=
zv>f>YXfTb>xYbj;lUi~c%6uqd8LgEPl4B$~PM(D^8;Wc~GM;~EUe9BM_r|$41)Fm^
z;p)pvoZzz%lvCzPn=6sf?g;7t!A-Q`56B!_?VQtlk}Fr3uJ<o5bF~atKPR%fwy{YB
z0^#eez3FXL!}ZeSrI{;{Giy1w(gMGJQ^yBUUp8iB=O#g1fkfZhUqjA~Epg(sT^wmc
z%H&}`1DVJV4@YMHq%+^f8cuM&!|2NQXvud<mt{rH#M{{LV6H5f<G3q&9=4zthQw{#
z+wnrg8NpC_uVoIn=>Qj45;8=425#jHy9FLvg8o7K`NO_B#l#ondUHZshD3kJv#IG5
zYmPJWckd_&!k#QPcaP&r&LHJn&_ca9G<6k>AzUgIq*d8u@P&cqOPLwbHadq9mAIPv
zoFP>yLfs2eTi#~8AUHpQ-tLzBgOyUwTM=}-ejSmk4BwLmMJce|&^NlLHg25ceP0-+
z)`4gL)iSu|l%pJL>l`Zw^j0=_5Kj+lj|+2$>WQi@3kTyO(Q|3QA=|QK`S+{Y4&kAf
z`k@<S&^2+^LhuHzMLZJ+PRBm#;r6*2W2!uR;b}t^(bzHe>}$61B3dBo@nrln#+>^!
zR&}wwnnZx<F;Pd|RKGJa&`W+RuzsT0LUa>`uXUak-ae?8vpWDiO$;zqHpkqlKg5_u
zC%Ick$th`cO(!lM6GSE3-$Dm<D?nba4|eBAgEPV81La}S3bT5(5+1kN1-jw$i14|s
zWl^#J^R?)me3=<GwLY?=5Ub&fAoP;&qx|l1cF;82r%!0yuu`8%$|QYUb)M2zw{K)z
zp*TYZJ8%|k7X7k|?9N{35#M;jNsCoQT@2}5wgZL{lw`8uDB$6ArV>2ammGDFSF=vN
zYx)P(4QW5Ya=1*w<^~H<QxdxQvd$M~>O7o$YI-mzpPS348+c#_Z*tyeZq_560~ScC
zK0WU%N6Kx>6>k`Y<z+-rzt?G;d}IiHDH(8q-(^i!C`i(0uC)zKxcH=A1niW>1nTgW
zNkRqoyp2O(Rulydm@E#dc;T(7RwRmPK$bLO+*5gZI}UsZ>csTxZt~R{l(W*XzH_1+
z24V-wshD#Br?lS}_A{SAt=5!6UN%dLWV-h+y-3tEf;>HnA-~p@3D60PB!i`q4uPrb
z>l@rXsACdb1E&L%u81<oYEKV-KJ;#mu9Pl^5NtM4$>fg;ifb)<<(?#p`s^$7ymux2
z?AQR$Kp{)8t~`tY8`So_qE{>%uN7dVYk?S3o=J0}M3wNtUQ2V2%c%|!fV@V&9eCW0
zjuz0wPbI_kweCuWkhG;s@gs~gxU`bFjV1Q}`U#AvkBhfnn_6eXjcu*1p&KtF-|v85
z=efKm7cxfbVSvBJ`8hiBQ*7Jy-F90N@f!cP$Z{v7oX&Mhnsb2`n90E>bS0A1cCBWY
zlT=hLwZ#YeGAI1I<r_bONPIB@a&b?$Tw~J>GcGtyqs7!@!LjPx#~Do)^k;5Ynu#lV
zcn+pw0s`b^F)3Fba!~A!uTWyfx!))JQV6XSBT=`Om$r**Q6L`_LP{&tvOj!#32eRr
z%Uu<C7$nE;4o(j!&`Xu#etZCHftg+Wi|p!m?BV}WUHV%o={L-R{Vz5I{!@1KYpMG`
zU>3j9mcN!>{SNZ}R$clZmiT|tsQ>k{KaPJjl$(Qt<F8hiCN*VZ*N2h2FKaM4A)TY}
zLMPP|rSp3OO%32lH%D>60t!LRmn%#MiAKCFx}NW!%PZ1FF1}UU>>J=gZ0J^2tE(>+
z94$@`5%k0cs$LvfHnz9vn${6e%t}}EA`*ACQ*C*U>)R&~UcIkm+>^_K)NH(u{0P;<
zXyJWWd{=-KZrZ9z=0F(wE)*IT6b~N~MX*GD^-YUxGf1#JbVQxwjPjb>n0+mugA8=S
zh%$J7Cc4A$hwNp|*4Aq+j2aI3^T41qPGsYU)<`brx@NxPK05E6>TMqkdYJhWkkAuS
z`JS`X*)mSm`;)^0f#|P&7RM55me0$#oJ&|v;sFoc))_TR)VC1BX=y)3JWCPe7gmNz
zY>8mt$Ka?3$A)S{Zf=8Zci;|9(s5#KTfVyD(P2h_*W_vz8eit&S)zD6l8Vm4!QpF#
zYDGZTR($C;*I2w0nf)O<tq+5h7vgj}Q0f@xuS2zeAq!c;q+gRb==^JDQW{2B^JRst
zsygTl{(7-dS~JU;lu+p97syb1@EJr~b7_j=D=szzD><Pl!EPaZq#_3j3SG=!bE=}s
z^AEvg`dLV}R`6NrCOZhBaGniTbUVA-6s|T5iqs6t`?xiW98bfzb<ZC@3FvqF5LAwT
zF}NQP+m`TXZXe;jQq8V;_!L2KrlWR0^!d|Anq-l0E}Pj0WQ1HPi9kgEq8a%#4Gv|#
zaW0h01*Ehbb7Gt8%Rq!x+e0GftCGr>z|qgD@10hPK#PLT)vFzZ&{xWZTg*n~n37nd
zsH>ZBZnjhxMm{glM+TzV>Qi}<Q|mD<Z-fRgrC#%J#Yq#`p=AUmae8FF_}Zac?KCK6
zd2W>4yNP1B$v{-@ZPt%ZYWKDS+YtaJ6}XJx!KJB%WO#c2gw2QHcc)Uv%W_>djpuL)
zH_~ATYr=Q(C>SEWB(hx>0dOtv^s)W?CnhIgtm{~j?>?$ayewyo*ZlFUNIta7O8%FT
zbtTJ0L{k7&9%=cuCZ>w=oM`tt(auY)&4tGPbR!&sr*C`--_)s-KzrDA_3uIVx$dJ_
z?;IPz9phQ24|-+BN;dXLagLM|T!4Mn+qe6%o<zlq#@_rwqNFaYv!L=dC<C{-m(ZYI
zN%x4iL%T?3@i8YVvA<=TVM%C$*uuxYE#o;fOY$Zi=FfB_@F5v*-QG1@n|fJ7;KuuF
zs-JH(0ahP7mz&^;EmY1SHi18S16uAW-(eNVUd)#c!QI0WaG=1YEd85ofktkCP@O#T
zs4}uVcLJ?ti-i45uuAk+m^0tN{rToE<1orUutV`;zPF5WcCM_DKZL5bL}@cxw2`6f
zZ`&`ym7BEua91HH)RAesJgHk~+h>y{=GI}spU)umkIM)Vn)`|hDmKPdrcw#p>x#EK
z{6hmxC=lGS5&qRbzJ^Nl>Gd2&ERf2k77qE`4$F!JB0i_?XFLZ9clwlrrlF#T!}q`)
zoE65EwA3ZuBvlDXo=5}BUCrS-(G!wX>C%C~QbK`vCEH1%65c(M?=wUas|u4k!wE}9
z_=FlqXIR!ZyuD>H`HHTqgglHEJNB}fl~i!1jZGLQ-KgRZ2IOFI1FFZ^Xw(#n#E~c_
z1BsN^!z<6S^&*F}nxxu44cDDU!yU^N@#*YiEByLBwlDB66i={1KSl?34eGkZvr2h$
zK5?AUZqGIFp&s?BhxKTN&^jA>sFk6x$V(GvJ$vlm(3@OF38&5(?wd)^-$Wp2T@a{U
zZfj+60nA9saLDwqO<+Oqk&>=zK`%yQ0;TkSm^O!lYBYTQu#`i`{yMm)omnh#I4%-_
z?XCvi%f)P1u^6JmJb9R~JE3Sw+YGXv;j=cT<J7W|nFzjK`DsoGzn7)zGb)zzn$i})
zQ#b9ZZf88L2R>aV8>o@MHnOkss~1AhMdUyq6tM(X44B442wfeM_OE^;%nzHTWG_E^
z$iAd{iekWUTs_-cCB?56HG;!e3)B%Au-5!f!ngx0R;lgE61u~)(<<;06YCA4v5{jg
z$V%zxa81$z)$Jm2BKJ7vtLm-QJ#He%$iEs@cPXbWMUZI(0(@|%l;Op&pM+9;naYdG
zYI1vT%PkK|92*tRS3-qLp7Zl^g~cf*JW0Kh2u6i4vSB|xFUyLdq3^A^CS`q>YGWh(
z$j8ZoWN&wD41{B{T5p?vNGd@qU9f95f$WXKBidDhm8hg%;E9b<Iw)nCcSk=Plqi4g
zid8!JnlqI4ZJ&#8obZ8!6gTWPDK#`NhwM~Fkq(Z9m1o6V<%`3yq@wd;_WhiJmEE!o
zmElpkj1OCp<TuX&GVNB$7HMhpsGvzxgo83CF-v&+SGh4?R5Ar>*;J3zaSS`$v1gx6
zi0GqPZWu$!;U4T`OPD1QyY#6-gx-Dog1Z@tN+FWqnnA9|lew&Q^#MFL0&vi30)zI@
zBQiU73v~8dkPt7SMxB}Y<}faS;T65ZYCLO8y+Zv($;{?~uk!-8Y2_s&3sE@}20p$_
zB%^P-su^W{XSrE)n`JBotlr;-7M!{2S|OId4mgk>$wV&hyaK~loXE-2R0pz!u%e+f
zCoJYAiKcny*D5U(&^lV2R|gi8*<%z~m3g>O-t^kgcKW^s!uZdlzEJR$anF=J+Ari#
za!3d@HXSj|05_h&t!Yk=TfY?1J`6!3H&{(!4`91pD`1f`r6K}N02f%BGK4R)0s$zq
zVkjKO1YMC|di#N@(=M<)Z@S~+Ryi+L9bM94xh{4f;(H^;YnZwmoKRw;X(Xke=aMd9
zVf3BMGC-`f0eQ&J4^dnfa@UIW^NeY4LO04}YzS|$3Q6L6rCL&6VlEfD;zWn!WnL5R
zEbJmp95o%)=kg3CF>j=<wz6cgrj-oID(jsBz7jl-vA~AY2nm}_7m<fS@d=Y$SduGl
zXJ4C~*&5i(ct>AUVu*JXOKQJ0J`7neZ(IYm9)i3uoEWu}si(WB#`?f+#8Fyt<r#F~
zyB^S;B(GAIdeH9tZ;a|0m?hE^CU@7T8G~wl6RywpCQ=!eGFYMZ>B7uv><qFud)n(!
z1F1X{EKf`my8T$Vgn;gE$i8NW^;|i)+$UmDg1VR}rc=s`;~caV-l37fd~Gc71G%<!
z7z>vkRv4BX))*EYRuPw5H_aYP>(O@@#a{`%pp>J=!dApk&4OWi2Cb=jiEmS2!4qdv
zCnL|BaJ9y%RMz$0a%Jn~2B_|_>+yeCc|nus*Q`+cyiAES5iT@{kpC0wH3K2uOZx{u
zGfceMQK722YpAKR!)MRgrV*88V<m<sg0$WxsqWn8t}cP#pNly!d6ns@`nv6?c7u8s
zP+5i_)Sph0R(UBqH8l9Hn)faW@&?cYT80l1WR}IH+AkY-Vd-IUsC}CV?D7G=o8GNQ
zh?zCVM=|eJp8L&Hy7wMSywYx3)NL=(%#7n`>U3)1l!-l8NJE^9D1+$qU|BGqUU~wC
zT0YKpP_cU7S4#HjqPic3syu>^m_y$t(z>sDq7h$RkeGA=P#r&Haud^RuoP<f-jOBu
zZAkfVeK~3z0~4_+w#@@Hf8c6Cw#XA9-chXWm-#N?OU8yB<Qybu1|<pv6M%1f$m9*E
z_Om^j^8|2CPCWwW)^VZK;q`X@7&_u?^s`&`n-j6FhEi>b(G^JF4pn${H?~su%Y(9;
zsAE_5dlVp65t8`lbb;e{IP71i3x8_qzlYH{{^AVte@Yj){xMtl^ZdWSqWyW|zb(f6
z|H&5q6^q9F3n2gV1;Ze2X#@DRpOZn{#t>jCYHDn6V)_fPgK-8pnHt){xUUvz{2D^o
ziu}hA!T?{+ws6b%g#nlupq5mb4qyZ+23WLKRsxfV=EY>u<fPr7??0y;D63wV_al9m
z53E@;N9;pcy8Sw}dlH@M3^fyI@!eqQIoEnw$CxpSGlq<xx!i9LC!Lam4QpB2^H$yp
z6vyksAUXQ@@KxkW<USwy>G^ti_dtE3!T)8s8W>wX!NMaV=BRv3Ga|@sUZBgnLw!6u
z4S{k@ahZMsu@p%hN2H5(F~yUa$X<ZP($pd1c=dwk0V#bK9em5XL5bc|Tx6S+ZmQ5y
z-HZ!hYStM;vfx&3(mOw0<e;KsDWBEPN?3$#4;hS@C$xxT6=fY{C1ovSwXqoOHmx3`
zWmtcEo`BUKxcHeR`3b96lvq|x@z5Z!Aj(+6{i7AE0RjP$cmvG7!h6eVO&z@IMB{7~
zMF5NcHi$-S&_q=IJ^VpBJ})YQfL_t5Yd;|Gz+Liqr!fM5kf|hZMoztgEH>z4TO1|H
z<1*I9J~W>o$`jlgSVvWFsidLik^B<jE^Bl4`%+PO(7MWVvl@-|+l835OIfA4%5k%*
z7-B9_gy3#U@ub2d`uLvO2pM9~s|iJ-Gb5MgjWsw*I4Oih0$U;_WuKd>_8s6#rKzZh
zDl!yIV77kVU)+?|h(0E8B#1du{>IVBW$)|56b_&r>?qDtHTu*8npF{BWn)&}QDF#G
zZbQyi4;7lyPDpYQ!{TaQhuh@R!i?Uf<7kya-itJv2nw-X&4&B4CNZ(wJFEc=G3S@S
zE~{jsmTOuB2-s=_R}ndxK@g>T$OB-C9N-b)9Yst~>%eLm^TJhhPUW!#lRYUMT?EP{
zX%pNkuSsqU+Jp5(o|Hj%Ss@PkQ4N&n`X3NiPDR=%*}WMYaUry2$@6{R5?TR$hT*%u
zK!{+U!x<Kp#kHieYb8z08DD}+Xs{xHdXj`U!>N!J6NZ$9CHP7g*;^2{YCTR|uFB1=
z!gd8&+sNeKh%PN5eRi8Ar60@g2Zsp^nMdHm7s`)yZTex1fP@#-&a+D!sggB^9f9a0
zOrfO9u9?O3Wg)r{8kIhc;&~A5XBKl(sUj+F8$k_roS5QElN&i6nuX6+Pkm<K;YN)b
znN2R(NWQBoN=C(};2-SGVY8L>_>w~3ZEz>Vk{1X9xdX)b+51)2Bi&+V8`-^4OkI3-
z{bf(lmTeM&U%<a0Nnily!q>M`q%;h+8_94upHxfKaS(-3b$0#j<>EHgL2)zW%cC1n
z;)<DLu*ax-3SBx665Zr$dG`z8@iAi2YQHI+spGL}+(fRABt^`r-N9*a0wLUp{7N3i
zbddlM(K^PcsHSJBG}jA?7db1A-!=<Hv)nJkZEM@)G83FdZjTq|b1Zrb7LgNE=E0k>
zf{fd^!bmiT#e1}Z;&VRBb(`q+9v*6T4A~0b>y#f&#mzK_N5zdpL0(f?ztoCB&lIzA
zAz_^?6!&fer{iZjE!Xd!?zwCA2&dOLfGE>B^5y&Krm>O<^cRk6O?kg%#hF765IWPk
z@b^?ed%!LkHY%eO-w4$f6o&aVF$xnb5%vc`W=y9Vi<vs7*ixt^tM1c?19tIPDIn&c
zRA#8!%>UcI5-q7nsywm>1+fEC0iSo<SfSmr(JO)%cfkVp!6>HnqZS)bV&_PpSW9D7
zm-dtr#0AbOq`Od(q~tXIAaod*+e12zsS6)ruwGWd@>moqLxV`n!x$2(Ud|p1%kFYo
zo(O#lb)dd#s(R8e*&;S)l1Jj@1`GHX54x`kw*GqtkopgOMisTS4mqdaQSII|69sf{
z7y!RWaIh&AsohgP!JpI-iQ9Og0S&4spBcC?vAZCtAa{qj@N2ob^op6*Mv*(!7I|Ky
z+S>17$WVkuV)~BQR!{Tpxp}}yl|v(1O_+Dgv4+=Fp9`<94D07Bs8C8a9V(xPAa*&@
z!AN(ewAvA?bDgR}`>kjR^Vc+TJfWfSdP8}-6ZO{<IeygM^m^-*UMu=ujfvZT6u}_o
zl!NVohF5I*tx&73I$PNC<#PiSGbf!*TvEx{%OKzj_=qT~pd|aK-Q_2zR}MvL9hl+2
zGbkb8!CgSOy*<idhLqLVE|@YI-ikj{76P`<A^LfHE*`Lxc+qGRZx92Sff-^N@uQs~
zGo@G_iGEFBj;YGM?T?hPVC1q&VTvEC<J(`B|9~koWeoAm%aIi_6Mhz1!=YR=U;LiT
z*mE*9&F{|$RDi5(?>sJ2u)$lusSYo<DWqJow#4x>ttSCgG}P3;SaZ+H2-xKmzg&Y7
z!Xo2Tu2Wb#hcXwlQmb4xfRLf=$qx2Yc`b{^i0)4mBv;8#X|E4E=UpF__Y08GIRqy`
z<sZPXW(+!dcgB@@rF$Z|(vKA(qSlGwn0~h4mqD1;g*fO<iaZvaLf<Al_Y}?%jRkSQ
zqdoSFng+e-@sCw#TZ7%u%dOqzubIL|mXarTc2Azq<?1v%i=C(G{mmn{v2c=p%sg(K
z-T!pwrB7ru*g&Cm3<Rk)UH!3CjpGK7#=7*7%hQ43U#B{{gm(ea?ym##UVBD=nH#Qs
znYp*r(}AmRN5*p}=TjE>P0(p{*4gKERyHyIR647H9C@KHP<^Jxl0XQe{Ksf9LG&ih
z4^$rp=SojeEm4P7QHH0|>)W0zg;-eojiceGTR&P34Z~GjbiPfHpC#?oc$m+_EegEO
z@X(bnc}vAk0?57Oq`Ft_E>|}uT_w)jA@KOzZH8wBw+NGYGgqzoTXTUqtdV;%E`BYD
z^ag<fsOJ`^SoXsJP;IZh29zI%uA~EDYYY#dl)>Z!X=UOb*tT{=n~#yk9f!^t_|Dof
z8>b;I*5)B!KC?<j3asfsilo;uKKs)0CT_rh`+1U}K=ozG@X%xUDo$b`ck}J0IP-NP
zjJSk2&Y<(^Xx==Tf~E;erF<Le3T=lnKFn4XWL}uus_G(n-8oP@h{hJ#`b2)Bv$qn<
zn9JL!j0{-MT)=Qbd+K>K+VB1sGVwn%I~@Pa?EG4S_?K4Q{7okQwm<!sQ~QUB%*63u
zX0q<@{S1Hp_TN(Qza;Cw-J||XVluHX(zE@V9Kpi%$0y6L?_Bgu?7udxD-$xPDoOp<
z8>IyR;K0qm;O6E=@8AKju(zYPcQR*ivv;y~wy<|_W?*JyWTazcVsLP>H#Rjjv9vRH
zW^gccFm-ZfFmtg1*xNDuv2>k<-U482L-ltN|Fb9mpCabqplAGL$}|7^`~^dCveC11
z{F`t+?Hx@23|7w6*65FTjNA;JoDJO=EFFxQ80nmCEKN+Ee-DZ4_kkmSKv5ij5sLcr
zFa7U_|C?+7TTjSuRMc<nA-|3E`)g1X=kFo@Hx%_poPSZ2S7GZq?=@d<^a$G*nbC%B
zq0E;!)2IG2C*(%tGrsEJ6Qb156i9{=?mca}^#CcQcyr93e1WFL==d_^E8P8SZr`D9
zz3z}^n%U6lPVFjrg{?{`77)Ryeo)6fc}za#)6S(p4ab=4r%r8i4@Qg0h8A76EC5gW
zEPyVEKTWztt;Wym={m#BvXOeAUNYXPG4Q@t7MA$6#szmI0H4<WTLB@$o;JRZiipd;
zA;`CQv~&6aZ;6bav!5=w;L|}KyuUg$4h;2@Dkx<O(iEsM2&c?|*`-=Hs@YklN+_oo
z4x$nnCzK7sDF<^%Jx0PpXMNl-bK-ii!EWsEj)XXx*iMAxue)_egwPY*Gk6gbc{Oc!
zIcaY*C6CDV_$pfzb3gl#4EkEB6om}GnTP|ISd#Yg<gTsEviy7s?NJ1w-WarOJb0dd
zw9h=nu<7{#4Otv;UV<?-eqKA<F&S1|;>F{6SIf6aGd0cC(yWfggKW^JP-Nu73c15y
zaJb2c#`w7*1Vw;1B5o4`9ZA=BvTq=)Ly4#QVzDX_@4=hebq<!BUaU>waaDL0cZo5n
z9rIZ{t#}9H8;HyGeD31^VehTOqUyTFVY*uo5QG6Ksez%DkOr0R5(XHW0fz1l0VyRc
zT9lLy5h+Ea5fD&mN$FMuzcaun-tYZ9ufN}SUEg2db=|j|bLQ-|)?Rz%UVHCzyH@Zy
z4fl6><yun3qza<%whN0mmOXDd56i5mL8X}GS2`ziC3CI_$tGh~-?dvGk*5z5mcA1h
zcTj>UnSuS0xCJi06f7b|>}A5!+VI_GQ7b7Iw^V=Midm5hQR}MDKGVa9?m0NN_=(F(
zb~^Mcmpwb=peRTnD7&{CpDzuaXd8W)^u>kl*!HWy@vOvN8=o~yKN>PsD{RcQd_=Qr
zGivjN%p@x2vgpewDW+iF)zW*|D)5kGa^LY|6zA=Y#oWja;CxwAF^T?guOW{WTB}4{
zrlNbFCU5Co-2Z7t6U}Yzbk3!`6dyD|`f(|kiuo(<+mIn#Ord64oLexJ$sXGYnojde
zkFx0Rq|++Az?#fqpjFTMu7JZpFK=csiDRjN^#a|&(c2?#P)%`0=I&m3!)`j>+v>az
zp@ejpw~AKCUw9s*aS^R^O3G_!YGhJQNaP!?@Agt$UN?7G2p^4*Pr6NgOT&gjG;_Mw
zy7l|@k@BgSqsiF9{>a21A(+Hzl=O)dB%t(4yJD?G3gVWH*I`rfKF}V*ol)03L~<$P
z#nF0p#Qo(37gb#m>*~@}M_g4guU8&<Ubc^QAIk-Q88Az{+`cuA-9zFUBd7aSrqp5b
zWgO<%lC)4#NM8phLrgj=%T*N~ndev3&OI}^z1p<K-DGdiU}Vic&PF0x_EFwkqR~%s
zzH#^}!J|c9k~jj3;QBDP8kYF4%ief<5g}&UEEx4!3XIHMang6+(tf)^F-@RoeSpz{
zxMXu#WZ`m*R&C35{N`ak@8a;NuN;>?&aQmuyztdXjz@XE{(U})!eO5T2|<o6zK^&$
z@OAhHPtDLmSc4IIJ&H0|Be6a|S_hYh>NSb5aLkQhUy?%I@xIVARBaDa-!RcOzdk(q
z<{|eew&#EsPvJU`W6T>K$C_YF&6my67MYN&!f>4zd}y!K*{*%j4#ZBO9mc~e*6s{#
zNQjKdr)^2a2VFxa78e*~oy0#xM|bu5Y}p?tW6U64!gQX<J*QYH1|DCzT=|ofMS`6o
zcVQ~|9!3?m*S$HxuyYh*GucdUl5V#6cbmaxJ2L|KRu(>F8txOl;Jqff#*W4y$XcXZ
zDmTUVfQt7X=YW>#B{VLaivc8#+>Y~m*DiixzqYd(l^32Xp_2pNc-%?)9SuLHczgIp
z<yE&Wz7*jvDigA1n!W>R8}|>0Rqu?%*mQ9hPjzh?3Z;Svl*&u%v1G3%4VTM!3tqMw
zxJFdXcPn#XQm+HA=!(3NOt3$eLIR5-jH3)hhZEwC>B_LDzdii&ra?lreU0zvDp^mN
zy&cbw;-qP|%eA}_?&aQC%g-b3RoF#G7jXyeh8JdaB|Gy4r8I3OBwZ+`4USsk8w=`}
zsFg^wGck>_-A&;tYDj7MoUEul9DET^h!?#g`~}pUy+fF9Gg?tY?&ed+#K~^NymH3w
z@Zst@Gxlzb<sS1><-qBrjF8f$kBspK!|GiIsYCaIH1K=J!sZ)w+pfJ$$tx;|*Q7{f
z#B>;?aoveikLJn$OhhvN9l>a=SvBb#XK7nqs`%(ORZ^K>Fil;+b*t!sS9BwxsR(uQ
z$8&Yy?E8{}zB|bjh*9eC^hgY4Ww4!WmuZPO7VghBXgubl3XNBa;rqj4+!Hj-4bp|8
z`HF`HON5$-5~YddhxfDg9%LsE`1S<ToTm&lY>%k2dAi0_@IcCY>ZbL`L{NbU#er?}
zQirVuME*fNAx94{D77|t-OzO`(YzvDEWah&t~`dIlDuCzaTvmVUrhV^i^OcYX!Zet
zgvU$)Q$!=C`qDJ}o!#vQml%61mHo8YYBI_M>j_`11_c^~c8h-0b}O~LHU|Z<suiV#
zzkkXR|A;JZDX8cr)$1e+p3F8q!E5-;D&_KIZL}W=ePxow?wq3-QV|tbcMJ}hDiTh(
zuJ~TKOZ}7fjdaIXqU0DWSks#FXfB*;T309<X^3N+^$OKP)$?;KV76u?aWCeYTNKQ!
zzR;*f-6Af}E-`5SksDN(pKz61$+IsQ&6+Q+<GqpW^(oR1tJd>PJbOK1E{JT8il`^-
z+{|LwERahat}N`5_K?VP8~BSQlaI>2+%bn{Iy{#DvWgMuE3FxKAg`mdzmPs>Gv%?2
z!$Gx9v5B?zYKH;uE4_!Nb#@da)zN$p&##3cLgQRY!jC}V7e*!DXr#^`;lE<9qbRFO
zh73y4x2H2)_}b&Ko-a;hC(SO1@8X^8k6T4LlhEq%p$q$i0c^?0&Edzjt>FV(pLaZ$
z8D2RJ8?dOXno){v#jRZ&Lesm{t9*5D7oUzxBGzw~&%m&&$0&AIKL4VN=!3F;46Ey}
zf@O(wXjchT^Xk7<p^Nkgwug9b_xLag#gy00;dAsBcQSylqnjp@t1B}S?j+B??WOe~
zAL%K?D5Wg+)0pHetf)~ll`z?@LtOr0w=o8J#s}<m8wDm!(DX}K61keq!{^V-?6tSv
zG9bL&zZt8IXG+ar+4j^}Gwcy)IHVzIIY1F8%x>bd8@$jOntu)_m&|kDG%kcTdg)o0
z&YD1Xe*~IROEv=E=_-m8%5^Hz0MXz0))^BMsj^5(-jUR729+NXFvs{}{fsA>Aqt^M
zWl7&aTUkKWSj4k<U8A9uL%uiWGc7a8*ThdGtKMpU&EJ-j67Kn$@QlxuRJn4fNArui
z99dl;XHA|U)J)yd@(XfPyPjWIuJPK<icpD4@=kEk3ddlbYjE=|z9YT|2RKUKtCadW
z&=`d;CQt~n(RTG)>-Q~lI6R*GvUy8gAPy|`xH9j%%w`xvW!G(19T`#Sinz{L&F{hN
znQP~KY2r3EiZ69CZ)4`{sS9hav<VRwiya9rGG!A;d5@O89M}-G&?hB$Cn0^$n5=Sp
zIW&h|OY43VR*CUAA%&h`lXV+kj4fxhT-B4ka!o&c(_l7%XpMWT$)u^D0w``mD1Y>5
zMDnvf+5GIdO__<c^2{e;>^*_=s!@W@4;Ost0a@wGRPol^1(n0;{7Tz`%HI|*8{pgw
z>&dnVPhEcY7HxldTXe=6{<&Mo=mN(#&G}8s1s?1?-l)E-SKg3E<hTm^yi0nkrnu)y
zSwSnQCJ`UMZFX<cH5Yz=Z#wy2uWK=k44gHgmW$iIKrQvG9fuDdA=_-Zvvw}BczJe4
za;DEnA%gt8YHx8sS6&j-2V=VBi~N_|lz<yqLRWv{Lp+4%@#`j7(bj#4dYF69+dZAL
zdl+5+v!6@!y}kdzp8AtL?Yi9Al$ZuY4Iguu&jIk&y_X5mdSlOXqRc)P+_%eO(h($?
z_#C)*B$2akT1D2g7Yk16^9H|vYD)@C1zojFS={Y_1+If(^7?QAzsomT#H00sG|wkZ
z@G=a%=j#oMRrH=@oLZc~5mrvK#4o(jbTzHF&vd;ewm2SdBkoOWpGxCf%jQE;MyYjz
zpT<?h0Z_D8V+bBR**6@I3^+2+twY4n_g7j!87ZV@ysu86qD(2+!MUzW*)&k45D*)=
zF~LA@u_o%A`*4jBZmw=w8NI>Cw#gkEy;5*x>_@=XSEv4hSGRSg#^sodTbA(KG-y(1
zm)!1BU8NNPHiiG(W)Z$-A#kqlZeYjy$B0>O{F$_&w^UyD*UtZZg#O}y@kk0Ye#3O=
zm7@Ls%#=mXRyh8>pmLnq{G*_9x+LrWcz3_p+4>DKkvqo!HwBfmdrp2Qa{r#Z{r)EG
zU*>Me#2=L<qH;Hgz+Z3fe|+In`KRiQL%;Cx>)ra0HOj4p+LF7<dGGcG2UH*7Muo-c
z-wo@EQR|)XbOK37Gd%TwkAd9E(xBzUz<8@M=7&i@*MQBQ*C=d?m+(gpBdq@IrstaZ
zi$f+N#M%|%0$3?OY^Q&(Rfafu6!(_zXGZ3Uy`uoF48?BI(&rkoy`cp>OJ(<xP0b<~
zBT41;#v=6isM*HBP7^C?ulAz(;g9RwlNq%4%B3A@3xcy+AZ~2$*`E&-e4!m8$n3Q1
z!+2O7F#2<2f2L>RV#s6~JHE+A?9=^?C4H%l^u6G<`z4Q32*>vFzTWO`KUlBH2sD>V
zBG3GBm)$(^LQ<{no8<Y3!nwMdcqeb1G(X20f}H|?^W5gN7u1!i0(}pHsWaRPuj&l9
z)P9V7wFe_bXzLXBR0UMI+(=QHCS*htEU48jmgEykp}5%?x#yi5(}Xs{`L=~OF?X^?
z<W8Tn-0Kl$W`+tHG`<;UI$Y@s^$!<?vR`kq*-l?%PpPoD(Dr)s65_&}{5^i6oVWh>
zyoS^Vv?C0$hW+1{QVMA1=)|a<gP}9V-i<DsGBJ7q>Un(U8;e({1T^jPL=dN%uI%%#
zF`46b37sPXs#4R1%sp=gtEBs-1_lf;8tB3M+hL;pb|w(U`y!c4d|pECb$*riSkT0q
zg)#~R2=NV+AyZdExLOO{lMB2BjSJJWGB`md3)R#wRRb9cRXdl=_Vvc#hVDdqJV$H-
zwp?Ur8<J*I%08RpOjlVx*+Ame8g4K@zC(qZ{Sz+*VS=WdNM0MggrOrwbn^m^krgL>
z_*3n44sli%vq#Oi(iL5ItI8*rG!2{?FB5{xB|z6&j(VGNQY=Gj&&goIZ_q5<#LOG$
zNMOzXh*82(J6E0!f3lP6pX9Y`W0^RjfScLeGjar*h$y7+w=q(X8+cLF_1&Q(^*OhS
z2+_#+twzF%$E<<P71{eVk4Cl{o1cnB9LP?FI{hR*tXS=t+@u4gJbt;>?a26ToDd;X
z`I!SFw`HH^jacIMVacY>cp5gDK10!;;Zqn<O-Zrsit+)I$vRSQL$qQvxZN>D4@&nx
zP+G{AzqQrg`2e>bo6@?LW`P-FBlygeifkcG2Kqq3Q~nJvW!asg$~+V9=>Fy-BgOB8
zeW}qZX<i}vZcfz>x1kh4tVy)fbnIFFQD!eZ^dwAh(=i^R8AtQ*Q8-;3;mOj|Gw&0C
zv^MyV!|+x~&)d})nb;agZ<+E5bl!EodVh8M%a1jx#8`<~aWV=_=bY%;c=IU+YcbtR
z+cM7?WlCkF)FGcQ+!EXrlzSQ;WkOr2N-t&FpBnT<=!(HE<;At?qjSxLOs0{Qyt@n6
zm7+NM!o%wdV@f|fqsAFwMdve=3FR$u)CuxzXq2pt4`7MBpunubhwX|}d@&~2`tq*)
z{)d&m`-bYu3*^_SYxT_6NOob%`ZO<-hb-GdR8px}Q^Z?YU|1J+@ICFIHy3!iqsyyD
ze0!(tnI_U)%Q!qP!^L1al;0z$d)f=&Ae);6M(O0A<Ps#*Ty%Kq;B-0OjJMG0-Z8I5
z5~^+1(QXl08ao9IL<ibAzfagVc<@4KW*~XW!=cA8j*oheBkb^7nAP$uPQmIOSWm=G
zv6AIseqoWQLSg0)C2D3Y8oACvHUn?Dp4YD}2r<!yKF-<Sg-w|`^!vHhAeI~KUbSdS
zb=`k_rQ&C{;`tCQ!+pftE=TdufqYolyzXIfbs?}W)R^WrOKh!lXg1z$qJY4QI)qc}
zvqo8A+n0#LP0*q`879Pu=fp|%^4{W%v<s)upiOS`Wt?loJy*orMNLFEU|S#FK@^0+
zSiR37ibu#@OQoDGBEPTwQh0d@+Zyp1P0e7mA;N!6obr|X1K;?AE^4j|N_6F~379gp
zY4|b+A%2FuMX-JSrq-hGEN=SX&lZ*{9bCpbCf1ig>AHgnUlOk5RyScxO3KR{M-#Tl
zfN%PJ)trE?w>NlR6zyuI_$K+9*Mr&L%fdwn@wKlW#EL^LQnkv{`Q<{;x&l=Oi9Jd1
zh+c0QDXFOSNWJ^b5E!u+P&B|Khhxg(jbF{|BlkFO$Qv47YKyH&&iAl0#eOa-<_#_t
z6T8u}<p)cSxRwNFt4E_xWwZq^ZVN0VYqh)PQd~7iWd6QcBKU@SDumF6KhJd?mg|Pz
z``~Nwyj?9My}YoxVA%xc;*}nFDbw}A)wQ)R3*_2gAKk^`&vlY$kAIKutfYDc&HfPy
z$m8;eun<0T&Bqq(cULj<m&q*-YipJ6Q)sa?-kitzg7>s9?kh9SNV9bL{Nf9{+TdW!
zg3n)?4hX?&fp0bjB?BC5#k;AOK#Pb+TcPe+tim_0W<2pXY?ND_o8Av%u}EuU$!vUi
zv%TWh=mN{K1|hEJqm}%}x|;dDMIDc8_eWmTK4cSf(Pto)t{o@NMl@2zip`gXGoVf1
zzj5EATdK^v(^w<H$=Ekej$$#^P>jg0Sv|@}D?anS)$+=fpXN6l&752F)AJtTg>(+Z
zRyn`*r%LtB9oT``8}hsjdPll@o$>Is9%SMs?3_yjRUEf9i{j8jU380Um&o{Q3f<(W
zTBAHdh|^Vizp^o;G3u>6(-WL*#<>xYkgJsZQ#V|jQ*zIs`n$q@x(4$UdGKY$CiG;V
zbzeP;mPadQ%nk#%A1_fFtf4z^GbpI82FdnZq{-N(1GNXJ&#rJ$T=4CDov(tP?(;-c
z-KZ+Ea;0?icDH1ST>eades$wYeniFWMnThg5wy&Gx}Zu<tOq4)y;YG~7_N!*2-V9&
zo?k!onr21UMY8(6CI2~Myj4oRRMxie&iWJC$DsG2FDGY7iWxw_cZpe;YK7lu2Z!Yv
z7bL-z&m%^gSnMS5nhiXnJIcjuOCM>DG;+5bJz7)dbXlytWggNwoOH`<S=2$Ct?e8n
z%FB+5Jy9rRMA*kW<@t4$+*Xdq=4n>_{C=!C5qG!+vaI%NzJ%lz6T817FBEb(AQru(
zy3J%xgm?sH!Bcd-lWZ3J^2&F#?|mr+pO(&vid#I*`2HE#Xri*i@Qn7`c;AQnXzGU-
zBdSeFuH8AzhJqRiWL;b5pOE5yd#Qrp_$-q8itoW;vP2;FXlN4}J@W&?Cj;uq;-*3c
zcUE*kZG+A;Q!Un|Nlc2VcOI%S$|ilzmveCFXy~n;B>x!)tQ?`_$bPF@{n71nhR@Y+
znx;ku9`~r!)45ce`s0N=(#4Dwr&>M^rX#MWKj=x!dHSB5t-oNMYQN|5`E1#$AzJ84
zGhW_E;9Hsyuhnp)f`wt;R_(Tr?copeB*+$4*h+4!9SU*rJ@0ObqYjx=(v#r^M;Kb|
zKg3$nWADb0d{e&rnm5z4jGq-vN>lb0to0W6%Fq{km22Ji<HCe02%H?QJnWB2x~zUL
zJ&>QapXNnB;nPHRib;965&ro+?l8)aZAP(EIA4t;-qD>emqmzr?Yq>zXQ!igj+ifc
zLCcU9HAg>&+ga<JmeZOV(VU;RCTI!etFaUQ9JVqfYVuWnqP1f!|G_8iyK;0rg7=m~
z(~K5oh1aiCyVfsxI5$m)$r*CjAmH~$9DID*>(j4v;IhOrf3N6H(0{-rRX?He>8P+c
z-o29}+TjUB-BR48o~E3A$@<&O(N(3E@k1fs>Bva(ZcGzhmbN6ML!)O6>K{qDS6oYT
zZ!{)yAyqq<j;&%Ux?icSb9~TBY+i7pbwoIoe0mrwb6~-<(uBIHfiEtVx{bDuJY@f3
z-ja}a%t&=2Il}Opi}}u6;8h&GuMb+E)>jDZr067+)Oo?%$lrc={5H{#J`K+)EMK3~
z4D0;F8f;^=D5I?YsF*O(|INq~)17(cq`WnEu&Iyo!TMX5UccqGx8dk_F|U2ks`8dP
zc(3<Wch<%}46*rqusY2TGs<%4;bS7t4ddI5wPT(RG1B6n%*hzHL~%m9;%@72DN41k
z-bq2v;5(8If1aU~?m&nqwez`ml+USAeKV><;OKmcx_i}#kLmo3DK~E{D{pEJrAj;8
zS2y~jbi_wZmY~}=tS|WDgcF-`GVBf3JR`mPU4B>U{MW;_ajz{{bFGpJCvU(&lY8lV
z$eYej#x!KLAD$3h&NI5+oBXK)U5@3`v#S0!Hf{c&U@Ygn_aekk_Gr5tKkb#;EPxrk
zJ`UEYg}M)T9cf0qm#4y(niSj|SiJPE)s&GDqxMB3;rdTOWxwqK2R%X3490+;trN+l
zN2_mff6~l89TR9e;&0zF*&=(&9{e?ul(bQmh2+`{-#PElz|mmP%n^*8Bwm{ANNU2-
z(<O;vi;waS@j2h%pBVL@yMq2D@jJWa<Ub^Sr}fY?8*fCB`5TZ6{%?t&=y84SOi@wn
zEV|>w@A%-i|NVH=V~nWW?04dKrc`$(2mC)y{LWT7PZK|g7(nY^7VWAoyi5Ajwj*iU
zY}CX_8t$L}h`ICnm!xw~1(kI=Hqo&j$jSi|=$XmyhUbf~<-EJ?Mw2me_~Kya(|5<o
zuaWh7PL~3;mZJqfiqi|rzu@}ea2NZiC%(K}_AthiwtH9drk~T54l`~w0=qojh*^%t
zAFd45!PeE#>BA4()CLERbS&w#ZFy@nef0E$hkfG0CZhyNov$3*a%e4G#!XavL4FPk
z*D)~B5o@DFPpw{?)UP}zm3rkv_nNSCPy7Yxq{z*v+4z8jjkaV5o5_K=z3wQMuzW?4
z$or*eL6MI4yqKwje#Tbbi&yIQM7u4MLg~N_9w=i}2*C+bB-zFUXMC6D!}w+~apx;+
zAb6ebQZ0@^I`l@xus!JuHMp3tb=KZwpYPJwxtz5~9Wfy<!gb=C<G9TwO8WG~<!K5C
z7UG7CFH!m4QyHH?XpT7@8tv^G4}7$K+OX*|EPs1{Q&Cc-15?TVIt6nO+5IP?hHNrW
z75JT|+y^CgJ(l!Q5wyNmTMu8ocyvu6T56-Sx@u1$Gbd4x`t|TjqwD3%RpbX(3x;Z}
z=&fhuT;7DYQYMi3b$@^FRQZZjOKdT8@e#G<L&gV^57MhJ7nbJcKh^iHjpDL@<$BX<
zO!eHb=0_6&I}J{b3dP!!^vPbcEvqyhoab<(`PBj`C(H?7jOPf-WcO<~q=b@bGp00n
z=ASJ5(Bf&~l-HU4CcbUvT0vsMsi7#x4Bpa;Xx0i$Ogylnbvj%tH@~YG;4@sMT5)Th
zPy-xesm~35vsYhY2d-kO;CMOb1`#U%EXf4g>u7!b<rx>oNb(B=>-Cq`oExlrhCv?m
z_aQe7>lA|WeBBc6I_g0_1fh9d#VLD<AolzqE%^N)*}l)y=YWat%ljeDRRip%4cpt-
zV`hagynAovSGZ&d)3hivua_zEHoEmLV?7iw@lPD{9oXFE@tU2O|0W-xsx23Qu8kJI
z@Wlo472oq&y7SG->9|(x44#mtoy;3;OdUAX6YigATE*2(F?78IAA^p}ILrJ$n|cV5
z;%t3;Pn0*OGBIPJNWrRMw$LiP#V?`s^5=rT$9P}eGTxg7npLMTJh$_K*Lw5H%=lIO
zwZT8~J{*0)_(q|h@*rE1xwCzG`eXYo&hug(WHD6JW|mYNVVlV*l$aIqP?|&R!7)-b
zzS!pJOHu7bZ${?(Zq}~H)*F6+G>LS6f@KZViQ(guU4BQ^2XQ0>>E{H;K_0#{StV#-
z*CMg;Mz~w=AO4v5fwm>i!oNMB1KMF-3^5JJ<kQF2OuKr738GXy?5eui@<D85IgM0H
zMa+|rhhl9km_O@h>P1V!^-nuLuG$l@$K*NB*(}v<^9^kCJ#+<=ZH6J<5j?*@q<B?-
zR*o8{0^Y)HOj3A$J?%+2U#Fu%D|6=s)zYFV;q#tswBy9lZ0U(MQ^Qm|3H^lNM|3S_
z<SRlmobzi|YlY<F#NQkeL<_q06EA(b5%#@p+3vA%)`Ht`@|F}Sd`zeua&r%n-tND<
z#NI!9f0589GhqKUmt(Itfn*+c7R!ZZ5WVPOrOG)UD+9mT@bG%7UHN3Zyw4o(yQ(N&
z>&P#9-Fp_p!RHtyc$J|8E}n`>7(Nv<@R~Lc8&@n)P(-GYBG`?6>3(LV{X?~=hz5CC
zD460?Ty}dR(dybf5y?~DpJ>cSIZ7^J!aw;ej&j;Huk;1b(%K}yMaW*7kBZf-Ug_+|
z!_;KD8gb+-NZA|Gbv0Q{eB>OOX8!r4o(E9N=-@on(ppi{%;-bh<~I`wp`0%RgCB>g
z^Y#!Q-tOQYxdky}mBf0s@73nLS?=GU_W-AqXg$bOGod%mP&7zlc-cN{Nzobi$t0i5
zgxoDb91VIOxM!dC=gp3<`(HF~`et4uccF@J!u;7|U4(;|c_jx@tVD5xrHrJ=2;@e(
zM&E@#^N2$ETXCa@&!x~?uOv1}Y%gzqyWPrC5kgd?FO8GZr&<l1PBrv2PD@AMN08&C
zMp0hE2gPRH9h?Gu@2d~4T`TdpH5?Y6d;i0VYj%YFg?0ZsY-ny8Q96VL_A~mmGFw>$
zf_?VrkXyTxMfKMiuuMbe+M#@t+SE*WMRCHfWi)rju*fawWXeU#l3PtgEQL%ruX_%~
zJo4%%_6cUeQQ14N41+R6=_B4%Ih)PzBn@8jQeJXVK$NN(z(u-ZbfyUlbJ@qoi6Hm5
zP1`|J^HbfAZ3xv5#^0`kB)hJJC$AztwjR95`aWK~R=R7p$ZWb|9Fy9rNo<oeu2E5R
zYtti2X+VJI_M5iK`S0dB-{}S(*k0!NQuV^K!yY>+T;DH5%1y}9qVp2=l>DTNlyQ4p
zs_r_$IbTmBO5YrBq9I`q-5i&n_V4;*@N+zE=wipFs^7FmU}rh1&hTMYi47UAq&%w}
zZGaC2X`n@FXsM`L)%1Z(w6oZB?BEjYXn}ovG1Y|Ax#N<#RrM_M<B};j8YQDTWhoHe
zA}#f>s0Rw;&%?=iF5;|f?L9?g_mIf6Jy8OunTDmzS1j7Wn>SXp?>EQwrj|RBsFkav
z1joE8?3}v2rym~J6L4@vN;c8}u|%Vuc<v2T8bjZZnT<S$r5~|g4mw4^w1g&6iLuJP
zwh<o-(NXZ~pn1|mgy>SQ;Yvi>oJjn|A@N$lU3S_;qIEAWH}$8Xo$4Lb(C#F|oA*1q
zAJ`W=ZyT%ENSaZ~2l#sxY+i-h4w{WpVwX|&O8V+JKM3t=FKi!?wDJ1<9+#Bf#lFpD
zDoWW<pt(TkoD!q_Ck!<GQJOACx`Fjr+@cQ}e0v&cH<E^`wC~{~1XKH6_=FmC{f&#Z
z`U+$}^DesHO6BeLu%#JzqPu&cFmQkkn|S>|6$&*Hl5N?qj%*^Mo_UH7Hx*i@K_`9U
zS@xVwa+Bz_tEC#&Ic3W20&ISZ$&|j&s&N-5E`VQAZG2wxvy*<{<4a)#G0ML8=(4=P
z(G~2%CiM=pMcC76sm_7>F^4QO4|Ly!$<kwQd`(RhFr0qYn`Kg-H-S~fg>gf79k<l4
zK)IhDL)G6*reXhywEputP8hY-UV@k-`THNKH{$4uL3^FJdt4VBT#F>(8gyv0V?qP(
zLvBq6h*5uA*j;)~W5N*AKf76L+axuQp#O%c&ie%O$E}_pH=yilB~q`N9}KGQa4fV@
zeXRMi&}izZ>(abhlx-NJewSD5AxJsx8=lTP0qr-p()<WnpED79;MHloD#?$YBu<DS
ztolZsEj=g>+Ov8dHD8fJ-$wqW`kw5DuW&X{s>5Utty61hKQM~SP+TZcRqf`iyj+`)
zmP!(FPrzcVSQW?O<z=Oidb~Fy&ZQzh2o>IPzu9(Pr*5p-4S`-EIh@_#@Qut`o-+^c
zA5tKl5z!odDk+B1N@?(VSvVgWwizwcW7?t*qyIEU{`x&hUQq_Ttye6j+3Cv<t-9r>
zDWuJ`o<=R&Yd2<}lMP2cai?c5gwu10n6RWozlJFDwG0k$?Y{6#q`}lS>r9~=jLNl%
zedgp<7|d)hH=)C6n`D8RJ!6Wv`?g#HZ~xjiS<bNUStID&8r896$~>iv3efF%ufSJf
zmyDBU5+fN~C|k{*#k|&MQ48Yw^nfWdu+I3lMe!phS(`7bPa>OLl^1ih5Zy=acL%@i
zHvD)eS6Nk{5G8zc1pjd4y_z=F$i;jceV;+08jg{qhK2cf%}sbcL$I>*k_R!yT{s0A
zUtQ}(<ASCj^YaqecB`0Q4u4$aG_LXg0w)TM>02lQH7s1_Nq`n}X;}OSysQ877IXI=
zf#Zr{+g7A~O&@xy)ttHBhYL#BmDMfxutUT24>^TDzWb<W-%`)Wu!@n{F@g1v`!mZ%
zk6ML_`-=m=z0T)Hd9Suaq{hG1KWi5;HCtW_$sT#MIxU#$;=4Dsx+W0q+vL&+^1kyk
zouJ}LO4zLEXKZPuWv!mO)XJ+3<KwuJmX(1=T+TbBM`;@#ggsEc2<xSq%E$O1os7~A
zDT^L5m4C^C&hF~}4_Od0NBFb+1swbQf0qUE|But%L{C<Zg7pmyLBPsS5kWy-F+LFx
z+}*+9k5#0{DF163^gH+YKR$lokE-0SG>HGN)1b$OH}1|<AAfh4M{_8yYcB`!ai^ry
zqnImsSzGh+C*12{R%uU^c%*$GKP3;bq;Fe1ccsN3&g{3@p0T$Xm-y_zWpdA{#o07(
zac-*eU5*L^&H<=LgmkC(lV?50$m+nDKMhv%?x!O^n-_V$pT`uv%ls!8pH?g?)F13j
zy>)sO6RJ`NuAmvkdU(Jw`c_%l_^5%f)$Eg6DR)ox{1P_Z;;zy%k=9%GR_)o2FY{J@
z)4HSrIO&@8cWf>Bit~%|y`7ir(j&#`?r)Zft+o^|7I}w;A;fNYFmyYoe5?A#ikGeG
zEqw8Au~?kS#FY0&RDrOlc?jq2nxU+T@Wow`9~NnDOStey2QQ>cUQ>P-w=Hez%hvX;
zYan|05*=#)SVU6iQq3);@IIRt7FUN9u3k~YD;kbwf#rWpp*vi(TOu9f#lM!sF{Uf4
z<zC*;F8S1?#1Q6M7#(240QG(J5NFGzVd`iwpJ83bE=$*>`%ZP)rJA5`1~^7jItE82
z%EXpxb_<CK>)20-KQlKpmG~83^BT`GAjzmx^m)Bzqmy#UkjV2zK-lW|W047K@{B$7
zWLmC3bB3o`Z|UD&$=QBoZ|W8xqFc-5@x5n#zpQcc`u)(S*Ek<SG$qTmGiybA^<9)C
z)mBD~4auM6OV4(bgVFV#b8z3{AYy+kEi^<}LuyZbHSLDc#Zhjpotri~Kg!5x3|k#N
zqqxUbgJFsAKu`|UlQ5WiuRvP2Ytv%Q&`h6y6C=qx(ysC*npoz6pnx^BprZ21@D($@
zhYG8M-S~AGT}@>XVXF#NgBaTy{%rz9w&sIej1S9_eP<qqd>h)hec5ALcXArXJ+I^&
z<Xv{Cv8&E7rEa3`yOmm%n|k0H8-+T5q4XQ!PrV=?^l4pUzU|-8(hPCc{^${M5GH{$
zYOgQ02fGp!ilFb2y^R?hqdw)Q&LYCj7O?80z2mwU0nSgI&10PpxleUYTVP~0Zwq}_
zc{E_t=HTwFNQcW?+u2iSJArI5)u2&KB@6HbyTRN$>djdoSR11<C6ME4UfE`sNl>7)
zx=xP$x*ngi(`%Nkcj|S#jy?GTE+o|0NqA2H?V~JHK4xUkvrA4%Mc1frh%J06VC|F`
zWaGed(-D|T<%n*?-Pvv>6g<Z!JPdgT|E|2hKbN9)E1{nGArD2@J?rfds$$Qi?D>OG
zhghb#CoNfA`9}nUSVFwIp*SP@=yZ5Ee(O`JYb($O6|)!1cl#ylnMYnp6qo>~@MVQg
zwqfxRyt8TQnR(3P^>hmzt`=MWQbZ`s`9XYaKj|a~Lt1v}^Ka$CJXVAK#;hv!e9CIp
zeXS;vZthb0DS?fhx6}BtjX<&q;J7{S>8X~ksg~*;GFhhJsh_e@w%mA$7kcIgqO#?p
z*A+Ym0<5RTX;bn-R5h{1N_%AQT|57b{OUD)6-M<I3{v%$Oc{A|<sb?}*Ymg5?@@lU
z>xd$|@qze3x08xQqJZ2zUb<Mhkvu+P(|7xQ-=B$wrG70XO9?p7)8|5Oo8G1yp3yaH
zJF4G|vyuBbv|fMgPLS1M$M}}cOM}ky(N^|VBFdDp9knhP=a&`qrkv8+zb?DGIfQNY
zD_Rqk4_uSpLH|5g1icr}vCCgEE!S|tM>@Rpn*)U_hbMft2$w}@7sIgBK5JB9nVY(`
zhoIDkY^!lb{h}g6{{9RfWmmp+MR?*sUj~e%ncxvq%nu{9nwZ!0&|I8GnhVVm5w_j!
zF{2~`Wv#kcS{A(s9id8b3*6Nh`-TShR|a_V#N4#b6(LN_30AJM%=s6em1|5HTzUR<
z+a#)i>75qceeK{6ahH=R!#9)hu|F5b@)mvFtES*z^a#$ZArx#dVW3e8dwcPPaYK61
zQ5b&D(X~_+)}`8?FD4(}Soc5JkLtqRTHt(TegH<F$2JT7XpAOJz!s8aLQjT;d0FZw
z)GQ$8${jK;;|IDT9IWjvXr{zuFF}Ns2*peU4>ndQXPggZv4U+*Hh1qm1jcamYOoD4
z=o3M6<8n)iiDrKc>!x2hg7%FBEu|QF7hdkI-8Q|i-m%HbA=51%J(!7z{Rw)M3tFRu
z3luNX?bkbBQ8+A}j@3~cyJ>wd<mE#Q$=FHd?;87tb4))nGJK*3`D7AcOCRPBzew<y
z{mdY9m+>~5#OC&5&Z4{EW7^sDxEOmCf}7c4T8+s~IYf!D9z$D8S71xLJEd+UWIfx=
z|6-82+vSoj8l2+U6-gHdE-_HcXRg*Q{m|!}^Vi$RdbAfvUOkLcP&c5zXDMZ0XHeIp
zfxbJ(H9gbWT+oSK@t$x`p+EYnx*98`5x8Yx*9|QHDtakaLx<nMlv=!7!uQ+DdBV`U
zltx2}?&9SJlaSr(hJ!^zPPj8N3CmsXI4j(0{C84`uxri72Oc~wO!;;jheYgvzqxOi
z-rwNuh@R$i2G{#H#n$`Jg?ykH+5h0{m}6<|z)04DNKa721d+^j4iYIyoAG{{yCs4C
zI8BO(Z=uAfX(CcrIOh|NmA18jvdhEX_xSTB`u9?lQvLN?->h`a9tHU?N4v`OT$3~i
zVIxa2%sPY>L?jA`DCTo$VG&`;9lH5m!@@Q2dP~>63n^*&fo<>9`Stk@(SW!_Jmf~7
zO6+q*Ip<z^gRb2NOjzss2FZ8dgJM70(lk99yAZ+pjthL})i}oH{fE&9OO?;~@^g+N
zY!$Az(79%Ga+YFf;rD&Lx|3EYFe#q=X~JZ>=1O1kTqC5PI%$E?gUJndJENe2;@nS=
zztnPO7yaB~5E_LrU%*;9*)(oI|Bda7Ypw(M!JV~(4aST+@=PJ$hj+y};@|rmZdZDQ
zUx7M53Qrij?+uX}^{&CU#Llj3k$QBUbo;q@2U=xWnnk<-L3R=?<cCx#XDid*z?-Fd
z;A_l#yq^iJ11?D@Qu<NR#*&e-2yZVCZ-GHJ7RBe}ntV$V??(-LpHpZyo*eWjPJ6%z
z&M|glu$3mC9S38eEk1vJhefn`cz4?{;$Cngk#|;E9t9{+CRNO*sQryI)I=nENh!o4
z7s|3}>RUSRhpp{#=^oCY4ij7`D>W?jA@{f}#M$`eJT<)c%DesB<dQQjv=HTM_qn($
z<e>()WV5=UyEpCizl<db7&~2NN@g(ZKajt8Z68hNmPJPG%pu7aIUnMj*V4J0Oy(V$
z0SA~%W!cmiHsphZ3w10cWUe0xrsG(o=ta{uMrjP*29eG22+Dq49Q978SbWTkVIFc7
z*W>*BIyQPg^qr=U96!|O+;By*X@V8($#6&ZGc}w?lREcly#!a*<c4dvTkPxbo)cZF
zwmv*uuG;s}8dFTXV!@4eIr_X*sM$y?LG%;$46_ZDv7zB>O(7m;3L=Su@1qHECM@J9
zW!T6>=@`4^J)UNCPIkJf$5$0AdBHyL-R>6?6kJzWt>fBsSpC2^@gjAmeF*LnM%7}|
zlbX0l!!T_W{@SHZ8}G_?5t)^oJ5^DGu0l~qld|jPHHeg{yyh#RDu*}ciQR-RIOq(F
zWZ+XzK8F?4W1b^63^E9upxJl4k11jx?4jdwn~<$^cDcwU!Krb+22UV#8hz&CEkBCs
z=c1Qd--Tsjx_T9?laa{GyqdESH#XT4Z_9pt-Oh-rpCIs^%xwkUHS2W)D;s|nN8l^U
zuiwC_raFSS@M+P6TBjAYrImB_f8bYN*ilK4VOL$Q670|Lp3RC0oS^9LbNy6@XdLR;
z6I`sP*A?KgjJsrxUOrq;O6tkf&fp;PV=qE*6xZODSo+$l&xb$kOY_%4y5l{YC3$1I
z1u8@R^-~^hnIuuYhp$PF9gdaN;xg<B-1Xc9Q}SMGa;8{m<@>%AnAvnC9IBFJbpVMr
zV>G6l<vjX{21bav{m-=JkNr^pp0*sv^na!;$EO+mUlni8u8%_&Z;tW*#j3qO&QJJV
zyg5^``BlgHyOnyVG7^EfI0(5LPSHu*2?Uk{aX3S*VSId$%OC&_z%L+EBH$N0-33U%
z!4Fb~yCIw`-7VZ~o#2;$zj1n!!ry^ICW1)79Q?feAUzwX8_3ob<Yof{8LG+2NS%Rt
zx>oT&KovMkB!usD-xv}q1f&K)as_#~@`5yB!08PDu}}vP@aPwC2;b=n%Krdd@K4~U
z1BV1I@QY+ad9~lvLikWz`gfN81K?@Cj07kMk~Vj8cLO*8u)*3L*t2(rN))gD1}OjO
z8dM}uA&|6%J>1FD0cL3pbA-X&&UBq0#j1Y?D)dL!f$a5kv>?ah15!sgStFp1j=(<1
z-*f^=^T}I(1rjAK91uVNl;N&!PzMJ9+%fz!Fi|}EE0`!*;n0M+xZ5KB*=GofH-7^R
zf|3o6GsGbUR2^zz4=BhPUP4gp`75|65dk_50Vz3oo^VtTxz+S{zac30{5weDKZXZ_
z5)c5S07%Ul21hay`VUq@P~7<&kOC<206+?YbX<P}1j#zVt!%B`5yu(@0?tP{BdP)@
zru{n(kw3^5K*<Y$LkOgYuys4Zq7Lk0wRHt7wHrv*0qS~&D*`Cq{X2NkKf$9!2LLY&
z(sXx#xq_^m5Wf)p6V2HX6+m(H-w}!Zfk+S~Rvao$mbO;j$X=+pyFtwzU?4SfJD9~;
zy#w~b{x>E02V|6BahRG~!`z^5ZU|FTZjh;|CDNds+(6-$zynv*ufIh|5XIhq?E^pG
zpM5~d8^A1)zhaRfioJgY9wmDmvNlk-HIhTeS_QX&fuw<s*qXb$o#np}ilu)8TnHs}
z90DM~9RKD3u-p5;@cJJdKz;g^fI|Q<NXL39uLk;uS{{E#7U{PBoi6@Ac#8T8EfON+
z4-@wXkto*u4M@}{WC{L*g+qPs@2}`XedGcF<_<qK8Llvd2MqC>g~uPv_}dO2pCo(i
z)CB-Lqz!XDR_J4=4zTjnj2@efGYI~xrt$v~gg{US&LTM~h#c#)jtHdc^oI<gj*a>c
zy!Z!Xl&AoZg+N+RM`u8JrJbD}Y%Punz5=oUVie#cDByU2-^8PK#Qz5rf4ckQLlTa;
zBM8z)K;f>?;{eJPgw$YNq|I|e4#FAe|6RlW0Uaeq9D;!O$~pnQ+3m!h0?>ij0)f2w
z4I8yp<v&pVr~5}O8zKN;LqJ;Y=5A-mKLZoR!+)3Lf51eUasZ|PNC${Akw)RzI-E5G
zsHyt@K<A%+P!uIg08Bv?OV79uRKegs0Q=KBq0$0mr-cA}?%-zYW&<4hWeKzVBL+GH
zA2s#=ckof>nM3F#J_X<c{sZZzezWvf)cx-){RhP;dEpQOL;!g<#fcZ?b%Z)Q1Kj2P
z?eSDjo}591V%=XL65vBwX8;l8EFuR0;TbG9f!kjw1bzkQe}w`iO8^Q1kc>7026*q&
z$m<M96c7IuB;*el2Ygfq6~t;g0l_9n4PgmGAcYR8f4^8`@jIFk0BT47#eYElupk0_
zzZ6gzFcT-i5fU;o68Ixg68IIO|2x%xq%;Ejznt4K6F_pdP;0moU|B6d+D=Xmzhy<>
zSCIZ!V1KEfG$4JhPVNYcGjb=ukK)z8!}`PY2=Jq1heHOWsHy`}00s*r=ValIOgT;%
zansfU26sKfT7DF_{ssXA6*F<j0D^sLmCbD(Y~8$%2Tu-WWeW$QM<@6hcKnLp{~g00
z)<^(?5+ncv1Zg?U0lFOlGwO_t=6(wt1jV+$0*VqJ0H^>87RVM3(r|~u0c-NxkqG>X
z;Qt0LkmjF=B>-0tX)|oCPQBEb?*9tl{~h8VmO}tIKk*ktA*Ay#2lxqS25Jz`!bNfF
zui&D@2%uCLfa~A@1jEQ-0y$azD{vIY{t7rsgaF_oAZcrB1kCzG+VlWR4TKw(Fd!4M
zM5YmcMsWha0`|W_BlueZc|}hg_sL|!Z>bPOvG1?op+pEETMP)25I_?B4+9~HV%xvN
z`y<T}5d5uuyntm<aCdV@zyO{0utf$G$DGziPAVL$;92V?bgES#ad86hX`z~543L|@
zT(6s~f8*{aVWL+DdJ7!ggNBJd>`!w?9cwC-KNSt!zc~>4A$!=t^4(BQ$xnZD3?HYb
zE4Nwj&_VlC(hXyd@Ks9({HZ*s%?gPwcd@=#$^6?7yg!EmzAE*4gwH#K&wKBh40}`|
zsv)Y&@f{ly2Wn#r1rAZ#7i~UY6w+eYzLo4d`=k0`YOZImXi()6<Brz{U7QleT!rq{
zD4D~-SA|qP7`*{SBr9*o(;p8VreBh&U+NIImuBr(9ZeqT9lLvJSw=+BFtsBw%ikrt
z?+E8|O9?UdvW{5UkK3EtV>bH<acFX`SY)3WM&s5b{T|%Msqh?nbv3{I@d&hMbe%o&
z#g#Hje~9(lnO(_<nd;<<oo1HL9*-T<!geicq$tX2=+7f6`)Q&+QJYX!?&U_kU%1O#
z{^0zJ-0ry|`?2x|(Kd58l|+XN&=2%Do3sKbN7TF)F9te@$NJStW<qhR`tB7954<tf
zN}12hzRzg066tCpLA4ZCLfX6|bmxG8Oe>Yxt1a>%P`fSs;ioZ$roD7l?nJMWd#|YE
zTkg!f{+J=0{tzc+J|Y<U?Mf-!cYG!68&9hwx}ZW-+mma8t0UF(dLkmJgh6=blXY@@
z9&jD0iRD58^Ph&#WPK|HRoU<m`~q<`Y*stQTorlJ<&@!8b#3uRm(9O3v?(QBbrJ|=
zs+E4Fh1(oVCVzpMPEkhsOMIF<Ck{77M%8VKEs4Oie$|qPx~aVv!)S;EBpN6tc<Tqw
zEfC9=y3&QWyke{j;V&wTXEYQlb<y=be6y8n=d)V$__CC>d*R;27C+)knszN$Hs*zy
zn!^NRd0<a05eQ?yL<nDBsJHI)TRlTcgTkfco0isdTXlWW^30nqgFJZ;wLcCQT<YL&
zb!NtIU=q*oS?TDI)uBwrr<s|hl^Sui^SQA_Esmab_iM^I1Ob7NpV7c)j-W)pm<lUr
zU)8X>uj>!r)Z_%3X;?bDI)VdsR-VnLm5Hfc-(btN?;2{D2H#pprmEQcOiS$j_@bd1
z8TZV`7(GFLZ|{|8`NIVC5AildjA<s~w(Izbz0+SPA1U>4xi(*8-wLvP=}}7|lmiX$
z5_=syT7*OO0LFhrb{PW8MZ=^L7@1%!>}b+bWvc6|kSxC-o2s=q*j-9LMidh}09R-o
z?3TU`$ANsqW-~r&(Sfx!7{EO8KlXkk;2R`0PE;rH-C7-6iS*1EZ_H+!Iv{9l>}$cI
zh&5^TSGelL=Cg?|P}<DST|#57^S1EO^U`m_)vd~t0%#am_xk5aZCf((@Qu?Dk-?F&
zW^QOd#O|Osf6i`<_RKZTq^Kb%v`51moPD)`5K5f;q_5tTrDu5m=Ga?_j)11~uu@hU
z2PjUUC_iENXEV0>l8dy%UIuv=w0z$s7)mkvKDrm1jxiR@8V;O{iAAZ<zh*M9#rTmS
z0MpBG?7X;yb?;I#KMpVRj(Kgu2=O^Pk;cdQig_SJc*Zr@-sR^0<Y=>(?~B7e+Bo{g
zfz$DZ_jvt=?^5){b7weB_`P0;U+KlmW|Z1k3Z04EeYjV{%M%#lP)JweSSnl^v{Bd7
zabA{yI&UV}rB>Qb)c^9$<{}{xt>}@a`yzOTk1~MGdE}SoMsB<<{H%G0^wO}JwTLhq
z8=i0nxLnP|!W+#{NX=fdrmVEpg(w$BNBD5y^}UfsQJE+M!*eB>%5Pq<kJ@slAq>KT
zUApA#=^q$hG8aD2=5PtClX-Y@oO}2s+YPY#ygm+ECAF*RmAr1;Fzc$G*sno9p6&YI
z4ja15ydGK}dGPikx#z^hc;J`C!_}$X4LZYZpX~f%ol&^;(`cGbn?;t3t@Z7<C6oxR
zeJgZ~8Z3;=*I>k*Yrwb>S3l(peg9U=ys>-NwC1N4y0~VONKq?m2i-L70BH%4N8ZP1
zrnMHC3S(6Z??#`+`vZ}IvJ6kLK_nH@rb%c#Vcnp|gK4b7!LLVN9~G_z597bCe8@cI
zKy+7L80We{|31c*gqsU!YRSZH{FYcZCw}l4-WcA<U`-}=k5%f24HR%cm4A~bJW4fC
zZ<`PG7F&|&e13g^w6!ZUgN0jrYD)vlYqjcUU77wB>5h@KxT|5~tVu!82o||qr=_&T
z{ROS<H$Ts{7FRqH#qj3POKb`xt5m3_zoe(RkhwivskQJt%G$?sDS}v18!W+ai-gsw
zr0=36y{S>2YplwYh`*v}ev4A%EqaEwZF?51$W(Qr1yU?gey*sS1my#DeBpv~aB`1H
zN22h#!Ojj9P1VW6=TX{){_Yr#Vr4T%YE+!|?d)Z?33PKNcK9l|!B^KyI-+H$z@iac
zu^C~0a*w2~fphR(?X;K=%(Bm4?&dbS^n`}sI!LoVD}x??XI~V7drs3Ia!#k<!dPfb
zMycJh?GfKO*b=XLuYT?)>9mq}Y^!rJ@MIYX!oNNL&FUsA;|(q%V{r6_%hB7p;R$iB
z$eJIdbPGc7U%!~FAE&aldn_z{Z~f(nI6Q!1;v2}~D#2o)IFUcT1MYh&nG5!oA)b%S
zL*^Ob6aGQIhjNZ9kIywpaIFTW(jwk7Zt)-C)U`_9{U0SQ0o3%_v1|X=l9s@+_y41$
zB_N0lsQ$jB_4~+1f#VrCP0-mzb|=q(>5elr{MUmIfzvB^A;3(!u#gz9Fc9%WM8$Z;
zfcY+AVNqTo5y*cz0nr-netZC;1rV(}z^vTBf1GxB8g3nv0hHTLc=NxP+fKNFqW`~_
z+rSFH*9-nt=}iDNGkJzN@^Ij6aUkd>0L$50S-}u6U~beEWCRQem>2HkW(l(b@q;aa
z%+&%&rQE<)wqPp<uodED5(#YM4!4FP+#MaD?rz|luE4ap4cOu2kCU|%9A*zbPIdYD
zegoqNn<HQzFfb5*y1N3$V*dU`5Nv1xMIg_#Jo$6-FMu1En{c&75`nz116x@E{{eJZ
zIe;LLU(E&hz(8#X0PNz9oR9;$<NyP^IzugBV3?PM1Jv>42TOAYFx>t4$VyvlH~`ea
z0g3<vvlhqQ118{r`3oR_K6x2oYi;8O9JmQ^<#>dTuTCC9fl2|w*3};D?BMPSwg4u+
zpkNzsXB!wC3Cz~X5`4l29~i<33~&PM=II1R&g8(rRzTVdFcKiu)z%B_3NQl>h8<%9
zvL6@#%K_klE6maM<O<noFi=>6IshGnK>@S?amYzd;4P>dSQ;z?mIcd!<-rPIMX(ZB
z8GIdl1AG&#0#*g9fz`npU`?<VSR1SZ)&=W<^}z;ULtq4eViF8sCm^-vz~u?4r%zG+
zv#_<butfmrG}s+TXArIyPKZ<H9=C&n&A}F6OEAzMur=5QYzwvn+k+jzj$k<03G57Z
z0VBY!U^lQk*aPed_5yo@eU9<MPO&0R?#^Iyvvsh9f%qXJzs8gwA_|swM>qiw#eP2o
zM*8;{n?qdz3LKmOIr}xh$IoCuUC$YqiUM0gt*rr`o%}%#|4FZ`9AI9^p#n}_bw3_)
ztrK|zs-O<X(sN9wnmmYKkpDMsL6Kirj(<NU1OR%{4Rc`0FU$=LSVp8Gz#NbN9Cr&0
z%w1Z-9H9t%u<JkYoHTU;dM<RP=R#s&14~;#AAv4__=V3XkTCzLKwCO_!okO4i<CuV
z6%%aXjzDU#H`p7HDRYDqa9WT#Al|Mg`7am+5Rfrv?^C=8ODjOsjwKJ-bqAOuQqKUv
zaB#8)@_Prs2%rQT$aIlYdkB~%*b!=h)Kp6tAd;|S9XQr!q?Eh5A1eUjMB0DdK`nq<
z3)s;esZLNpQ=M&26$ROYW0)4e0k?oQBDLjLD<_1dBh=Nx9q1hl{ENO*(&0$0Kp+(Y
zXkr6(usT6``q1^4njAAq`oua&pU@$FMhK)&Mo9V>cchU<3oI=UmPL^yf6`d~q_O;&
z#`3>fDWANfeDaR+nRk?by(8~t16Dmjr*`tX+R5u`XI@u3ec@LtN8~K9vx7HS?L<*^
zPMYeRG}SrNROeS4gOeY%ZJZErFd+YqfKfAda0Me-4_F@P$<NR;ZGqVUK>xtdUm&2z
zY=8n53B?9rw2_TrCyile8UsUjBCBBAlXq-S-Z?cP$I52=>m3-t24E@*i4IxN2S7TV
zyzX=cq|@mOzgk(^dH@R)z)mMja6f72e$vqWOhflyExb>D1XLVgf;Z~%*?56p=yYP~
zj7vm%UErK%px)r^3<H6Yfeb$Z809EICtef?SY!axbL^qPvVcVg`p+Zd<Y0Mb!!$o?
ze((PkaBW*hm@AJmFn4TgaR#`+zfg{%@n4+ziF%&6re7NOOR!Mh%n=HV%qgEOZJ~}%
za6tWlM@T1k0^#`I6Q2h-2M5@h%aa$70|6Kjl*pdko`O8R0W8YN)v+0J13Li|W~ZhT
zWkG>;6dq7lZ%1c0KnyI9?hm+kKn5pZ1Pm!nuI}bQ@%h&c&@tpK(pUcBDo<{l5imf6
zVW&S?0Y2Z!^Yq5m*3lLj0RWW=gp(KQb$}woDF)#2@y!_wj_wvVC|`Vhje-xj_G8ds
zC<1|8{{mRi<Es;Ub$sdWeDdhnCLKR9cLErEd<6%}Wi~KiEKY8L>G9LsQzB2OJAM_=
z<=^0c`5)9zzmS+a{Kf;k^$%2@KooHX)$uK|yU3~?a@7)W>^LBMU|V3}1u)O;4F+5{
za&8?A_#MFJ0_Ag{rVa#Is0&-T2N-@j8G53lKn2Lv%?XGffM5p*s~iBwby6F$1+LHd
z7Z@<k-pKLOJdP}Z@JJbTfn0C^%;LK_IV0T#a%R;Q@CL{M17y$U|0?<c$N^lJEz;|t
z9OLn$W0^&fg^U4DDRct*>W%=G3IS_Qj+y@J?qpnn=)(zd{KN6p$wS~6Qlv*detdd;
z@&hu;I*oylSEr%@w>}$_0et-<7(@DS;D2BQPlgLQjvgnMr=#j|CXVw&Mu|4SKqA-5
zfq}RK>Bf;R|MUqQSRw*F?mu8c9gw2~OxU{o!4Tl~MEqS{PK55*Ap;}y>mC(R{4==v
z?cFUL+|2=7a%@DvfLJ5kkl{Ge!=RMo&#(@NYyQ77Qot$yXJR@Lfm3tz{~N7Sqjfw^
zAbzo5@#tBXiK>ksJGGMue14Fqpde~CANckGpBNwNP5>k%072aVfvnhHe-L*?$6i}Y
z?*bLAH&(8$p*Oy(pPW2Pod|y$Ru(2pUlbNzHeP!dt5`7M)VTBAYiQDy97ORS(FsMB
zq~$Y7s<VSQI5|Tzs|w{3qAxC7P>&YRt>n;p9PWy4veK6!&}h|N7uZv?Cpj@{A81|N
zX-q}4yp#{h$s|>_G0`~Bi8X%I?5mEI$sN{g>7zw9FwMf$QmKxo9*l;a{=z0`6Rm|B
zLB<r{Ylh*Z`S|8kKfhsV^CxfJ*SP5$2`#fU=s)-!iDIAnw)0crvCWDOvQ-2<y^d{J
z!7g|WM=0TX7{Pf=`CyDxduhtpHhdi44~b#wNkLMiTy9Gu+RV9vm6uC{g%^ozVhEDd
zSuwG-AtRbeuL$3(9?|8U7vAg7*Vg0?s6I5l-Z)+6Wt{xOP`a(q-B1ZVP!s*Kd6t{V
zBawMhs#@LvCa!*SPUBT&xk8Y$yhsHZuTy%HkYp_bCx%P0I1UGYGEUD_!v9m<m4{bV
zBzqlHAPg>}e7K-q#)kwXaL&2+oO2IhnLq+KEMZf^5_v!Z1PF1+GKhp|KHNayf#3p+
z44cRzYaU@ARuvxt0<suDQGz1`*#rSu=GWW3r@JrnW<K9P&-t8q>vmUHRaaG4RWG+=
zgOV1XxC2HfT$*xu_?dzinjY}1d$&|Qy)(nU<I`OyS3i7x^#@&MJ>RrS>$EDxds<w5
zaYfnMZ!EKR-5TNhs)yGl+%`MD!Fx%63#|F}<*pBWGx|3tZ{N3f{GkTL`}VZH?a}rR
zcbxP<zk)^aHJ6r$>vT)HQn9aOMMaZMl`7&J=U%Q><=E~AU)er0u6|L~+}>L*w4AZ6
z=+W2TsX0G2zr4yz3;(lP_Pr^kw>6vb&ZRS(@BK~c?xZ)WoW1vsL!Z5u+Gg?W_%;K-
zfA05>UYge9jlGqMJKj_N=F!JG|M>PpJr57Ryf^RM{GGwJ2TwH5pI*3U@Q9`*_0~Vo
zZO)w+npN#l<D-vidFJl3eHON#zNujC;nV#FFF4w;_tctR{+=}}YtNfi|EJl%eXiQ6
zq^0f8SN2lc4(PRiLYp4<7fh+uJoHUq$jrNLUi-wJHe%lXiZe^EC7TsB3Qm5NS2SVU
z`EkpO$1Oa4Wxf4DVXLQxO!{TRsjH{&Y5&XEvLofGe;c`~<?ja0KDWJlPR9{x$?f~>
zdfdEJ|NiQCz4O=FyB;~RrP9+Y#}qZJ_UYiN2PSy^m+b7@;fB5Hk2f3CJyQGPsl-8D
zroDJ*_LaKtpRQ&MC<%}F?~C~>`>(ub>4c2S&*dIow!GheJ#t~shAk;Qe(Dg~vnze%
zs1LXQIQC%c+W9kPcVF^q`_rlOZqAMyn>gy+hqDJZs55iTp~A*npKLy^{6O0B0kf8`
z{&Lp5E9v{LuK)45Gfy<1GOg{IsmGtcmS26>*r|P+)ygbu^4QlSOXuw^9cDZ+YU14V
zLmU3OzUD($Zhv>zn@`^u_sR9u*@Na>`cqt`_34dAHT$q{#iIA?t-EdU<6Yj~dH8=m
z>vVnSsUM!2eJs(r_VKLw8w&Q0*>|CI!nm4uw<|nxtnQkFX%k96UTpU~=^hQ_R$f#*
z_fVbdc+~nxR`%MP>kIeirLQ{H>fD`Q<Si+QpH-eXq1N#8?HY7HILCdg<m#3=8z&uk
zclQ@5bM8x8wz1@=>xpw)P9I?3b)sjh)P6f&{OaL~%GEErDFqYD9(i^Akm=VN|HtAT
z2g7?RI`6CJPVBa*=jG8os~>aU`t$B1r~YvG{nt`kt##KwdSXtZdwj|rJtj37RNUgr
zxi^|m{d86HIlXo^-S9<4r73H6oa~ZxH28AL#pC(83v+I4c;USfn~#^DonMh%>7A5U
zzFpX+`<T`*-}h9;{4Tk3K1$y`u*2rh&aeB+%r7fHxqRNXqQtcwE_|MpUR02Npi=ti
zWp913F8!tKvfe{(&KsZe-QBgHEb4S-`lN}?9$K9~cGTA;cEQLw7ix@H6*&KFryIW;
zTRvm$yHgK0$$oL;>0@JOjy6XZe%|BCW6g^;JDtAkIrRI%7vC+~-eBveD<^Nl?`)=(
z-bmS+`1!%#Ke1!e4+($DOCD3c@uT9El`kJD-nsNxos9)I*T4RkuWP0+ZnAD&qdN7I
zYp=Nbc*gj{%~$R``NWMcChRSH_1Yh|K0Cee_Q9k3oY|lC=feC89edrkX!7K;y7^<q
zA0AfP_`S8xmwQ+5+Ise<<UcIgv3mJ0efuB#?ayUzz13p>ri5-Iu0Gg%-;y3Hlhc|^
z9`N(r1trUdcPh=eKWAC?f<fET8cyswxW)TTzl&e=@#Ae0Gm4v5Ua_D{|CMVSE-c(!
z|J>1n#%EhsJ=N~}3)|YY-161yxjXas9;lu%wV<@*%<!Q@T9rL}ZN-=|7f*k?V0+s&
z(|Wyf?)t*=#6`7w+<9ie=ypf)jZ<R=7i~H|=^1<L=(3XXvaVJ|o8cvCBmZ8IGw9Bf
zR~NjUSh%mT(O~iqJDgrG94c9O<Kx#)6k7}W|GZ|yls7sYYcgYE&C=|hOBzle*87<!
zuh$RWe&wE;-`%W!qwd4IZzjFDXVSR`YkYFK=4)B6b<)b;^cuLR+_LKT|C&jX<P(`>
ztE^$Uc{qeN4E0c~mB;|t2LqqFWo6_I%th8oD#}vUnbP{{RxP_d)v|q;?k3h-GFWnE
zZogq!NScP>2YhJzX3YYi6lBFwfS@lamn4A!*G9*deV%Va>yH5^i~$J_@?C=IVNEAk
zJM_s$9;^(<64E3R;-V>9cFD`keip;VHA>^?LP6xs1VM`yIl}`z6L1tf!E=m&Y2u)d
z>)I%;<M%BG<Oa~2Fx(|GFAy)Mh9WJD5j59hxXDTI0Kd&2WtNGHu-((dZzw?&HvNC{
zU;jV;N9ZO0ri+^{;{468Dkktsg|ChvPI~<+w5&#<lt1`Eng0XCopVsW^3lxC>-;Lr
zZ8C=xWFocflaShXNERyUKK%a8n}5-g$ypr9IwSafc*Ws$AK@LzoerpfPCiV|iEGr6
zY&PV<+{s}aAZ(kJf$bP_7^riAo}BI+KnP7fne-YF`AeG;t0HiJs7~yRp0N722Q=xe
z8X%{q?C_T!h9iIZU{o9NnV#X22Nma>{{2CNoHKg(3CKg#hv6?dY=qBj_Yi+65fZ2L
z2i=5sV{`Vei59hWhMb-`#~=<L3{QE`LHX~L@n`O(K_!<OV^H|D*z;+IslEhE{&tAC
zjmNhM{ev(~_$QoMVWA~dy}$vTbs|2&nHY?UQ%0_vQYtRjquLIoq{|Z0K)y<mbtw=u
z=^P1-b$!|$l+`;6+Xw-6P7HdSg#V<o78QxW)DioE_?C_0bK&A93DYBvj;(4G9XB~4
zVFevEhPXx@GKYg`CK-uV|1q?#966nFMk^KFt!Sn9bifoxu2C-T@%SkZ@@;Mif~ol}
zk#iO~a>j>GEaEnjgq(|(Am_6cTP*aZRD^QQhaWjVb|@j|f+onhRAVEjSG}Uh86Q66
z;wqSgoC_Mr>1kASzXfv6A`=Ve!;hSNA2Aa)p}3?96Q&bNkI>#4IcJd}=Y05)lkZjJ
zT+oDat~j@#oU_Q0Gd}#t$@eO9E@&X9XO?47&e1W-edwqiDCn3Mr-ES4Au473MkSRI
zvqI+fh3N<B)kTcWOrKj3U#t=kboz%S;!uZ;S}Q#!?&Crl)}nWU@h#(tTe2T{3~Qx<
z3;GZdDb~tXM({{Ii$NW_pH>P{Xr%|~89I6Z-$$#0PO?w7=t{`R_bT$JeHw~=3gnFS
z7_`Q)NF!>I^m;>-)`%o2>v#v3O)m4~L3&RZjkzc1I>us;grJWJ)=_05Vyc$ML@G>G
z9aS<_&R}#lEl)R9iv=}gHZ6MlBnC@VLV_hfI^8%ev1So;igC)C$5!`<=JCm4@?wNW
z<RxY*ro40;w%84O$`ZW-<YP&U%4UfL0e<nzxg>RF7QJQ!9AJs3I}!0K=r|KxUToHU
z@(SygYVmB5BInA*otdQ|=Sstn$DmxdR7<=#O(>TwRYA^vdzVj!Md|G97YqGf)=JBb
ztgHCtTJFkN3>`tvX=RlA1VK4d(M2rP62B!Zyt=G7;v^fFG8Y<G9Ki@RG)p|%z!k@q
zSklegawFz#(c6SR#Sx=2#c>$#K9(p%WhrX6EVhj}e@j8<w_A(FzkXWrv%sonv*_hZ
zrj;y!z7=vIJtnUhl*h1Qmhe!Or&mn<f)H~}T)f3%c!|~+21&26GFok}HA2Z1>evMM
z<VwF+<Vr8va&%mB5p>*2*fi0ddNky$^R`@7IO)tMTjF=AxvE%xmgEh8<`hI#2&P33
z_xU9xSt@7H&r%FaWhW}B!0BPmD7v7K5MxO!zKkW|A~;KeU#9(Bk3`fUX}`Ed;)@6^
zaUK*biGV|hmF0tenNWx_FCxz%2;{}q5@a(b@ateS^x<T)ER(N}miWYH!jwjhVYKu%
z4$}sD**+>d@+)M~5X<6;fNv%!L}emsv?ALOQV{5v>{+htr;dKFpc5$vWJ2=?A9adM
zxbNY}d91@FByt!wv3z7Pv68Net6lzy6`2X4L0*&9dEvY>8aTUtdBw18k(mIwNJE6Y
zWCle}z>#Z~PQO>ACo&U)T+Ah3V-)1l(#vWj{;81<1Thm9nF&G8g&&)8(;s+RoDZK^
z#C=7-%tdBGkaMX<kaKk0pw%GR689_p=tM38bdhvh2tuY#TWl%yid;o5LeSm9QrR?7
zxj|>S9*T5^=^-mkiHbymq=$5R)KR&V$*96li>jM{Rf!Zn7!6{r6f|&txh?Qd+*|m|
zB9^$KD+w}c-aO<N!CYiiHZQEW=hq=|q1ca3WGrAw#Q1!gRdkYh^PI=0Lo~Bc<Rvl|
zf?T#+6}e=$F({8=w<3Q@LY+xolV#*MjKG{In+%=-MJZ?B%aofUO(Do-yNw{{=osaG
zbb64?9;2cz9v2q5OIH+!p%YGlsY--&rYa)Qj1dh6#oHZpZppR_YQOPA`t;2lk}x7O
zryk0$tT-8cD~p##IRKZq+43n~)54W$wvY<|E+8{S7680u!0%CT9Jr&)LJ83V7eCpG
zE&zBfhQ%EmK%O*VC0PN$izF;w5CB|SXW^0nfKrn!Cjvr9r5YdrAeGl<8D%C&Wg(ns
z0a969C;%YUl*a+BrqqU9&u8fv?__~klRoj$SwKK7i`fML)bjVz7%3KrWqGA&0b-d>
z2mpv>Pen(tf?AfAiYB0z<+|7iYlm})dCF|_vBLG_k}VOEP;|CE!1P9{!Uw77L^JH^
z0eO%|y-_s}ikDrOGoj~QTlT?G(;Qp~q4o3VPK2ln10qb3DkvL?1{4D_aV;EMh0o+>
ziq=5WH6^WKE1Zn)+h-`w4F=-ry<nVbe?Fss*UYpA0saU{01x1VOxsJq(`-gx|7Dt>
zcwdH6*^wS&BZwFR(LT^93FjrT^BJGiuzkZ`fwnlMNsi#v!x3J((#DfF-WsXTx3s3O
zo&~UckYjVCJBY&GBK?Syaml!G5r|K2Y#BxfTZy*m+W6xqZ#Yh*#u<U26O6-FBl{<W
z3q>{}t^NTHbLn|q@?~JCfLS&P8f6$hVSY6jycdOWe5T==BxTpy!96JL45I96JF@P{
z?HrG|U%4IbLWy<}n(;uNXcwWy@pPJl1_@?ld_l~v2BYO3r5!eowRSeiG?jKPSq2nX
zn8ddV%p+;E48vRsnMQaES^9j0f+Uw$`r0H~QD6@7vkJ^5g&GCsk&sM=*#@yF8D^Lf
z`9<X%j?tH{WO83SNFq-K79!cZ0<*~islZ$krYo>8agQ?0fhlM(lcWSnJ4=_J6Vl}e
zbJ53e$nRDw_`)RcRA3%i!DN_gklK_2qcN}_20NnF7C+1|gAQ5mWSEPl3JlRBak5f7
zq>Us-l3}i`$sUi{kQ_*AhYb*tHpwtomouCs3CW~(A*iG(tB~txa<;L5r}AMtw5TfK
zv%L@r0wkDYg)|#9gRW-#uq<s1!wr$zlY|e)L`hmI!_Zmv5vEClTBTi3;}?6@+PIh^
zvh$$y#jzI+7S?PP!;(Zp?rTB#8jO}tm3ASLmMAcrgiQ(z&QpVhNlvJ=^EBUS;_)A?
zUC`2OAA9tw{z1w?Qr`5BVH%;J>JQM4W@i3&un(I=?Gg;0n3QW}nB!@DxL#PBqcC1Z
zQGHX`@yHgrgwI3Zq1X&rkaZcjG~$!`nvO#PV;L5VA!o<c_`soMQiG8CqGCakR~ZJE
zqtIuVE*5#U9jI1$gWn)=tBeoxp~~4JldiS1wK<2WYIbC~j<$ZF$gSxlDsUu4kZ6Iw
zBKr>#3|@oO6l9nShbqJ1ID=ZO0$-=aB|(&&^>IyY4S>rbVXHK5&<T_9TZVx^RrZEq
zM*Lc2U9}vNMM-@_@Ehv<hLEtR^BZ(cvH_v=wY7Bt{+T4OayvU3ONkgHgiJ)k=Q`T@
z8^SWJ(F`?EWe{?)ASvTw_0ZOCkfP~0yeTP1OElYtHZJBDz14Q`Lt0<Fg{Jx|gg%j2
zRLH<~=;=LqT!$W&kzsg1N|gb;fhIrO)7J{y328dySau}l60|rdmufV7j;iBSNJ7q0
zz0&=?g9TWXm4mfgm6c=ZF^FSZ8ZC}%YU>*;pVYl2Spl0i=g=yWwfoy)E*(<i^TQ0+
z2$7RS3XJX#NifH?G(Qg0*L27Yf<O(QP0IEXKJ?Y`VO>yd-$j_H%FhjYDsP61Uxn4$
zc_D2+To=)bN{eS}F{bOen%~7*Mv_&Dk8qG~N6RoQ0jiu4W$Q2}tmQ7qa8y0Oyg{nU
zxSpY{@wk|y$5I$Ui$&2_i?gu`lJb+p2TV)zb)KQeiHP)6yF^$Q)^rW+wDl4&b?rf4
zSJO!^sK?KMsdI_G4%t{z`0!%T;;Hd2`r0~uAtY-mzL25%XY|#5ju%1@to5a*D*g1K
zFM<`lot9g92s@QILSIXZS<z0jOD{wp%T&gVp&KE3l+WMJKwou@$GAphtrh+ho=LMA
zz*L*TSyoS-H;iFxIT&CnZ|ED=d<5EQaiSM;RDXcJnyz771cjRH5oF3R^mSs40f#EL
z1HN#qzS<g!b}{;T>UxQBLt1>`A%;`vL$YAR7*~%m(M}&1MK1*(#?{v{M2=eQi0oXo
z5A@aR7HFr{K|I7Ximj5qks4G)uTX5$>;r3_vTm8kIkh;<vqNMXS=MJ;lPT$|+7@IA
zhPA%hT7kZLEeL&M(1PGc#z*?9GQhZw7T0;UtIHV)oTj_B8$-^n>W45csbH#d32QNz
zXNT1s7JWlnEeIKG41Au-2l}Fvs*dXzS}uvJBx>x9zLr)y#B)w$|4^P^$0Vn0{4n%2
zbsa+ftI3Z9XIft*qB6|z9C|R(KL&xRc_>jyH9sfqv>3^ALYluakWVXopl?X`R|qmy
zTQEE~#u!>`0ep^T`vj=jn}b0#893V74dvByd&gCI!w|Yo!g;E?hITP@68WG)A1ya2
zF!a@HP=JvGE&h2!UwX#T4}<s+Ov&vCO!dX2uj+@;&eZ*eix+@oeCSJWUMS<zeOMKy
z#{B4O=(g{os4tHJTIe}nKRy$?25P)U+NpXE7&2BFANq#%H6HapP4?JH(c}zuR`6jA
zJ)ibm6ri<n9aRSC>*&5634>zOpc#%_#)rN!bkaq3Ah$!`kXF0#+_1_U@M$$3^o=D`
zMBnll=&RLa&{xmF(2neM`{xoa!cb#Y^wsMYXs7J~A=^^-1JKvdYNcqW`eKZ$)ecEt
zeJw*7PSHtJ$TVL<s%Bc=jVhrQ(_o`620r{OuPhfV6;NIk<~Uk>g{527Ni4~ps^cUo
z(b_?JdR&Z9MWqEbRn<NaUcp$S^x*|DTy-3VSN?I=GHV3x)?mZ2QRkc-+;1WA8oB83
j^w9n}_ykMCC%g%WpEnwoNB1XzEgW{^>eNX|Z58(qDmSJ3

diff --git a/branches/bug1734/log.ini b/branches/bug1734/log.ini
deleted file mode 100644
index 09fedec1..00000000
--- a/branches/bug1734/log.ini
+++ /dev/null
@@ -1,32 +0,0 @@
-# This file configures the logging module for the test harness:
-# critical errors are logged to testing.log; everything else is
-# ignored.
-
-# Documentation for the file format is at
-# http://www.red-dove.com/python_logging.html#config
-
-[logger_root]
-level=CRITICAL
-handlers=normal
-
-[handler_normal]
-class=FileHandler
-level=NOTSET
-formatter=common
-args=('testing.log', 'a')
-filename=testing.log
-mode=a
-
-[formatter_common]
-format=------
-       %(asctime)s %(levelname)s %(name)s %(message)s
-datefmt=%Y-%m-%dT%H:%M:%S
-
-[loggers]
-keys=root
-
-[handlers]
-keys=normal
-
-[formatters]
-keys=common
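(Illustration only, not part of the deleted file: this is the standard
logging fileConfig format, so a minimal sketch of loading it, assuming
log.ini sits in the current directory, looks like the following. Note
that fileConfig reads the class/level/formatter/args keys of a handler
section; the extra filename= and mode= entries above are ignored.)

    import logging
    import logging.config

    # Wire up the root logger, "normal" handler and "common" formatter
    # defined above; afterwards only CRITICAL records reach testing.log.
    logging.config.fileConfig('log.ini')
    logging.getLogger().critical('written to testing.log')
    logging.getLogger().info('dropped: below the CRITICAL threshold')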
diff --git a/branches/bug1734/release.py b/branches/bug1734/release.py
deleted file mode 100644
index f30358c4..00000000
--- a/branches/bug1734/release.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#! /usr/bin/env python
-"""Update version numbers and release dates for the next release.
-
-usage: release.py version date
-
-version should be a string like "3.2c1"
-date should be a string like "23-Sep-2003"
-
-The following files are updated:
-    - setup.py
-    - NEWS.txt
-    - doc/guide/zodb.tex
-    - src/ZEO/__init__.py
-    - src/ZEO/version.txt
-    - src/ZODB/__init__.py
-"""
-
-import fileinput
-import os
-import re
-
-# In file filename, replace the first occurrence of regexp pat with
-# string repl.
-def replace(filename, pat, repl):
-    from sys import stderr as e # fileinput hijacks sys.stdout
-    foundone = False
-    for line in fileinput.input([filename], inplace=True, backup="~"):
-        if foundone:
-            print line,
-        else:
-            match = re.search(pat, line)
-            if match is not None:
-                foundone = True
-
-                new = re.sub(pat, repl, line)
-                print new,
-
-                print >> e, "In %s, replaced:" % filename
-                print >> e, "   ", repr(line)
-                print >> e, "   ", repr(new)
-
-            else:
-                print line,
-
-    if not foundone:
-        print >> e, "*" * 60, "Oops!"
-        print >> e, "    Failed to find %r in %r" % (pat, filename)
-
-def compute_zeoversion(version):
-    # ZEO versions trail ZODB versions by one full revision.
-    # ZODB 3.2c1 corresponds to ZEO 2.2c1
-    major, rest = version.split(".", 1)
-    major = int(major) - 1
-    return "%s.%s" % (major, rest)
-
-def write_zeoversion(path, version):
-    f = file(path, "w")
-    print >> f, version
-    f.close()
-
-def main(args):
-    version, date = args
-    zeoversion = compute_zeoversion(version)
-
-    replace("setup.py",
-            r'version="\S+"',
-            'version="%s"' % version)
-    replace("src/ZODB/__init__.py",
-            r'__version__ = "\S+"',
-            '__version__ = "%s"' % version)
-    replace("src/ZEO/__init__.py",
-            r'version = "\S+"',
-            'version = "%s"' % zeoversion)
-    write_zeoversion("src/ZEO/version.txt", zeoversion)
-    replace("NEWS.txt",
-            r"^Release date: .*",
-            "Release date: %s" % date)
-    replace("doc/guide/zodb.tex",
-            r"release{\S+}",
-            "release{%s}" % version)
-if __name__ == "__main__":
-    import sys
-    main(sys.argv[1:])
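(Illustration only, not part of the deleted file: per the docstring the
script is driven as "python release.py 3.2c1 23-Sep-2003", with version
and date strings in the formats shown there. The ZEO version derivation
is small enough to check as a doctest-style sketch:)

    >>> def compute_zeoversion(version):
    ...     # ZEO major versions trail ZODB's by one: ZODB 3.2c1 <-> ZEO 2.2c1
    ...     major, rest = version.split(".", 1)
    ...     return "%s.%s" % (int(major) - 1, rest)
    ...
    >>> compute_zeoversion("3.2c1")
    '2.2c1'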
diff --git a/branches/bug1734/releases/ZODB3/DEPENDENCIES.cfg b/branches/bug1734/releases/ZODB3/DEPENDENCIES.cfg
deleted file mode 100644
index 12a6b1bc..00000000
--- a/branches/bug1734/releases/ZODB3/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-ZEO
-ZODB
-# Needed because...?
-Persistence
-ZopeUndo
diff --git a/branches/bug1734/releases/ZODB3/PACKAGE.cfg b/branches/bug1734/releases/ZODB3/PACKAGE.cfg
deleted file mode 100644
index 52fbe275..00000000
--- a/branches/bug1734/releases/ZODB3/PACKAGE.cfg
+++ /dev/null
@@ -1,24 +0,0 @@
-<load>
-  doc           svn://svn.zope.org/repos/main/ZODB/tags/*/doc/
-  scripts       svn://svn.zope.org/repos/main/ZODB/tags/*/src/scripts/
-  log.ini       svn://svn.zope.org/repos/main/ZODB/tags/*/log.ini
-  test.py       svn://svn.zope.org/repos/main/ZODB/tags/*/test.py
-  COPYRIGHT.txt svn://svn.zope.org/repos/main/ZODB/tags/*/COPYRIGHT.txt
-  LICENSE.txt   svn://svn.zope.org/repos/main/ZODB/tags/*/LICENSE.txt
-  NEWS.txt      svn://svn.zope.org/repos/main/ZODB/tags/*/NEWS.txt
-  README.txt    svn://svn.zope.org/repos/main/ZODB/tags/*/README.txt
-</load>
-
-<distribution>
-  doc
-  log.ini
-  test.py
-  COPYRIGHT.txt
-  LICENSE.txt
-  NEWS.txt
-  README.txt
-</distribution>
-
-<collection>
-  doc           -
-</collection>
diff --git a/branches/bug1734/releases/ZODB3/PUBLICATION.cfg b/branches/bug1734/releases/ZODB3/PUBLICATION.cfg
deleted file mode 100644
index c806809d..00000000
--- a/branches/bug1734/releases/ZODB3/PUBLICATION.cfg
+++ /dev/null
@@ -1,23 +0,0 @@
-Metadata-version: 1.1
-Name: ZODB3
-License: ZPL 2.1
-Home-page: http://www.zope.org/Wikis/ZODB
-Summary: Zope Object Database: object database and persistence
-Description:
-        The Zope Object Database provides an object-oriented database
-        for Python that provides a high degree of transparency.
-        Applications can take advantage of object database features
-        with few, if any, changes to application logic.  ZODB includes
-        features such as a pluggable storage interface, rich
-        transaction support, and undo.
-Maintainer: ZODB Developers
-Maintainer-email: zodb-dev@zope.org
-Platform: any
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Zope Public License
-Classifier: Programming Language :: Python
-Classifier: Topic :: Database
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Operating System :: Microsoft :: Windows
-Classifier: Operating System :: Unix
diff --git a/branches/bug1734/releases/ZODB3/SETUP.cfg b/branches/bug1734/releases/ZODB3/SETUP.cfg
deleted file mode 100644
index 502140bb..00000000
--- a/branches/bug1734/releases/ZODB3/SETUP.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-script  scripts/fsdump.py
-script  scripts/fsoids.py
-script  scripts/fsrefs.py
-script  scripts/fstail.py
-script  scripts/fstest.py
-script  scripts/repozo.py
-script  scripts/zeopack.py
diff --git a/branches/bug1734/setup.py b/branches/bug1734/setup.py
deleted file mode 100644
index 33a0e641..00000000
--- a/branches/bug1734/setup.py
+++ /dev/null
@@ -1,267 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Zope Object Database: object database and persistence
-
-The Zope Object Database provides an object-oriented database for
-Python that provides a high degree of transparency. Applications can
-take advantage of object database features with few, if any, changes
-to application logic.  ZODB includes features such as a pluggable storage
-interface, rich transaction support, and undo.
-"""
-
-# The (non-obvious!) choices for the Trove Development Status line:
-# Development Status :: 5 - Production/Stable
-# Development Status :: 4 - Beta
-# Development Status :: 3 - Alpha
-
-classifiers = """\
-Development Status :: 5 - Production/Stable
-Intended Audience :: Developers
-License :: OSI Approved :: Zope Public License
-Programming Language :: Python
-Topic :: Database
-Topic :: Software Development :: Libraries :: Python Modules
-Operating System :: Microsoft :: Windows
-Operating System :: Unix
-"""
-
-import glob
-import os
-import sys
-from distutils.core import setup
-from distutils.extension import Extension
-from distutils import dir_util
-from distutils.dist import Distribution
-from distutils.command.install_lib import install_lib
-from distutils.command.build_py import build_py
-from distutils.util import convert_path
-
-if sys.version_info < (2, 3, 4):
-    print "ZODB 3.3 requires Python 2.3.4 or higher"
-    sys.exit(0)
-
-# Include directories for C extensions
-include = ['src/persistent']
-
-# Set up dependencies for the BTrees package
-base_btrees_depends = [
-    "src/BTrees/BTreeItemsTemplate.c",
-    "src/BTrees/BTreeModuleTemplate.c",
-    "src/BTrees/BTreeTemplate.c",
-    "src/BTrees/BucketTemplate.c",
-    "src/BTrees/MergeTemplate.c",
-    "src/BTrees/SetOpTemplate.c",
-    "src/BTrees/SetTemplate.c",
-    "src/BTrees/TreeSetTemplate.c",
-    "src/BTrees/sorters.c",
-    "src/persistent/cPersistence.h",
-    ]
-
-_flavors = {"O": "object", "I": "int", "F": "float"}
-
-KEY_H = "src/BTrees/%skeymacros.h"
-VALUE_H = "src/BTrees/%svaluemacros.h"
-
-def BTreeExtension(flavor):
-    key = flavor[0]
-    value = flavor[1]
-    name = "BTrees._%sBTree" % flavor
-    sources = ["src/BTrees/_%sBTree.c" % flavor]
-    kwargs = {"include_dirs": include}
-    if flavor != "fs":
-        kwargs["depends"] = (base_btrees_depends + [KEY_H % _flavors[key],
-                                                    VALUE_H % _flavors[value]])
-    if key != "O":
-        kwargs["define_macros"] = [('EXCLUDE_INTSET_SUPPORT', None)]
-    return Extension(name, sources, **kwargs)
-
-exts = [BTreeExtension(flavor)
-        for flavor in ("OO", "IO", "OI", "II", "IF", "fs")]
-
-cPersistence = Extension(name = 'persistent.cPersistence',
-                         include_dirs = include,
-                         sources= ['src/persistent/cPersistence.c',
-                                   'src/persistent/ring.c'],
-                         depends = ['src/persistent/cPersistence.h',
-                                    'src/persistent/ring.h',
-                                    'src/persistent/ring.c']
-                         )
-
-cPickleCache = Extension(name = 'persistent.cPickleCache',
-                         include_dirs = include,
-                         sources= ['src/persistent/cPickleCache.c',
-                                   'src/persistent/ring.c'],
-                         depends = ['src/persistent/cPersistence.h',
-                                    'src/persistent/ring.h',
-                                    'src/persistent/ring.c']
-                         )
-
-TimeStamp = Extension(name = 'persistent.TimeStamp',
-                      include_dirs = include,
-                      sources= ['src/persistent/TimeStamp.c']
-                      )
-
-##coptimizations = Extension(name = 'ZODB.coptimizations',
-##                           include_dirs = include,
-##                           sources= ['src/ZODB/coptimizations.c']
-##                           )
-
-winlock = Extension(name = 'ZODB.winlock',
-                    include_dirs = include,
-                    sources = ['src/ZODB/winlock.c']
-                    )
-
-cZopeInterface = Extension(
-            name = 'zope.interface._zope_interface_coptimizations',
-            sources= ['src/zope/interface/_zope_interface_coptimizations.c']
-            )
-
-cZopeProxy = Extension(
-            name = 'zope.proxy._zope_proxy_proxy',
-            sources= ['src/zope/proxy/_zope_proxy_proxy.c']
-            )
-
-exts += [cPersistence,
-         cPickleCache,
-         TimeStamp,
-         winlock,
-         cZopeInterface,
-         cZopeProxy,
-        ]
-
-# The ZODB.zodb4 code is not being packaged, because it is only
-# needed to convert early versions of Zope3 databases to ZODB3.
-
-packages = ["BTrees", "BTrees.tests",
-            "ZEO", "ZEO.auth", "ZEO.zrpc", "ZEO.tests",
-            "ZODB", "ZODB.FileStorage", "ZODB.tests",
-            "Persistence", "Persistence.tests",
-            "persistent", "persistent.tests",
-            "transaction", "transaction.tests",
-            "ThreadedAsync",
-            "zdaemon", "zdaemon.tests",
-
-            "zope",
-            "zope.interface", "zope.interface.tests",
-            "zope.proxy", "zope.proxy.tests",
-            "zope.testing",
-
-            "ZopeUndo", "ZopeUndo.tests",
-            "ZConfig", "ZConfig.tests",
-            "ZConfig.components",
-            "ZConfig.components.basic", "ZConfig.components.basic.tests",
-            "ZConfig.components.logger", "ZConfig.components.logger.tests",
-            "ZConfig.tests.library", "ZConfig.tests.library.widget",
-            "ZConfig.tests.library.thing",
-            ]
-
-scripts = ["src/scripts/fsdump.py",
-           "src/scripts/fsoids.py",
-           "src/scripts/fsrefs.py",
-           "src/scripts/fstail.py",
-           "src/scripts/fstest.py",
-           "src/scripts/repozo.py",
-           "src/scripts/zeopack.py",
-           "src/ZConfig/scripts/zconfig",
-           "src/ZEO/runzeo.py",
-           "src/ZEO/zeopasswd.py",
-           "src/ZEO/mkzeoinst.py",
-           "src/ZEO/zeoctl.py",
-           "src/zdaemon/zdrun.py",
-           "src/zdaemon/zdctl.py",
-           ]
-
-def copy_other_files(cmd, outputbase):
-    # A delicate dance to copy files with certain extensions
-    # into a package just like .py files.
-    extensions = ["*.conf", "*.xml", "*.txt", "*.sh"]
-    for dir in [
-        "transaction",
-        "persistent/tests",
-        "ZConfig/components/basic",
-        "ZConfig/components/logger",
-        "ZConfig/tests/input",
-        "ZConfig/tests/library",
-        "ZConfig/tests/library/thing",
-        "ZConfig/tests/library/thing/extras",
-        "ZConfig/tests/library/widget",
-        "ZEO",
-        "ZODB",
-        "ZODB/tests",
-        "zdaemon",
-        "zdaemon/tests",
-        "zope/interface", "zope/interface/tests",
-        ]:
-        dir = convert_path(dir)
-        inputdir = os.path.join("src", dir)
-        outputdir = os.path.join(outputbase, dir)
-        if not os.path.exists(outputdir):
-            dir_util.mkpath(outputdir)
-        for pattern in extensions:
-            for fn in glob.glob(os.path.join(inputdir, pattern)):
-                # glob is going to give us a path including "src",
-                # which must be stripped to get the destination dir
-                dest = os.path.join(outputbase, fn[4:])
-                cmd.copy_file(fn, dest)
-
-class MyLibInstaller(install_lib):
-    """Custom library installer, used to put hosttab in the right place."""
-
-    # We use the install_lib command since we need to put hosttab
-    # inside the library directory.  This is where we already have the
-    # real information about where to install it after the library
-    # location has been set by any relevant distutils command line
-    # options.
-
-    def run(self):
-        install_lib.run(self)
-        copy_other_files(self, self.install_dir)
-
-class MyPyBuilder(build_py):
-    def build_packages(self):
-        build_py.build_packages(self)
-        copy_other_files(self, self.build_lib)
-
-class MyDistribution(Distribution):
-    # To control the selection of MyLibInstaller and MyPyBuilder, we
-    # have to set it into the cmdclass instance variable, set in
-    # Distribution.__init__().
-
-    def __init__(self, *attrs):
-        Distribution.__init__(self, *attrs)
-        self.cmdclass['build_py'] = MyPyBuilder
-        self.cmdclass['install_lib'] = MyLibInstaller
-
-doclines = __doc__.split("\n")
-
-setup(name="ZODB3",
-      version="3.4a0",
-      maintainer="Zope Corporation",
-      maintainer_email="zodb-dev@zope.org",
-      url = "http://www.zope.org/Wikis/ZODB",
-      download_url = "http://www.zope.org/Products/ZODB3.3",
-      packages = packages,
-      package_dir = {'': 'src'},
-      ext_modules = exts,
-      headers = ['src/persistent/cPersistence.h',
-                 'src/persistent/ring.h'],
-      license = "ZPL 2.1",
-      platforms = ["any"],
-      description = doclines[0],
-      classifiers = filter(None, classifiers.split("\n")),
-      long_description = "\n".join(doclines[2:]),
-      distclass = MyDistribution,
-      scripts = scripts,
-      )
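(Illustration only, not part of the deleted file: what the
BTreeExtension() factory above produces for one flavor, shown through
the standard distutils Extension attributes.)

    >>> ext = BTreeExtension("IO")    # 'I' keys (int), 'O' values (object)
    >>> ext.name
    'BTrees._IOBTree'
    >>> ext.sources
    ['src/BTrees/_IOBTree.c']
    >>> ext.define_macros             # present because the key type is not 'O'
    [('EXCLUDE_INTSET_SUPPORT', None)]
    >>> (KEY_H % _flavors["I"], VALUE_H % _flavors["O"])
    ('src/BTrees/intkeymacros.h', 'src/BTrees/objectvaluemacros.h')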
diff --git a/branches/bug1734/src/BTrees/BTreeItemsTemplate.c b/branches/bug1734/src/BTrees/BTreeItemsTemplate.c
deleted file mode 100644
index 0b435981..00000000
--- a/branches/bug1734/src/BTrees/BTreeItemsTemplate.c
+++ /dev/null
@@ -1,698 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define BTREEITEMSTEMPLATE_C "$Id$\n"
-
-/* A BTreeItems struct is returned from calling .items(), .keys() or
- * .values() on a BTree-based data structure, and is also the result of
- * taking slices of those.  It represents a contiguous slice of a BTree.
- *
- * The start of the slice is in firstbucket, at offset first.  The end of
- * the slice is in lastbucket, at offset last.  Both endpoints are inclusive.
- * It must be possible to get from firstbucket to lastbucket by following
- * bucket 'next' pointers zero or more times.  firstbucket, first, lastbucket,
- * and last are readonly after initialization.  An empty slice is represented
- * by firstbucket == lastbucket == currentbucket == NULL.
- *
- * 'kind' determines whether this slice represents 'k'eys alone, 'v'alues
- * alone, or 'i'tems (key+value pairs).  'kind' is also readonly after
- * initialization.
- *
- * The combination of currentbucket, currentoffset and pseudoindex acts as
- * a search finger.  Offset currentoffset in bucket currentbucket is at index
- * pseudoindex, where pseudoindex==0 corresponds to offset first in bucket
- * firstbucket, and pseudoindex==-1 corresponds to offset last in bucket
- * lastbucket.  The function BTreeItems_seek() can be used to set this combo
- * correctly for any in-bounds index, and uses this combo on input to avoid
- * needing to search from the start (or end) on each call.  Calling
- * BTreeItems_seek() with consecutive larger positions is very efficient.
- * Calling it with consecutive smaller positions is more efficient than if
- * a search finger weren't being used at all, but is still quadratic time
- * in the number of buckets in the slice.
- */
-typedef struct {
-  PyObject_HEAD
-  Bucket *firstbucket;		/* First bucket		          */
-  Bucket *currentbucket;	/* Current bucket (search finger) */
-  Bucket *lastbucket;		/* Last bucket		          */
-  int currentoffset;		/* Offset in currentbucket        */
-  int pseudoindex;		/* search finger index            */
-  int first;                    /* Start offset in firstbucket    */
-  int last;                     /* End offset in lastbucket       */
-  char kind;                    /* 'k', 'v', 'i'                  */
-} BTreeItems;
-
-#define ITEMS(O)((BTreeItems*)(O))
-
-static PyObject *
-newBTreeItems(char kind,
-              Bucket *lowbucket, int lowoffset,
-              Bucket *highbucket, int highoffset);
-
-static void
-BTreeItems_dealloc(BTreeItems *self)
-{
-  Py_XDECREF(self->firstbucket);
-  Py_XDECREF(self->lastbucket);
-  Py_XDECREF(self->currentbucket);
-  PyObject_DEL(self);
-}
-
-static int
-BTreeItems_length_or_nonzero(BTreeItems *self, int nonzero)
-{
-    int r;
-    Bucket *b, *next;
-
-    b = self->firstbucket;
-    if (b == NULL)
-	return 0;
-
-    r = self->last + 1 - self->first;
-
-    if (nonzero && r > 0)
-	/* Short-circuit if all we care about is nonempty */
-	return 1;
-
-    if (b == self->lastbucket)
-	return r;
-
-    Py_INCREF(b);
-    PER_USE_OR_RETURN(b, -1);
-    while ((next = b->next)) {
-	r += b->len;
-	if (nonzero && r > 0)
-	    /* Short-circuit if all we care about is nonempty */
-	    break;
-
-	if (next == self->lastbucket)
-	    break; /* we already counted the last bucket */
-
-	Py_INCREF(next);
-	PER_UNUSE(b);
-	Py_DECREF(b);
-	b = next;
-	PER_USE_OR_RETURN(b, -1);
-    }
-    PER_UNUSE(b);
-    Py_DECREF(b);
-
-    return r >= 0 ? r : 0;
-}
-
-static int
-BTreeItems_length( BTreeItems *self)
-{
-  return BTreeItems_length_or_nonzero(self, 0);
-}
-
-/*
-** BTreeItems_seek
-**
-** Find the ith position in the BTreeItems.
-**
-** Arguments:  	self	The BTreeItems structure
-**		i	the index to seek to, in 0 .. len(self)-1, or in
-**                      -len(self) .. -1, as for indexing a Python sequence.
-**
-**
-** Returns 0 if successful, -1 on failure to seek (like out-of-bounds).
-** Upon successful return, index i is at offset self->currentoffset in bucket
-** self->currentbucket.
-*/
-static int
-BTreeItems_seek(BTreeItems *self, int i)
-{
-    int delta, pseudoindex, currentoffset;
-    Bucket *b, *currentbucket;
-    int error;
-
-    pseudoindex = self->pseudoindex;
-    currentoffset = self->currentoffset;
-    currentbucket = self->currentbucket;
-    if (currentbucket == NULL) goto no_match;
-
-    delta = i - pseudoindex;
-    while (delta > 0) {         /* move right */
-        int max;
-        /* Want to move right delta positions; the most we can move right in
-         * this bucket is currentbucket->len - currentoffset - 1 positions.
-         */
-        PER_USE_OR_RETURN(currentbucket, -1);
-        max = currentbucket->len - currentoffset - 1;
-        b = currentbucket->next;
-        PER_UNUSE(currentbucket);
-        if (delta <= max) {
-            currentoffset += delta;
-            pseudoindex += delta;
-            if (currentbucket == self->lastbucket
-                && currentoffset > self->last) goto no_match;
-            break;
-        }
-        /* Move to start of next bucket. */
-        if (currentbucket == self->lastbucket || b == NULL) goto no_match;
-        currentbucket = b;
-        pseudoindex += max + 1;
-        delta -= max + 1;
-        currentoffset = 0;
-    }
-    while (delta < 0) {         /* move left */
-        int status;
-        /* Want to move left -delta positions; the most we can move left in
-         * this bucket is currentoffset positions.
-         */
-        if ((-delta) <= currentoffset) {
-            currentoffset += delta;
-            pseudoindex += delta;
-            if (currentbucket == self->firstbucket
-                && currentoffset < self->first) goto no_match;
-            break;
-        }
-        /* Move to end of previous bucket. */
-        if (currentbucket == self->firstbucket) goto no_match;
-        status = PreviousBucket(&currentbucket, self->firstbucket);
-        if (status == 0)
-            goto no_match;
-        else if (status < 0)
-            return -1;
-        pseudoindex -= currentoffset + 1;
-        delta += currentoffset + 1;
-        PER_USE_OR_RETURN(currentbucket, -1);
-        currentoffset = currentbucket->len - 1;
-        PER_UNUSE(currentbucket);
-    }
-
-    assert(pseudoindex == i);
-
-    /* Alas, the user may have mutated the bucket since the last time we
-     * were called, and if they deleted stuff, we may be pointing into
-     * trash memory now.
-     */
-    PER_USE_OR_RETURN(currentbucket, -1);
-    error = currentoffset < 0 || currentoffset >= currentbucket->len;
-    PER_UNUSE(currentbucket);
-    if (error) {
-	PyErr_SetString(PyExc_RuntimeError,
-	                "the bucket being iterated changed size");
-	return -1;
-    }
-
-    Py_INCREF(currentbucket);
-    Py_DECREF(self->currentbucket);
-    self->currentbucket = currentbucket;
-    self->currentoffset = currentoffset;
-    self->pseudoindex = pseudoindex;
-    return 0;
-
-no_match:
-    IndexError(i);
-    return -1;
-}
-
-
-/* Return the right kind ('k','v','i') of entry from bucket b at offset i.
- *  b must be activated.  Returns NULL on error.
- */
-static PyObject *
-getBucketEntry(Bucket *b, int i, char kind)
-{
-    PyObject *result = NULL;
-
-    assert(b);
-    assert(0 <= i && i < b->len);
-
-    switch (kind) {
-
-        case 'k':
-            COPY_KEY_TO_OBJECT(result, b->keys[i]);
-            break;
-
-        case 'v':
-            COPY_VALUE_TO_OBJECT(result, b->values[i]);
-            break;
-
-        case 'i': {
-            PyObject *key;
-            PyObject *value;
-
-            COPY_KEY_TO_OBJECT(key, b->keys[i]);
-            if (!key) break;
-
-            COPY_VALUE_TO_OBJECT(value, b->values[i]);
-            if (!value) {
-                Py_DECREF(key);
-                break;
-            }
-
-            result = PyTuple_New(2);
-            if (result) {
-                PyTuple_SET_ITEM(result, 0, key);
-                PyTuple_SET_ITEM(result, 1, value);
-            }
-            else {
-                Py_DECREF(key);
-                Py_DECREF(value);
-            }
-            break;
-        }
-
-        default:
-            PyErr_SetString(PyExc_AssertionError,
-                            "getBucketEntry: unknown kind");
-            break;
-    }
-    return result;
-}
-
-/*
-** BTreeItems_item
-**
-** Arguments:	self	a BTreeItems structure
-**		i	Which item to inspect
-**
-** Returns:	the ith entry of self, of the kind given by self->kind
-**		('k' key, 'v' value, or 'i' (key, value) pair)
-*/
-static PyObject *
-BTreeItems_item(BTreeItems *self, int i)
-{
-    PyObject *result;
-
-    if (BTreeItems_seek(self, i) < 0) return NULL;
-
-    PER_USE_OR_RETURN(self->currentbucket, NULL);
-    result = getBucketEntry(self->currentbucket, self->currentoffset,
-                            self->kind);
-    PER_UNUSE(self->currentbucket);
-    return result;
-}
-
-/*
-** BTreeItems_slice
-**
-** Creates a new BTreeItems structure representing the slice
-** between the low and high range
-**
-** Arguments:	self	The old BTreeItems structure
-**		ilow	The start index
-**		ihigh	The end index
-**
-** Returns:	BTreeItems item
-*/
-static PyObject *
-BTreeItems_slice(BTreeItems *self, int ilow, int ihigh)
-{
-  Bucket *lowbucket;
-  Bucket *highbucket;
-  int lowoffset;
-  int highoffset;
-  int length = -1;  /* len(self), but computed only if needed */
-
-  /* Complications:
-   * A Python slice never raises IndexError, but BTreeItems_seek does.
-   * Python did only part of index normalization before calling this:
-   *     ilow may be < 0 now, and ihigh may be arbitrarily large.  It's
-   *     our responsibility to clip them.
-   * A Python slice is exclusive of the high index, but a BTreeItems
-   *     struct is inclusive on both ends.
-   */
-
-  /* First adjust ilow and ihigh to be legit endpoints in the Python
-   * sense (ilow inclusive, ihigh exclusive).  This block duplicates the
-   * logic from Python's list_slice function (slicing for builtin lists).
-   */
-  if (ilow < 0)
-      ilow = 0;
-  else {
-      if (length < 0)
-          length = BTreeItems_length(self);
-      if (ilow > length)
-          ilow = length;
-  }
-
-  if (ihigh < ilow)
-      ihigh = ilow;
-  else {
-      if (length < 0)
-          length = BTreeItems_length(self);
-      if (ihigh > length)
-          ihigh = length;
-  }
-  assert(0 <= ilow && ilow <= ihigh);
-  assert(length < 0 || ihigh <= length);
-
-  /* Now adjust for the fact that our struct is inclusive on both ends.  This is
-   * easy *except* when the slice is empty:  there's no good way to spell
-   * that in an inclusive-on-both-ends scheme.  For example, if the
-   * slice is btree.items()[:0], ilow == ihigh == 0 at this point, and if
-   * we were to subtract 1 from ihigh that would get interpreted by
-   * BTreeItems_seek as meaning the *entire* set of items.  Setting ilow==1
-   * and ihigh==0 doesn't work either, as BTreeItems_seek raises IndexError
-   * if we attempt to seek to ilow==1 when the underlying sequence is empty.
-   * It seems simplest to deal with empty slices as a special case here.
-   */
-   if (ilow == ihigh) {
-       /* empty slice */
-       lowbucket = highbucket = NULL;
-       lowoffset = 1;
-       highoffset = 0;
-   }
-   else {
-       assert(ilow < ihigh);
-       --ihigh;  /* exclusive -> inclusive */
-
-       if (BTreeItems_seek(self, ilow) < 0) return NULL;
-       lowbucket = self->currentbucket;
-       lowoffset = self->currentoffset;
-
-       if (BTreeItems_seek(self, ihigh) < 0) return NULL;
-
-       highbucket = self->currentbucket;
-       highoffset = self->currentoffset;
-  }
-  return newBTreeItems(self->kind,
-                       lowbucket, lowoffset, highbucket, highoffset);
-}
-
-static PySequenceMethods BTreeItems_as_sequence = {
-  (inquiry) BTreeItems_length,
-  (binaryfunc)0,
-  (intargfunc)0,
-  (intargfunc) BTreeItems_item,
-  (intintargfunc) BTreeItems_slice,
-};
-
-/* Number protocol support (just for nb_nonzero!) */
-
-static int
-BTreeItems_nonzero(BTreeItems *self)
-{
-  return BTreeItems_length_or_nonzero(self, 1);
-}
-
-static PyNumberMethods BTreeItems_as_number_for_nonzero = {
-  0,0,0,0,0,0,0,0,0,0,
-   (inquiry)BTreeItems_nonzero};
-
-static PyTypeObject BTreeItemsType = {
-  PyObject_HEAD_INIT(NULL)
-  0,					/*ob_size*/
-  MOD_NAME_PREFIX "BTreeItems",	        /*tp_name*/
-  sizeof(BTreeItems),		        /*tp_basicsize*/
-  0,					/*tp_itemsize*/
-  /* methods */
-  (destructor) BTreeItems_dealloc,	/*tp_dealloc*/
-  (printfunc)0,				/*tp_print*/
-  (getattrfunc)0,			/*obsolete tp_getattr*/
-  (setattrfunc)0,			/*obsolete tp_setattr*/
-  (cmpfunc)0,				/*tp_compare*/
-  (reprfunc)0,				/*tp_repr*/
-  &BTreeItems_as_number_for_nonzero,	/*tp_as_number*/
-  &BTreeItems_as_sequence,		/*tp_as_sequence*/
-  0,					/*tp_as_mapping*/
-  (hashfunc)0,				/*tp_hash*/
-  (ternaryfunc)0,			/*tp_call*/
-  (reprfunc)0,				/*tp_str*/
-  0,					/*tp_getattro*/
-  0,					/*tp_setattro*/
-
-  /* Space for future expansion */
-  0L,0L,
-  "Sequence type used to iterate over BTree items." /* Documentation string */
-};
-
-/* Returns a new BTreeItems object representing the contiguous slice from
- * offset lowoffset in bucket lowbucket through offset highoffset in bucket
- * highbucket, inclusive.  Pass lowbucket == NULL for an empty slice.
- * The currentbucket is set to lowbucket, currentoffset to lowoffset, and
- * pseudoindex to 0.  kind is 'k', 'v' or 'i' (see BTreeItems struct docs).
- */
-static PyObject *
-newBTreeItems(char kind,
-              Bucket *lowbucket, int lowoffset,
-              Bucket *highbucket, int highoffset)
-{
-  BTreeItems *self;
-
-  UNLESS (self = PyObject_NEW(BTreeItems, &BTreeItemsType)) return NULL;
-  self->kind=kind;
-
-  self->first=lowoffset;
-  self->last=highoffset;
-
-  if (! lowbucket || ! highbucket
-      || (lowbucket == highbucket && lowoffset > highoffset))
-    {
-      self->firstbucket   = 0;
-      self->lastbucket    = 0;
-      self->currentbucket = 0;
-    }
-  else
-    {
-      Py_INCREF(lowbucket);
-      self->firstbucket = lowbucket;
-      Py_INCREF(highbucket);
-      self->lastbucket = highbucket;
-      Py_INCREF(lowbucket);
-      self->currentbucket = lowbucket;
-    }
-
-  self->currentoffset = lowoffset;
-  self->pseudoindex = 0;
-
-  return OBJECT(self);
-}
-
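-/* For example (illustrative only; 'bucket' stands for some activated
- * Bucket pointer, and is not a name from this file):
- *
- *     newBTreeItems('k', NULL, 1, NULL, 0)                    empty slice
- *     newBTreeItems('i', bucket, 0, bucket, bucket->len - 1)  whole bucket
- */
-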
-static int
-nextBTreeItems(SetIteration *i)
-{
-  if (i->position >= 0)
-    {
-      if (i->position)
-        {
-          DECREF_KEY(i->key);
-          DECREF_VALUE(i->value);
-        }
-
-      if (BTreeItems_seek(ITEMS(i->set), i->position) >= 0)
-        {
-          Bucket *currentbucket;
-
-          currentbucket = BUCKET(ITEMS(i->set)->currentbucket);
-          UNLESS(PER_USE(currentbucket))
-            {
-              /* Mark iteration terminated, so that finiSetIteration doesn't
-               * try to redundantly decref the key and value
-               */
-              i->position = -1;
-              return -1;
-            }
-
-          COPY_KEY(i->key, currentbucket->keys[ITEMS(i->set)->currentoffset]);
-          INCREF_KEY(i->key);
-
-          COPY_VALUE(i->value,
-                     currentbucket->values[ITEMS(i->set)->currentoffset]);
-          INCREF_VALUE(i->value);
-
-          i->position ++;
-
-          PER_UNUSE(currentbucket);
-        }
-      else
-        {
-          i->position = -1;
-          PyErr_Clear();
-        }
-    }
-  return 0;
-}
-
-static int
-nextTreeSetItems(SetIteration *i)
-{
-  if (i->position >= 0)
-    {
-      if (i->position)
-        {
-          DECREF_KEY(i->key);
-        }
-
-      if (BTreeItems_seek(ITEMS(i->set), i->position) >= 0)
-        {
-          Bucket *currentbucket;
-
-          currentbucket = BUCKET(ITEMS(i->set)->currentbucket);
-          UNLESS(PER_USE(currentbucket))
-            {
-              /* Mark iteration terminated, so that finiSetIteration doesn't
-               * try to redundantly decref the key and value
-               */
-              i->position = -1;
-              return -1;
-            }
-
-          COPY_KEY(i->key, currentbucket->keys[ITEMS(i->set)->currentoffset]);
-          INCREF_KEY(i->key);
-
-          i->position ++;
-
-          PER_UNUSE(currentbucket);
-        }
-      else
-        {
-          i->position = -1;
-          PyErr_Clear();
-        }
-    }
-  return 0;
-}
-
-/* Support for the iteration protocol new in Python 2.2. */
-
-static PyTypeObject BTreeIter_Type;
-
-/* The type of iterator objects, returned by e.g. iter(IIBTree()). */
-typedef struct {
-    PyObject_HEAD
-    /* We use a BTreeItems object because it's convenient and flexible.
-     * We abuse it two ways:
-     *     1. We set currentbucket to NULL when the iteration is finished.
-     *     2. We don't bother keeping pseudoindex in synch.
-     */
-    BTreeItems *pitems;
-} BTreeIter;
-
-/* Return a new iterator object, to traverse the keys and/or values
- * represented by pitems.  pitems must not be NULL.  Returns NULL if error.
- */
-static BTreeIter *
-BTreeIter_new(BTreeItems *pitems)
-{
-    BTreeIter *result;
-
-    assert(pitems != NULL);
-    result = PyObject_New(BTreeIter, &BTreeIter_Type);
-    if (result) {
-        Py_INCREF(pitems);
-        result->pitems = pitems;
-    }
-    return result;
-}
-
-/* The iterator's tp_dealloc slot. */
-static void
-BTreeIter_dealloc(BTreeIter *bi)
-{
-	Py_DECREF(bi->pitems);
-	PyObject_Del(bi);
-}
-
-/* The implementation of the iterator's tp_iternext slot.  Returns "the next"
- * item; returns NULL if error; returns NULL without setting an error if the
- * iteration is exhausted (that's the way to terminate the iteration protocol).
- */
-static PyObject *
-BTreeIter_next(BTreeIter *bi, PyObject *args)
-{
-	PyObject *result = NULL;        /* until proven innocent */
-        BTreeItems *items = bi->pitems;
-        int i = items->currentoffset;
-	Bucket *bucket = items->currentbucket;
-
-        if (bucket == NULL)	/* iteration termination is sticky */
-	    return NULL;
-
-        PER_USE_OR_RETURN(bucket, NULL);
-        if (i >= bucket->len) {
-            /* We never leave this routine normally with i >= len:  somebody
-             * else mutated the current bucket.
-             */
-	    PyErr_SetString(PyExc_RuntimeError,
-		            "the bucket being iterated changed size");
-	    /* Arrange for this error to be sticky too. */
-	    items->currentoffset = INT_MAX;
-	    goto Done;
-	}
-
-        /* Build the result object, from bucket at offset i. */
-        result = getBucketEntry(bucket, i, items->kind);
-
-        /* Advance position for next call. */
-        if (bucket == items->lastbucket && i >= items->last) {
-            /* Next call should terminate the iteration. */
-            Py_DECREF(items->currentbucket);
-            items->currentbucket = NULL;
-        }
-        else {
-            ++i;
-            if (i >= bucket->len) {
-                Py_XINCREF(bucket->next);
-                items->currentbucket = bucket->next;
-                Py_DECREF(bucket);
-                i = 0;
-            }
-            items->currentoffset = i;
-        }
-
-Done:
-    PER_UNUSE(bucket);
-    return result;
-}
-
-static PyObject *
-BTreeIter_getiter(PyObject *it)
-{
-    Py_INCREF(it);
-    return it;
-}
-
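-/* A sketch (not part of the original file; the function name is made up)
- * of driving this iterator from C through Python's generic iteration API:
- * PyObject_GetIter invokes the container's tp_iter, and each PyIter_Next
- * call lands in BTreeIter_next above via tp_iternext.
- */
-static int
-exampleConsumeIter(PyObject *iterable)
-{
-    PyObject *it = PyObject_GetIter(iterable);
-    PyObject *item;
-
-    if (it == NULL)
-        return -1;
-    while ((item = PyIter_Next(it)) != NULL) {
-        /* ... use item: a key, a value, or a (key, value) tuple ... */
-        Py_DECREF(item);
-    }
-    Py_DECREF(it);
-    return PyErr_Occurred() ? -1 : 0;
-}
-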
-static PyTypeObject BTreeIter_Type = {
-        PyObject_HEAD_INIT(NULL)
-	0,					/* ob_size */
-	MOD_NAME_PREFIX "-iterator",		/* tp_name */
-	sizeof(BTreeIter),			/* tp_basicsize */
-	0,					/* tp_itemsize */
-	/* methods */
-	(destructor)BTreeIter_dealloc,          /* tp_dealloc */
-	0,					/* tp_print */
-	0,					/* tp_getattr */
-	0,					/* tp_setattr */
-	0,					/* tp_compare */
-	0,					/* tp_repr */
-	0,					/* tp_as_number */
-	0,					/* tp_as_sequence */
-	0,					/* tp_as_mapping */
-	0,					/* tp_hash */
-	0,					/* tp_call */
-	0,					/* tp_str */
-	0, /*PyObject_GenericGetAttr,*/		/* tp_getattro */
-	0,					/* tp_setattro */
-	0,					/* tp_as_buffer */
-	Py_TPFLAGS_DEFAULT,			/* tp_flags */
- 	0,					/* tp_doc */
- 	0,					/* tp_traverse */
- 	0,					/* tp_clear */
-	0,					/* tp_richcompare */
-	0,					/* tp_weaklistoffset */
-	(getiterfunc)BTreeIter_getiter,		/* tp_iter */
-	(iternextfunc)BTreeIter_next,	        /* tp_iternext */
-	0,					/* tp_methods */
-	0,					/* tp_members */
-	0,					/* tp_getset */
-	0,					/* tp_base */
-	0,					/* tp_dict */
-	0,					/* tp_descr_get */
-	0,					/* tp_descr_set */
-};
diff --git a/branches/bug1734/src/BTrees/BTreeModuleTemplate.c b/branches/bug1734/src/BTrees/BTreeModuleTemplate.c
deleted file mode 100755
index 8238cec8..00000000
--- a/branches/bug1734/src/BTrees/BTreeModuleTemplate.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#include "Python.h"
-/* include structmember.h for offsetof */
-#include "structmember.h"
-
-#ifdef PERSISTENT
-#include "cPersistence.h"
-#else
-#define PER_USE_OR_RETURN(self, NULL)
-#define PER_ALLOW_DEACTIVATION(self)
-#define PER_PREVENT_DEACTIVATION(self)
-#define PER_DEL(self)
-#define PER_USE(O) 1
-#define PER_ACCESSED(O) 1
-#endif
-
-/* So sue me.  This pair gets used all over the place, so much so that it
- * interferes with understanding non-persistence parts of algorithms.
- * PER_UNUSE can be used after a successful PER_USE or PER_USE_OR_RETURN.
- * It allows the object to become ghostified, and tells the persistence
- * machinery that the object's fields were used recently.
- */
-#define PER_UNUSE(OBJ) do {             \
-    PER_ALLOW_DEACTIVATION(OBJ);        \
-    PER_ACCESSED(OBJ);                  \
-} while (0)
-
-/*
-  The tp_name slots of the various BTree types contain the fully
-  qualified names of the types, e.g. zodb.btrees.OOBTree.OOBTree.
-  The full name is used to support pickling and because it is not
-  possible to modify the __module__ slot of a type dynamically.  (This
-  may be a bug in Python 2.2).
-*/
-
-#define MODULE_NAME "BTrees._" MOD_NAME_PREFIX "BTree."
-
-static PyObject *sort_str, *reverse_str, *__setstate___str,
-    *_bucket_type_str;
-static PyObject *ConflictError = NULL;
-
-static void PyVar_Assign(PyObject **v, PyObject *e) { Py_XDECREF(*v); *v=e;}
-#define ASSIGN(V,E) PyVar_Assign(&(V),(E))
-#define UNLESS(E) if (!(E))
-#define OBJECT(O) ((PyObject*)(O))
-
-#define MIN_BUCKET_ALLOC 16
-#define MAX_BTREE_SIZE(B) DEFAULT_MAX_BTREE_SIZE
-#define MAX_BUCKET_SIZE(B) DEFAULT_MAX_BUCKET_SIZE
-
-#define SameType_Check(O1, O2) ((O1)->ob_type==(O2)->ob_type)
-
-#define ASSERT(C, S, R) if (! (C)) { \
-  PyErr_SetString(PyExc_AssertionError, (S)); return (R); }
-
-/* Various kinds of BTree and Bucket structs are instances of
- * "sized containers", and have a common initial layout:
- *     The stuff needed for all Python objects, or all Persistent objects.
- *     int size:  The maximum number of things that could be contained
- *                without growing the container.
- *     int len:   The number of things currently contained.
- *
- * Invariant:  0 <= len <= size.
- *
- * A sized container typically goes on to declare one or more pointers
- * to contiguous arrays with 'size' elements each, the initial 'len' of
- * which are currently in use.
- */
-#ifdef PERSISTENT
-#define sizedcontainer_HEAD         \
-    cPersistent_HEAD                \
-    int size;                       \
-    int len;
-#else
-#define sizedcontainer_HEAD         \
-    PyObject_HEAD                   \
-    int size;                       \
-    int len;
-#endif
-
-/* Nothing is actually of type Sized, but (pointers to) BTree nodes and
- * Buckets can be cast to Sized* in contexts that only need to examine
- * the members common to all sized containers.
- */
-typedef struct Sized_s {
-    sizedcontainer_HEAD
-} Sized;
-
-#define SIZED(O) ((Sized*)(O))
-
-/* A Bucket wraps contiguous vectors of keys and values.  Keys are unique,
- * and stored in sorted order.  The 'values' pointer may be NULL if the
- * Bucket is used to implement a set.  Buckets serving as leaves of BTrees
- * are chained together via 'next', so that the entire BTree contents
- * can be traversed in sorted order quickly and easily.
- */
-typedef struct Bucket_s {
-  sizedcontainer_HEAD
-  struct Bucket_s *next;    /* the bucket with the next-larger keys */
-  KEY_TYPE *keys;           /* 'len' keys, in increasing order */
-  VALUE_TYPE *values;       /* 'len' corresponding values; NULL if a set */
-} Bucket;
-
-#define BUCKET(O) ((Bucket*)(O))
-
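-/* A minimal sketch (not in the original source; the function name is an
- * invention) of the activation discipline used throughout these files:
- * activate each bucket in the 'next' chain before touching its fields,
- * and release it afterwards.  The real routines also INCREF each bucket
- * to keep it alive while in use; that is elided here.
- */
-static int
-exampleChainLength(Bucket *b)
-{
-    int n = 0;
-    Bucket *next;
-
-    while (b != NULL) {
-        PER_USE_OR_RETURN(b, -1);   /* unghostify b, or fail with -1 */
-        n += b->len;
-        next = b->next;             /* fetch while still activated */
-        PER_UNUSE(b);               /* allow ghostification; note access */
-        b = next;
-    }
-    return n;
-}
-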
-/* A BTree is complicated.  See Maintainer.txt.
- */
-
-typedef struct BTreeItem_s {
-  KEY_TYPE key;
-  Sized *child; /* points to another BTree, or to a Bucket of some sort */
-} BTreeItem;
-
-typedef struct BTree_s {
-  sizedcontainer_HEAD
-
-  /* firstbucket points to the bucket containing the smallest key in
-   * the BTree.  This is found by traversing leftmost child pointers
-   * (data[0].child) until reaching a Bucket.
-   */
-  Bucket *firstbucket;
-
-  /* The BTree points to 'len' children, via the "child" fields of the data
-   * array.  There are len-1 keys in the 'key' fields, stored in increasing
-   * order.  data[0].key is unused.  For i in 0 .. len-1, all keys reachable
-   * from data[i].child are >= data[i].key and < data[i+1].key, at the
-   * endpoints pretending that data[0].key is minus infinity and
-   * data[len].key is positive infinity.
-   */
-  BTreeItem *data;
-} BTree;
-
-static PyTypeObject BTreeType;
-static PyTypeObject BucketType;
-
-#define BTREE(O) ((BTree*)(O))
-
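-/* Sketch (illustrative, not original code) of the firstbucket comment in
- * the BTree struct above:  the bucket holding the smallest key is reached
- * by following leftmost child pointers until the child is not a BTree.
- * Assumes a non-empty tree whose nodes are all activated; real code must
- * PER_USE each node along the way.
- */
-static Bucket *
-exampleLeftmostBucket(BTree *self)
-{
-    Sized *child = self->data[0].child;
-
-    while (SameType_Check(self, child)) {
-        self = BTREE(child);
-        child = self->data[0].child;
-    }
-    return BUCKET(child);
-}
-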
-/* Use BTREE_SEARCH to find which child pointer to follow.
- * RESULT   An int lvalue to hold the index i such that SELF->data[i].child
- *          is the correct node to search next.
- * SELF     A pointer to a BTree node.
- * KEY      The key you're looking for, of type KEY_TYPE.
- * ONERROR  What to do if key comparison raises an exception; for example,
- *          perhaps 'return NULL'.
- *
- * See Maintainer.txt for discussion:  this is optimized in subtle ways.
- * It's recommended that you call this at the start of a routine, and
- * defer the check for self->len == 0 until after it.
- */
-#define BTREE_SEARCH(RESULT, SELF, KEY, ONERROR) {          \
-    int _lo = 0;                                            \
-    int _hi = (SELF)->len;                                  \
-    int _i, _cmp;                                           \
-    for (_i = _hi >> 1; _i > _lo; _i = (_lo + _hi) >> 1) {  \
-        TEST_KEY_SET_OR(_cmp, (SELF)->data[_i].key, (KEY))  \
-            ONERROR;                                        \
-        if      (_cmp < 0) _lo = _i;                        \
-        else if (_cmp > 0) _hi = _i;                        \
-        else   /* equal */ break;                           \
-    }                                                       \
-    (RESULT) = _i;                                          \
-}
-
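-/* Illustrative use of BTREE_SEARCH (an assumption, mirroring _BTree_get
- * in BTreeTemplate.c):  pick the child subtree that may contain 'key'.
- * The self->len == 0 check comes after the macro, as the comment above
- * recommends; an empty tree yields NULL here with no exception set.
- */
-static Sized *
-exampleChildForKey(BTree *self, KEY_TYPE key)
-{
-    int i;
-
-    BTREE_SEARCH(i, self, key, return NULL);
-    return self->len ? self->data[i].child : NULL;
-}
-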
-/* SetIteration structs are used in the internal set iteration protocol.
- * When you want to iterate over a set or bucket or BTree (even an
- * individual key!),
- * 1. Declare a new iterator:
- *        SetIteration si = {0,0,0};
- *    Using "{0,0,0}" or "{0,0}" appears most common.  Only one {0} is
- *    necessary.  At least one must be given so that finiSetIteration() works
- *    correctly even if you don't get around to calling initSetIteration().
- * 2. Initialize it via
- *        initSetIteration(&si, PyObject *s, useValues)
- *    It's an error if that returns an int < 0.  In case of error on the
- *    init call, calling finiSetIteration(&si) is optional.  But if the
- *    init call succeeds, you must eventually call finiSetIteration(),
- *    whether or not subsequent calls to si.next() fail.
- * 3. Get the first element:
- *        if (si.next(&si) < 0) { there was an error }
- *    If the set isn't empty, this sets si.position to an int >= 0,
- *    si.key to the element's key (of type KEY_TYPE), and maybe si.value to
- *    the element's value (of type VALUE_TYPE).  si.value is defined
- *    iff si.usesValue is true.
- * 4. Process all the elements:
- *        while (si.position >= 0) {
- *            do something with si.key and/or si.value;
- *            if (si.next(&si) < 0) { there was an error; }
- *        }
- * 5. Finalize the SetIterator:
- *        finiSetIteration(&si);
- *    This is mandatory!  si may contain references to iterator objects,
- *    keys and values, and they must be cleaned up else they'll leak.  If
- *    this were C++ we'd hide that in the destructor, but in C you have to
- *    do it by hand.
- */
-typedef struct SetIteration_s
-{
-  PyObject *set;    /* the set, bucket, BTree, ..., being iterated */
-  int position;     /* initialized to 0; set to -1 by next() when done */
-  int usesValue;    /* true iff 'set' has values & we iterate them */
-  KEY_TYPE key;     /* next() sets to next key */
-  VALUE_TYPE value; /* next() may set to next value */
-  int (*next)(struct SetIteration_s*);  /* function to get next key+value */
-} SetIteration;
-
-/* Finish the set iteration protocol.  This MUST be called by everyone
- * who starts a set iteration, unless the initial call to initSetIteration
- * failed; in that case, and only that case, calling finiSetIteration is
- * optional.
- */
-static void
-finiSetIteration(SetIteration *i)
-{
-    assert(i != NULL);
-    if (i->set == NULL)
-        return;
-    Py_DECREF(i->set);
-    i->set = NULL;      /* so it doesn't hurt to call this again */
-
-    if (i->position > 0) {
-        /* next() was called at least once, but didn't finish iterating
-         * (else position would be negative).  So the cached key and
-         * value need to be cleaned up.
-         */
-        DECREF_KEY(i->key);
-        if (i->usesValue) {
-            DECREF_VALUE(i->value);
-        }
-    }
-    i->position = -1;   /* stop any stray next calls from doing harm */
-}
-
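-/* A compact sketch of the five protocol steps described above.  The
- * forward declaration matches initSetIteration as defined later in
- * SetOpTemplate.c (an assumption); the function name is illustrative.
- */
-static int initSetIteration(SetIteration *i, PyObject *s, int useValues);
-
-static int
-exampleIterateSet(PyObject *set)
-{
-    SetIteration si = {0, 0, 0};                /* step 1: declare */
-    int result = -1;
-
-    if (initSetIteration(&si, set, 0) < 0)      /* step 2: initialize */
-        return -1;      /* fini is optional after a failed init */
-    if (si.next(&si) < 0)                       /* step 3: first element */
-        goto Done;
-    while (si.position >= 0) {                  /* step 4: all elements */
-        /* ... use si.key (and si.value when si.usesValue) ... */
-        if (si.next(&si) < 0)
-            goto Done;
-    }
-    result = 0;
-Done:
-    finiSetIteration(&si);                      /* step 5: mandatory */
-    return result;
-}
-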
-static PyObject *
-IndexError(int i)
-{
-    PyObject *v;
-
-    v = PyInt_FromLong(i);
-    if (!v) {
-	v = Py_None;
-	Py_INCREF(v);
-    }
-    PyErr_SetObject(PyExc_IndexError, v);
-    Py_DECREF(v);
-    return NULL;
-}
-
-/* Search for the bucket immediately preceding *current, in the bucket chain
- * starting at first.  current, *current and first must not be NULL.
- *
- * Return:
- *     1    *current holds the correct bucket; this is a borrowed reference
- *     0    no such bucket exists; *current unaltered
- *    -1    error; *current unaltered
- */
-static int
-PreviousBucket(Bucket **current, Bucket *first)
-{
-    Bucket *trailing = NULL;    /* first travels; trailing follows it */
-    int result = 0;
-
-    assert(current && *current && first);
-    if (first == *current)
-        return 0;
-
-    do {
-        trailing = first;
-	PER_USE_OR_RETURN(first, -1);
-        first = first->next;
-	PER_UNUSE(trailing);
-
-	if (first == *current) {
-	    *current = trailing;
-	    result = 1;
-	    break;
-	}
-    } while (first);
-
-    return result;
-}
-
-static void *
-BTree_Malloc(size_t sz)
-{
-    void *r;
-
-    ASSERT(sz > 0, "non-positive size malloc", NULL);
-
-    r = malloc(sz);
-    if (r)
-	return r;
-
-    PyErr_NoMemory();
-    return NULL;
-}
-
-static void *
-BTree_Realloc(void *p, size_t sz)
-{
-    void *r;
-
-    ASSERT(sz > 0, "non-positive size realloc", NULL);
-
-    if (p)
-	r = realloc(p, sz);
-    else
-	r = malloc(sz);
-
-    UNLESS (r)
-	PyErr_NoMemory();
-
-    return r;
-}
-
-/* Shared keyword-argument list for BTree/Bucket
- * (iter)?(keys|values|items)
- */
-static char *search_keywords[] = {"min", "max",
-				  "excludemin", "excludemax",
-				  0};
-
-#include "BTreeItemsTemplate.c"
-#include "BucketTemplate.c"
-#include "SetTemplate.c"
-#include "BTreeTemplate.c"
-#include "TreeSetTemplate.c"
-#include "SetOpTemplate.c"
-#include "MergeTemplate.c"
-
-static struct PyMethodDef module_methods[] = {
-  {"difference", (PyCFunction) difference_m,	METH_VARARGS,
-   "difference(o1, o2) -- "
-   "compute the difference between o1 and o2"
-  },
-  {"union", (PyCFunction) union_m,	METH_VARARGS,
-   "union(o1, o2) -- compute the union of o1 and o2\n"
-  },
-  {"intersection", (PyCFunction) intersection_m,	METH_VARARGS,
-   "intersection(o1, o2) -- "
-   "compute the intersection of o1 and o2"
-  },
-#ifdef MERGE
-  {"weightedUnion", (PyCFunction) wunion_m,	METH_VARARGS,
-   "weightedUnion(o1, o2 [, w1, w2]) -- compute the union of o1 and o2\n"
-   "\nw1 and w2 are weights."
-  },
-  {"weightedIntersection", (PyCFunction) wintersection_m,	METH_VARARGS,
-   "weightedIntersection(o1, o2 [, w1, w2]) -- "
-   "compute the intersection of o1 and o2\n"
-   "\nw1 and w2 are weights."
-  },
-#endif
-#ifdef MULTI_INT_UNION
-  {"multiunion", (PyCFunction) multiunion_m, METH_VARARGS,
-   "multiunion(seq) -- compute union of a sequence of integer sets.\n"
-   "\n"
-   "Each element of seq must be an integer set, or convertible to one\n"
-   "via the set iteration protocol.  The union returned is an IISet."
-  },
-#endif
-  {NULL,		NULL}		/* sentinel */
-};
-
-static char BTree_module_documentation[] =
-"\n"
-MASTER_ID
-BTREEITEMSTEMPLATE_C
-"$Id$\n"
-BTREETEMPLATE_C
-BUCKETTEMPLATE_C
-KEYMACROS_H
-MERGETEMPLATE_C
-SETOPTEMPLATE_C
-SETTEMPLATE_C
-TREESETTEMPLATE_C
-VALUEMACROS_H
-;
-
-int
-init_persist_type(PyTypeObject *type)
-{
-    type->ob_type = &PyType_Type;
-    type->tp_base = cPersistenceCAPI->pertype;
-
-    if (PyType_Ready(type) < 0)
-	return 0;
-
-    return 1;
-}
-
-void
-INITMODULE (void)
-{
-    PyObject *m, *d, *c;
-
-    sort_str = PyString_InternFromString("sort");
-    if (!sort_str)
-	return;
-    reverse_str = PyString_InternFromString("reverse");
-    if (!reverse_str)
-	return;
-    __setstate___str = PyString_InternFromString("__setstate__");
-    if (!__setstate___str)
-	return;
-    _bucket_type_str = PyString_InternFromString("_bucket_type");
-    if (!_bucket_type_str)
-	return;
-
-    /* Grab the ConflictError class */
-    m = PyImport_ImportModule("ZODB.POSException");
-    if (m != NULL) {
-  	c = PyObject_GetAttrString(m, "BTreesConflictError");
-  	if (c != NULL)
-	    ConflictError = c;
-	Py_DECREF(m);
-    }
-
-    if (ConflictError == NULL) {
-  	Py_INCREF(PyExc_ValueError);
-	ConflictError=PyExc_ValueError;
-    }
-
-    /* Initialize the PyPersist_C_API and the type objects. */
-    cPersistenceCAPI = PyCObject_Import("persistent.cPersistence", "CAPI");
-    if (cPersistenceCAPI == NULL)
-	return;
-
-    BTreeItemsType.ob_type = &PyType_Type;
-    BTreeIter_Type.ob_type = &PyType_Type;
-    BTreeIter_Type.tp_getattro = PyObject_GenericGetAttr;
-    BucketType.tp_new = PyType_GenericNew;
-    SetType.tp_new = PyType_GenericNew;
-    BTreeType.tp_new = PyType_GenericNew;
-    TreeSetType.tp_new = PyType_GenericNew;
-    if (!init_persist_type(&BucketType))
-	return;
-    if (!init_persist_type(&BTreeType))
-	return;
-    if (!init_persist_type(&SetType))
-	return;
-    if (!init_persist_type(&TreeSetType))
-	return;
-
-    if (PyDict_SetItem(BTreeType.tp_dict, _bucket_type_str,
-		       (PyObject *)&BucketType) < 0) {
-	fprintf(stderr, "couldn't set BTree._bucket_type\n");
-	return;
-    }
-    if (PyDict_SetItem(TreeSetType.tp_dict, _bucket_type_str,
-		       (PyObject *)&SetType) < 0) {
-	fprintf(stderr, "couldn't set TreeSet._bucket_type\n");
-	return;
-    }
-
-    /* Create the module and add the functions */
-    m = Py_InitModule4("_" MOD_NAME_PREFIX "BTree",
-		       module_methods, BTree_module_documentation,
-		       (PyObject *)NULL, PYTHON_API_VERSION);
-
-    /* Add some symbolic constants to the module */
-    d = PyModule_GetDict(m);
-    if (PyDict_SetItemString(d, MOD_NAME_PREFIX "Bucket",
-			     (PyObject *)&BucketType) < 0)
-	return;
-    if (PyDict_SetItemString(d, MOD_NAME_PREFIX "BTree",
-			     (PyObject *)&BTreeType) < 0)
-	return;
-    if (PyDict_SetItemString(d, MOD_NAME_PREFIX "Set",
-			     (PyObject *)&SetType) < 0)
-	return;
-    if (PyDict_SetItemString(d, MOD_NAME_PREFIX "TreeSet",
-			     (PyObject *)&TreeSetType) < 0)
-	return;
-    if (PyDict_SetItemString(d, MOD_NAME_PREFIX "TreeIterator",
-			     (PyObject *)&BTreeIter_Type) < 0)
-	return;
-}
diff --git a/branches/bug1734/src/BTrees/BTreeTemplate.c b/branches/bug1734/src/BTrees/BTreeTemplate.c
deleted file mode 100755
index 6cf50cdc..00000000
--- a/branches/bug1734/src/BTrees/BTreeTemplate.c
+++ /dev/null
@@ -1,2078 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define BTREETEMPLATE_C "$Id$\n"
-
-/* Sanity-check a BTree.  This is a private helper for BTree_check.  Return:
- *      -1         Error.  If it's an internal inconsistency in the BTree,
- *                 AssertionError is set.
- *       0         No problem found.
- *
- * nextbucket is the bucket "one beyond the end" of the BTree; the last bucket
- * directly reachable from following right child pointers *should* be linked
- * to nextbucket (and this is checked).
- */
-static int
-BTree_check_inner(BTree *self, Bucket *nextbucket)
-{
-    int i;
-    Bucket *bucketafter;
-    Sized *child;
-    char *errormsg = "internal error";  /* someone should have overridden */
-    Sized *activated_child = NULL;
-    int result = -1;    /* until proved innocent */
-
-#define CHECK(CONDITION, ERRORMSG)          \
-    if (!(CONDITION)) {                     \
-        errormsg = (ERRORMSG);              \
-        goto Error;                         \
-    }
-
-    PER_USE_OR_RETURN(self, -1);
-    CHECK(self->len >= 0, "BTree len < 0");
-    CHECK(self->len <= self->size, "BTree len > size");
-    if (self->len == 0) {
-        /* Empty BTree. */
-        CHECK(self->firstbucket == NULL,
-              "Empty BTree has non-NULL firstbucket");
-        result = 0;
-        goto Done;
-    }
-    /* Non-empty BTree. */
-    CHECK(self->firstbucket != NULL, "Non-empty BTree has NULL firstbucket");
-
-    /* Obscure:  The first bucket is pointed to at least by self->firstbucket
-     * and data[0].child of whichever BTree node it's a child of.  However,
-     * if persistence is enabled then the latter BTree node may be a ghost
-     * at this point, and so its pointers "don't count":  we can only rely
-     * on self's pointers being intact.
-     */
-#ifdef PERSISTENT
-    CHECK(self->firstbucket->ob_refcnt >= 1,
-          "Non-empty BTree firstbucket has refcount < 1");
-#else
-    CHECK(self->firstbucket->ob_refcnt >= 2,
-          "Non-empty BTree firstbucket has refcount < 2");
-#endif
-
-    for (i = 0; i < self->len; ++i) {
-        CHECK(self->data[i].child != NULL, "BTree has NULL child");
-    }
-
-    if (SameType_Check(self, self->data[0].child)) {
-        /* Our children are also BTrees. */
-        child = self->data[0].child;
-        UNLESS (PER_USE(child)) goto Done;
-        activated_child = child;
-        CHECK(self->firstbucket == BTREE(child)->firstbucket,
-               "BTree has firstbucket different than "
-               "its first child's firstbucket");
-        PER_ALLOW_DEACTIVATION(child);
-        activated_child = NULL;
-        for (i = 0; i < self->len; ++i) {
-            child = self->data[i].child;
-            CHECK(SameType_Check(self, child),
-                  "BTree children have different types");
-            if (i == self->len - 1)
-                bucketafter = nextbucket;
-            else {
-                BTree *child2 = BTREE(self->data[i+1].child);
-                UNLESS (PER_USE(child2)) goto Done;
-                bucketafter = child2->firstbucket;
-                PER_ALLOW_DEACTIVATION(child2);
-            }
-            if (BTree_check_inner(BTREE(child), bucketafter) < 0) goto Done;
-        }
-    }
-    else {
-        /* Our children are buckets. */
-        CHECK(self->firstbucket == BUCKET(self->data[0].child),
-              "Bottom-level BTree node has inconsistent firstbucket belief");
-        for (i = 0; i < self->len; ++i) {
-            child = self->data[i].child;
-            UNLESS (PER_USE(child)) goto Done;
-            activated_child = child;
-            CHECK(!SameType_Check(self, child),
-                  "BTree children have different types");
-            CHECK(child->len >= 1, "Bucket length < 1"); /* no empty buckets! */
-            CHECK(child->len <= child->size, "Bucket len > size");
-#ifdef PERSISTENT
-            CHECK(child->ob_refcnt >= 1, "Bucket has refcount < 1");
-#else
-            CHECK(child->ob_refcnt >= 2, "Bucket has refcount < 2");
-#endif
-            if (i == self->len - 1)
-                bucketafter = nextbucket;
-            else
-                bucketafter = BUCKET(self->data[i+1].child);
-            CHECK(BUCKET(child)->next == bucketafter,
-                  "Bucket next pointer is damaged");
-            PER_ALLOW_DEACTIVATION(child);
-            activated_child = NULL;
-        }
-    }
-    result = 0;
-    goto Done;
-
-Error:
-    PyErr_SetString(PyExc_AssertionError, errormsg);
-    result = -1;
-Done:
-    /* No point updating access time -- this isn't a "real" use. */
-    PER_ALLOW_DEACTIVATION(self);
-    if (activated_child) {
-        PER_ALLOW_DEACTIVATION(activated_child);
-    }
-    return result;
-
-#undef CHECK
-}
-
-/* Sanity-check a BTree.  This is the ._check() method.  Return:
- *      NULL       Error.  If it's an internal inconsistency in the BTree,
- *                 AssertionError is set.
- *      Py_None    No problem found.
- */
-static PyObject*
-BTree_check(BTree *self)
-{
-    PyObject *result = NULL;
-    int i = BTree_check_inner(self, NULL);
-
-    if (i >= 0) {
-        result = Py_None;
-        Py_INCREF(result);
-    }
-    return result;
-}
-
-/*
-** _BTree_get
-**
-** Search a BTree.
-**
-** Arguments
-**      self        a pointer to a BTree
-**      keyarg      the key to search for, as a Python object
-**      has_key     true/false; when false, try to return the associated
-**                  value; when true, return a boolean
-** Return
-**      When has_key false:
-**          If key exists, its associated value.
-**          If key doesn't exist, NULL and KeyError is set.
-**      When has_key true:
-**          A Python int is returned in any case.
-**          If key exists, the depth of the bucket in which it was found.
-**          If key doesn't exist, 0.
-*/
-static PyObject *
-_BTree_get(BTree *self, PyObject *keyarg, int has_key)
-{
-    KEY_TYPE key;
-    PyObject *result = NULL;    /* guilty until proved innocent */
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    UNLESS (copied) return NULL;
-
-    PER_USE_OR_RETURN(self, NULL);
-    if (self->len == 0) {
-        /* empty BTree */
-        if (has_key)
-            result = PyInt_FromLong(0);
-        else
-            PyErr_SetObject(PyExc_KeyError, keyarg);
-    }
-    else {
-        for (;;) {
-            int i;
-            Sized *child;
-
-            BTREE_SEARCH(i, self, key, goto Done);
-            child = self->data[i].child;
-            has_key += has_key != 0;    /* bump depth counter, maybe */
-            if (SameType_Check(self, child)) {
-                PER_UNUSE(self);
-                self = BTREE(child);
-                PER_USE_OR_RETURN(self, NULL);
-            }
-            else {
-                result = _bucket_get(BUCKET(child), keyarg, has_key);
-                break;
-            }
-        }
-    }
-
-Done:
-    PER_UNUSE(self);
-    return result;
-}
-
-static PyObject *
-BTree_get(BTree *self, PyObject *key)
-{
-  return _BTree_get(self, key, 0);
-}
-
-/* Create a new bucket for the BTree or TreeSet using the class attribute
-   _bucket_type, which is normally initialized to BucketType or SetType
-   as appropriate.
-*/
-static Sized *
-BTree_newBucket(BTree *self)
-{
-    PyObject *factory;
-    Sized *result;
-
-    /* _bucket_type_str defined in BTreeModuleTemplate.c */
-    factory = PyObject_GetAttr((PyObject *)self->ob_type, _bucket_type_str);
-    if (factory == NULL)
-	return NULL;
-    /* TODO: Should we check that the factory actually returns something
-       of the appropriate type? How?  The C code here is going to
-       depend on any custom bucket type having the same layout at the
-       C level.
-    */
-    result = SIZED(PyObject_CallObject(factory, NULL));
-    Py_DECREF(factory);
-    return result;
-}
-
-/*
- * Move data from the current BTree, from index onward, to the newly created
- * BTree 'next'.  self and next must both be activated.  If index is OOB (< 0
- * or >= self->len), use self->len / 2 as the index (i.e., split at the
- * midpoint).  self must have at least 2 children on entry, and index must
- * be such that self and next each have at least one child at exit.  self's
- * accessed time is updated.
- *
- * Return:
- *    -1    error
- *     0    OK
- */
-static int
-BTree_split(BTree *self, int index, BTree *next)
-{
-    int next_size;
-    Sized *child;
-
-    if (index < 0 || index >= self->len)
-	index = self->len / 2;
-
-    next_size = self->len - index;
-    ASSERT(index > 0, "split creates empty tree", -1);
-    ASSERT(next_size > 0, "split creates empty tree", -1);
-
-    next->data = BTree_Malloc(sizeof(BTreeItem) * next_size);
-    if (!next->data)
-	return -1;
-    memcpy(next->data, self->data + index, sizeof(BTreeItem) * next_size);
-    next->size = next_size;  /* but don't set len until we succeed */
-
-    /* Set next's firstbucket.  self->firstbucket is still correct. */
-    child = next->data[0].child;
-    if (SameType_Check(self, child)) {
-        PER_USE_OR_RETURN(child, -1);
-	next->firstbucket = BTREE(child)->firstbucket;
-	PER_UNUSE(child);
-    }
-    else
-	next->firstbucket = BUCKET(child);
-    Py_INCREF(next->firstbucket);
-
-    next->len = next_size;
-    self->len = index;
-    return PER_CHANGED(self) >= 0 ? 0 : -1;
-}
-
-
-/* Fwd decl -- BTree_grow and BTree_split_root reference each other. */
-static int BTree_grow(BTree *self, int index, int noval);
-
-/* Split the root.  This is a little special because the root isn't a child
- * of anything else, and the root needs to retain its object identity.  So
- * this routine moves the root's data into a new child, and splits the
- * latter.  This leaves the root with two children.
- *
- * Return:
- *      0   OK
- *     -1   error
- *
- * CAUTION:  The caller must call PER_CHANGED on self.
- */
-static int
-BTree_split_root(BTree *self, int noval)
-{
-    BTree *child;
-    BTreeItem *d;
-
-    /* Create a child BTree, and a new data vector for self. */
-    child = BTREE(PyObject_CallObject(OBJECT(self->ob_type), NULL));
-    if (!child) return -1;
-
-    d = BTree_Malloc(sizeof(BTreeItem) * 2);
-    if (!d) {
-        Py_DECREF(child);
-        return -1;
-    }
-
-    /* Move our data to new BTree. */
-    child->size = self->size;
-    child->len = self->len;
-    child->data = self->data;
-    child->firstbucket = self->firstbucket;
-    Py_INCREF(child->firstbucket);
-
-    /* Point self to child and split the child. */
-    self->data = d;
-    self->len = 1;
-    self->size = 2;
-    self->data[0].child = SIZED(child); /* transfers reference ownership */
-    return BTree_grow(self, 0, noval);
-}
-
-/*
-** BTree_grow
-**
-** Grow a BTree
-**
-** Arguments:	self	The BTree
-**		index	self->data[index].child needs to be split.  index
-**                      must be 0 if self is empty (len == 0), and a new
-**                      empty bucket is created then.
-**              noval   Boolean; is this a set (true) or mapping (false)?
-**
-** Returns:	 0	on success
-**		-1	on failure
-**
-** CAUTION:  If self is empty on entry, this routine adds an empty bucket.
-** That isn't a legitimate BTree; if the caller doesn't put something
-** in the bucket (say, because of a later error), the BTree must be cleared
-** to get rid of the empty bucket.
-*/
-static int
-BTree_grow(BTree *self, int index, int noval)
-{
-  int i;
-  Sized *v, *e = 0;
-  BTreeItem *d;
-
-  if (self->len == self->size) {
-      if (self->size) {
-          d = BTree_Realloc(self->data, sizeof(BTreeItem) * self->size * 2);
-	  if (d == NULL)
-	      return -1;
-          self->data = d;
-          self->size *= 2;
-      }
-      else {
-          d = BTree_Malloc(sizeof(BTreeItem) * 2);
-	  if (d == NULL)
-	      return -1;
-          self->data = d;
-          self->size = 2;
-      }
-  }
-
-  if (self->len) {
-      d = self->data + index;
-      v = d->child;
-      /* Create a new object of the same type as the target value */
-      e = (Sized *)PyObject_CallObject((PyObject *)v->ob_type, NULL);
-      if (e == NULL)
-	  return -1;
-
-      UNLESS(PER_USE(v)) {
-          Py_DECREF(e);
-          return -1;
-      }
-
-      /* Now split between the original (v) and the new (e) at the midpoint. */
-      if (SameType_Check(self, v))
-          i = BTree_split((BTree *)v, -1, (BTree *)e);
-      else
-          i = bucket_split((Bucket *)v, -1, (Bucket *)e);
-      PER_ALLOW_DEACTIVATION(v);
-
-      if (i < 0) {
-          Py_DECREF(e);
-	  assert(PyErr_Occurred());
-          return -1;
-      }
-
-      index++;
-      d++;
-      if (self->len > index)	/* Shift up the old values one array slot */
-	  memmove(d+1, d, sizeof(BTreeItem)*(self->len-index));
-
-      if (SameType_Check(self, v)) {
-          COPY_KEY(d->key, BTREE(e)->data->key);
-
-          /* We take the unused reference from e, so there's no
-             reason to INCREF!
-          */
-          /* INCREF_KEY(self->data[1].key); */
-      }
-      else {
-          COPY_KEY(d->key, BUCKET(e)->keys[0]);
-          INCREF_KEY(d->key);
-      }
-      d->child = e;
-      self->len++;
-
-      if (self->len >= MAX_BTREE_SIZE(self) * 2)    /* the root is huge */
-	  return BTree_split_root(self, noval);
-  }
-  else {
-      /* The BTree is empty.  Create an empty bucket.  See CAUTION in
-       * the comments preceding.
-       */
-      assert(index == 0);
-      d = self->data;
-      d->child = BTree_newBucket(self);
-      if (d->child == NULL)
-	  return -1;
-      self->len = 1;
-      Py_INCREF(d->child);
-      self->firstbucket = (Bucket *)d->child;
-  }
-
-  return 0;
-}
-
-/* Return the rightmost bucket reachable from following child pointers
- * from self.  The caller gets a new reference to this bucket.  Note that
- * bucket 'next' pointers are not followed:  if self is an interior node
- * of a BTree, this returns the rightmost bucket in that node's subtree.
- * In case of error, returns NULL.
- *
- * self must not be a ghost; this isn't checked.  The result may be a ghost.
- *
- * Pragmatics:  Note that the rightmost bucket's last key is the largest
- * key in self's subtree.
- */
-static Bucket *
-BTree_lastBucket(BTree *self)
-{
-    Sized *pchild;
-    Bucket *result;
-
-    UNLESS (self->data && self->len) {
-        IndexError(-1); /* is this the best action to take? */
-        return NULL;
-    }
-
-    pchild = self->data[self->len - 1].child;
-    if (SameType_Check(self, pchild)) {
-        self = BTREE(pchild);
-        PER_USE_OR_RETURN(self, NULL);
-        result = BTree_lastBucket(self);
-        PER_UNUSE(self);
-    }
-    else {
-        Py_INCREF(pchild);
-        result = BUCKET(pchild);
-    }
-    return result;
-}
-
-static int
-BTree_deleteNextBucket(BTree *self)
-{
-    Bucket *b;
-
-    UNLESS (PER_USE(self)) return -1;
-
-    b = BTree_lastBucket(self);
-    if (b == NULL)
-	goto err;
-    if (Bucket_deleteNextBucket(b) < 0)
-	goto err;
-
-    Py_DECREF(b);
-    PER_UNUSE(self);
-
-    return 0;
-
- err:
-    Py_XDECREF(b);
-    PER_ALLOW_DEACTIVATION(self);
-    return -1;
-}
-
-/*
-** _BTree_clear
-**
-** Clears out all of the values in the BTree (firstbucket, keys, and children),
-** leaving self an empty BTree.
-**
-** Arguments:	self	The BTree
-**
-** Returns:	 0	on success
-**		-1	on failure
-**
-** Internal:  Deallocation order is important.  The danger is that a long
-** list of buckets may get freed "at once" via decref'ing the first bucket,
-** in which case a chain of consequent Py_DECREF calls may blow the stack.
-** Luckily, every bucket has a refcount of at least two, one due to being a
-** BTree node's child, and another either because it's not the first bucket in
-** the chain (so the preceding bucket points to it), or because firstbucket
-** points to it.  By clearing in the natural depth-first, left-to-right
-** order, the BTree->bucket child pointers prevent Py_DECREF(bucket->next)
-** calls from freeing bucket->next, and the maximum stack depth is equal
-** to the height of the tree.
-**/
-static int
-_BTree_clear(BTree *self)
-{
-    const int len = self->len;
-
-    if (self->firstbucket) {
-        /* Obscure:  The first bucket is pointed to at least by
-         * self->firstbucket and data[0].child of whichever BTree node it's
-         * a child of.  However, if persistence is enabled then the latter
-         * BTree node may be a ghost at this point, and so its pointers "don't
-         * count":  we can only rely on self's pointers being intact.
-         */
-#ifdef PERSISTENT
-	ASSERT(self->firstbucket->ob_refcnt > 0,
-	       "Invalid firstbucket pointer", -1);
-#else
-	ASSERT(self->firstbucket->ob_refcnt > 1,
-	       "Invalid firstbucket pointer", -1);
-#endif
-	Py_DECREF(self->firstbucket);
-	self->firstbucket = NULL;
-    }
-
-    if (self->data) {
-        int i;
-        if (len > 0) { /* 0 is special because key 0 is trash */
-            Py_DECREF(self->data[0].child);
-	}
-
-        for (i = 1; i < len; i++) {
-#ifdef KEY_TYPE_IS_PYOBJECT
-	    DECREF_KEY(self->data[i].key);
-#endif
-            Py_DECREF(self->data[i].child);
-        }
-	free(self->data);
-	self->data = NULL;
-    }
-
-    self->len = self->size = 0;
-    return 0;
-}
-
-/*
-  Set (value != 0) or delete (value=0) a tree item.
-
-  If unique is non-zero, then only change if the key is
-  new.
-
-  If noval is non-zero, then don't set a value (the tree
-  is a set).
-
-  Return:
-    -1  error
-     0  successful, and number of entries didn't change
-    >0  successful, and number of entries did change
-
-  Internal
-     There are two distinct return values > 0:
-
-     1  Successful, number of entries changed, but firstbucket did not go away.
-
-     2  Successful, number of entries changed, firstbucket did go away.
-        This can only happen on a delete (value == NULL).  The caller may
-        need to change its own firstbucket pointer, and in any case *someone*
-        needs to adjust the 'next' pointer of the bucket immediately preceding
-        the bucket that went away (it needs to point to the bucket immediately
-        following the bucket that went away).
-*/
-static int
-_BTree_set(BTree *self, PyObject *keyarg, PyObject *value,
-           int unique, int noval)
-{
-    int changed = 0;    /* did I mutate? */
-    int min;            /* index of child I searched */
-    BTreeItem *d;       /* self->data[min] */
-    int childlength;    /* len(self->data[min].child) */
-    int status;         /* our return value; and return value from callee */
-    int self_was_empty; /* was self empty at entry? */
-
-    KEY_TYPE key;
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    if (!copied) return -1;
-
-    PER_USE_OR_RETURN(self, -1);
-
-    self_was_empty = self->len == 0;
-    if (self_was_empty) {
-        /* We're empty.  Make room. */
-	if (value) {
-	    if (BTree_grow(self, 0, noval) < 0)
-		goto Error;
-	}
-	else {
-	    /* Can't delete a key from an empty BTree. */
-	    PyErr_SetObject(PyExc_KeyError, keyarg);
-	    goto Error;
-	}
-    }
-
-    /* Find the right child to search, and hand the work off to it. */
-    BTREE_SEARCH(min, self, key, goto Error);
-    d = self->data + min;
-
-    if (SameType_Check(self, d->child))
-	status = _BTree_set(BTREE(d->child), keyarg, value, unique, noval);
-    else {
-        int bucket_changed = 0;
-	status = _bucket_set(BUCKET(d->child), keyarg,
-	                     value, unique, noval, &bucket_changed);
-#ifdef PERSISTENT
-	/* If a BTree contains only a single bucket, BTree.__getstate__()
-	 * includes the bucket's entire state, and the bucket doesn't get
-	 * an oid of its own.  So if we have a single oid-less bucket that
-	 * changed, it's *our* oid that should be marked as changed -- the
-	 * bucket doesn't have one.
-	 */
-	if (bucket_changed
-	    && self->len == 1
-	    && self->data[0].child->oid == NULL)
-	{
-	    changed = 1;
-	}
-#endif
-    }
-    if (status == 0) goto Done;
-    if (status < 0) goto Error;
-    assert(status == 1 || status == 2);
-
-    /* The child changed size.  Get its new size.  Note that since the tree
-     * rooted at the child changed size, so did the tree rooted at self:
-     * our status must be >= 1 too.
-     */
-    UNLESS(PER_USE(d->child)) goto Error;
-    childlength = d->child->len;
-    PER_UNUSE(d->child);
-
-    if (value) {
-        /* A bucket got bigger -- if it's "too big", split it. */
-        int toobig;
-
-        assert(status == 1);    /* can be 2 only on deletes */
-        if (SameType_Check(self, d->child))
-            toobig = childlength > MAX_BTREE_SIZE(d->child);
-        else
-            toobig = childlength > MAX_BUCKET_SIZE(d->child);
-
-        if (toobig) {
-            if (BTree_grow(self, min, noval) < 0) goto Error;
-            changed = 1;        /* BTree_grow mutated self */
-        }
-        goto Done;      /* and status still == 1 */
-    }
-
-    /* A bucket got smaller.  This is much harder, and despite that we
-     * don't try to rebalance the tree.
-     */
-    if (status == 2) {  /*  this is the last reference to child status */
-        /* Two problems to solve:  May have to adjust our own firstbucket,
-         * and the bucket that went away needs to get unlinked.
-         */
-        if (min) {
-            /* This wasn't our firstbucket, so no need to adjust ours (note
-             * that it can't be the firstbucket of any node above us either).
-             * Tell "the tree to the left" to do the unlinking.
-             */
-            if (BTree_deleteNextBucket(BTREE(d[-1].child)) < 0) goto Error;
-            status = 1;     /* we solved the child's firstbucket problem */
-        }
-        else {
-            /* This was our firstbucket.  Update to new firstbucket value. */
-            Bucket *nextbucket;
-            UNLESS(PER_USE(d->child)) goto Error;
-            nextbucket = BTREE(d->child)->firstbucket;
-            PER_UNUSE(d->child);
-
-            Py_XINCREF(nextbucket);
-            Py_DECREF(self->firstbucket);
-            self->firstbucket = nextbucket;
-            changed = 1;
-
-            /* The caller has to do the unlinking -- we can't.  Also, since
-             * it was our firstbucket, it may also be theirs.
-             */
-            assert(status == 2);
-        }
-    }
-
-    /* If the child isn't empty, we're done!  We did all that was possible for
-     * us to do with the firstbucket problems the child gave us, and since the
-     * child isn't empty don't create any new firstbucket problems of our own.
-     */
-    if (childlength) goto Done;
-
-    /* The child became empty:  we need to remove it from self->data.
-     * But first, if we're a bottom-level node, we've got more bucket-fiddling
-     * to set up.
-     */
-    if (!SameType_Check(self, d->child)) {
-        /* We're about to delete a bucket. */
-        if (min) {
-            /* It's not our first bucket, so we can tell the previous
-             * bucket to adjust its reference to it.  It can't be anyone
-             * else's first bucket either, so the caller needn't do anything.
-             */
-            if (Bucket_deleteNextBucket(BUCKET(d[-1].child)) < 0) goto Error;
-            /* status should be 1, and already is:  if it were 2, the
-             * block above would have set it to 1 in its min != 0 branch.
-             */
-            assert(status == 1);
-        }
-        else {
-            Bucket *nextbucket;
-            /* It's our first bucket.  We can't unlink it directly. */
-            /* 'changed' will be set true by the deletion code following. */
-            UNLESS(PER_USE(d->child)) goto Error;
-            nextbucket = BUCKET(d->child)->next;
-            PER_UNUSE(d->child);
-
-            Py_XINCREF(nextbucket);
-            Py_DECREF(self->firstbucket);
-            self->firstbucket = nextbucket;
-
-            status = 2; /* we're giving our caller a new firstbucket problem */
-         }
-    }
-
-    /* Remove the child from self->data. */
-    Py_DECREF(d->child);
-#ifdef KEY_TYPE_IS_PYOBJECT
-    if (min) {
-        DECREF_KEY(d->key);
-    }
-    else if (self->len > 1) {
-	/* We're deleting the first child of a BTree with more than one
-	 * child.  The key at d+1 is about to be shifted into slot 0,
-	 * and hence never to be referenced again (the key in slot 0 is
-	 * trash).
-	 */
-	DECREF_KEY((d+1)->key);
-    }
-    /* Else min==0 and len==1:  we're emptying the BTree entirely, and
-     * there is no key in need of decrefing.
-     */
-#endif
-    --self->len;
-    if (min < self->len)
-        memmove(d, d+1, (self->len - min) * sizeof(BTreeItem));
-    changed = 1;
-
-Done:
-#ifdef PERSISTENT
-    if (changed) {
-        if (PER_CHANGED(self) < 0) goto Error;
-    }
-#endif
-    PER_UNUSE(self);
-    return status;
-
-Error:
-    assert(PyErr_Occurred());
-    if (self_was_empty) {
-        /* BTree_grow may have left the BTree in an invalid state.  Make
-         * sure the tree is a legitimate empty tree.
-         */
-        _BTree_clear(self);
-    }
-    PER_UNUSE(self);
-    return -1;
-}
-
-/*
-** BTree_setitem
-**
-** wrapper for _BTree_set
-**
-** Arguments:	self	The BTree
-**		key	The key to insert
-**		v	The value to insert
-**
-** Returns	-1	on failure
-**		 0	on success
-*/
-static int
-BTree_setitem(BTree *self, PyObject *key, PyObject *v)
-{
-    if (_BTree_set(self, key, v, 0, 0) < 0)
-	return -1;
-    return 0;
-}
-
-#ifdef PERSISTENT
-static PyObject *
-BTree__p_deactivate(BTree *self, PyObject *args, PyObject *keywords)
-{
-    int ghostify = 1;
-    PyObject *force = NULL;
-
-    if (args && PyTuple_GET_SIZE(args) > 0) {
-	PyErr_SetString(PyExc_TypeError,
-			"_p_deactivate takes no positional arguments");
-	return NULL;
-    }
-    if (keywords) {
-	int size = PyDict_Size(keywords);
-	force = PyDict_GetItemString(keywords, "force");
-	if (force)
-	    size--;
-	if (size) {
-	    PyErr_SetString(PyExc_TypeError,
-			    "_p_deactivate only accepts keyword arg force");
-	    return NULL;
-	}
-    }
-
-    if (self->jar && self->oid) {
-	ghostify = self->state == cPersistent_UPTODATE_STATE;
-	if (!ghostify && force) {
-	    if (PyObject_IsTrue(force))
-		ghostify = 1;
-	    if (PyErr_Occurred())
-		return NULL;
-	}
-	if (ghostify) {
-	    if (_BTree_clear(self) < 0)
-		return NULL;
-	    PER_GHOSTIFY(self);
-	}
-    }
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-#endif
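-
-/* Python-level usage sketch (illustrative only, not from the original
- * source):  t._p_deactivate() ghostifies a clean, stored BTree, freeing
- * its in-memory state, while t._p_deactivate(force=True) ghostifies it
- * even if its state isn't up to date.
- */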
-
-static PyObject *
-BTree_clear(BTree *self)
-{
-  UNLESS (PER_USE(self)) return NULL;
-
-  if (self->len)
-    {
-      if (_BTree_clear(self) < 0)
-	  goto err;
-      if (PER_CHANGED(self) < 0)
-	  goto err;
-    }
-
-  PER_UNUSE(self);
-
-  Py_INCREF(Py_None);
-  return Py_None;
-
-err:
-  PER_UNUSE(self);
-  return NULL;
-}
-
-/*
- * Return:
- *
- * For an empty BTree (self->len == 0), None.
- *
- * For a BTree with one child (self->len == 1), and that child is a bucket,
- * and that bucket has a NULL oid, a one-tuple containing a one-tuple
- * containing the bucket's state:
- *
- *     (
- *         (
- *              child[0].__getstate__(),
- *         ),
- *     )
- *
- * Else a two-tuple.  The first element is a tuple interleaving the BTree's
- * keys and direct children, of size 2*self->len - 1 (key[0] is unused and
- * is not saved).  The second element is the firstbucket:
- *
- *     (
- *          (child[0], key[1], child[1], key[2], child[2], ...,
- *                                       key[len-1], child[len-1]),
- *          self->firstbucket
- *     )
- *
- * In the above, key[i] means self->data[i].key, and similarly for child[i].
- */
-static PyObject *
-BTree_getstate(BTree *self)
-{
-    PyObject *r = NULL;
-    PyObject *o;
-    int i, l;
-
-    UNLESS (PER_USE(self)) return NULL;
-
-    if (self->len) {
-	r = PyTuple_New(self->len * 2 - 1);
-	if (r == NULL)
-	    goto err;
-
-	if (self->len == 1
-	    && self->data->child->ob_type != self->ob_type
-#ifdef PERSISTENT
-	    && BUCKET(self->data->child)->oid == NULL
-#endif
-	    ) {
-	    /* We have just one bucket. Save its data directly. */
-	    o = bucket_getstate((Bucket *)self->data->child);
-	    if (o == NULL)
-		goto err;
-	    PyTuple_SET_ITEM(r, 0, o);
-	    ASSIGN(r, Py_BuildValue("(O)", r));
-        }
-        else {
-	    for (i=0, l=0; i < self->len; i++) {
-		if (i) {
-		    COPY_KEY_TO_OBJECT(o, self->data[i].key);
-		    PyTuple_SET_ITEM(r, l, o);
-		    l++;
-                }
-		o = (PyObject *)self->data[i].child;
-		Py_INCREF(o);
-		PyTuple_SET_ITEM(r,l,o);
-		l++;
-            }
-	    ASSIGN(r, Py_BuildValue("OO", r, self->firstbucket));
-        }
-
-    }
-    else {
-	r = Py_None;
-	Py_INCREF(r);
-    }
-
-    PER_UNUSE(self);
-
-    return r;
-
- err:
-    PER_UNUSE(self);
-    Py_XDECREF(r);
-    return NULL;
-}
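-
-/* Worked example of the layout above (hypothetical, not from the original
- * source):  a BTree with two bucket children and key[1] == 5 pickles as
- *
- *     ((child[0], 5, child[1]), self->firstbucket)
- *
- * i.e. len == 2, so the interleaved tuple has 2*2 - 1 == 3 entries, and
- * the unused key[0] is not saved.
- */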
-
-static int
-_BTree_setstate(BTree *self, PyObject *state, int noval)
-{
-    PyObject *items, *firstbucket = NULL;
-    BTreeItem *d;
-    int len, l, i, copied=1;
-
-    if (_BTree_clear(self) < 0)
-	return -1;
-
-    /* The state of a BTree can be one of the following:
-       None -- an empty BTree
-       A one-tuple -- a single bucket btree
-       A two-tuple -- a BTree with more than one bucket
-       See comments for BTree_getstate() for the details.
-    */
-
-    if (state == Py_None)
-	return 0;
-
-    if (!PyArg_ParseTuple(state, "O|O:__setstate__", &items, &firstbucket))
-	return -1;
-
-    len = PyTuple_Size(items);
-    if (len < 0)
-	return -1;
-    len = (len + 1) / 2;
-
-    assert(len > 0); /* If the BTree is empty, its state is None. */
-    assert(self->size == 0); /* We called _BTree_clear(). */
-
-    self->data = BTree_Malloc(sizeof(BTreeItem) * len);
-    if (self->data == NULL)
-	return -1;
-    self->size = len;
-
-    for (i = 0, d = self->data, l = 0; i < len; i++, d++) {
-	PyObject *v;
-	if (i) { /* skip the first key slot */
-	    COPY_KEY_FROM_ARG(d->key, PyTuple_GET_ITEM(items, l), copied);
-	    l++;
-	    if (!copied)
-		return -1;
-	    INCREF_KEY(d->key);
-	}
-	v = PyTuple_GET_ITEM(items, l);
-	if (PyTuple_Check(v)) {
-	    /* Handle the special case in __getstate__() for a BTree
-	       with a single bucket. */
-	    d->child = BTree_newBucket(self);
-	    if (!d->child)
-		return -1;
-	    if (noval) {
-		if (_set_setstate(BUCKET(d->child), v) < 0)
-		    return -1;
-	    }
-	    else {
-		if (_bucket_setstate(BUCKET(d->child), v) < 0)
-		    return -1;
-	    }
-	}
-	else {
-	    d->child = (Sized *)v;
-	    Py_INCREF(v);
-	}
-	l++;
-    }
-
-    if (!firstbucket)
-	firstbucket = (PyObject *)self->data->child;
-
-    if (!PyObject_IsInstance(firstbucket, (PyObject *)
-			     (noval ? &SetType : &BucketType))) {
-	PyErr_SetString(PyExc_TypeError,
-			"No firstbucket in non-empty BTree");
-	return -1;
-    }
-    self->firstbucket = BUCKET(firstbucket);
-    Py_INCREF(firstbucket);
-#ifndef PERSISTENT
-    /* firstbucket is also the child of some BTree node, but that node may
-     * be a ghost if persistence is enabled.
-     */
-    assert(self->firstbucket->ob_refcnt > 1);
-#endif
-    self->len = len;
-
-    return 0;
-}
-
-static PyObject *
-BTree_setstate(BTree *self, PyObject *arg)
-{
-    int r;
-
-    PER_PREVENT_DEACTIVATION(self);
-    r = _BTree_setstate(self, arg, 0);
-    PER_UNUSE(self);
-
-    if (r < 0)
-	return NULL;
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-#ifdef PERSISTENT
-
-/* Recognize the special cases of a BTree that's empty or contains a single
- * bucket.  In the former case, return a borrowed reference to Py_None.
- * In the single-bucket case, the bucket state is embedded directly in the
- * BTree state, like so:
- *
- *     (
- *         (
- *              thebucket.__getstate__(),
- *         ),
- *     )
- *
- * When this obtains, return a borrowed reference to thebucket.__getstate__().
- * Else return NULL with an exception set.  The exception should always be
- * ConflictError then, but may be TypeError if the state makes no sense at all
- * for a BTree (corrupted or hostile state).
- */
-PyObject *
-get_bucket_state(PyObject *t)
-{
-	if (t == Py_None)
-		return Py_None;		/* an empty BTree */
-	if (! PyTuple_Check(t)) {
-		PyErr_SetString(PyExc_TypeError,
-			"_p_resolveConflict: expected tuple or None for state");
-		return NULL;
-	}
-
-	if (PyTuple_GET_SIZE(t) == 2) {
-		/* A non-degenerate BTree. */
-		return merge_error(-1, -1, -1, 11);
-	}
-
-	/* We're in the one-bucket case. */
-
-	if (PyTuple_GET_SIZE(t) != 1) {
-		PyErr_SetString(PyExc_TypeError,
-			"_p_resolveConflict: expected 1- or 2-tuple for state");
-		return NULL;
-	}
-
-	t = PyTuple_GET_ITEM(t, 0);
-	if (! PyTuple_Check(t) || PyTuple_GET_SIZE(t) != 1) {
-		PyErr_SetString(PyExc_TypeError,
-			"_p_resolveConflict: expected 1-tuple containing "
-			"bucket state");
-		return NULL;
-	}
-
-	t = PyTuple_GET_ITEM(t, 0);
-	if (! PyTuple_Check(t)) {
-		PyErr_SetString(PyExc_TypeError,
-			"_p_resolveConflict: expected tuple for bucket state");
-		return NULL;
-	}
-
-	return t;
-}
-
-/* Tricky.  The only kind of BTree conflict we can actually potentially
- * resolve is the special case of a BTree containing a single bucket,
- * in which case this becomes a fancy way of calling the bucket conflict
- * resolution code.
- */
-static PyObject *
-BTree__p_resolveConflict(BTree *self, PyObject *args)
-{
-    PyObject *s[3];
-    PyObject *x, *y, *z;
-
-    if (!PyArg_ParseTuple(args, "OOO", &x, &y, &z))
-	return NULL;
-
-    s[0] = get_bucket_state(x);
-    if (s[0] == NULL)
-	return NULL;
-    s[1] = get_bucket_state(y);
-    if (s[1] == NULL)
-	return NULL;
-    s[2] = get_bucket_state(z);
-    if (s[2] == NULL)
-	return NULL;
-
-    if (PyObject_IsInstance((PyObject *)self, (PyObject *)&BTreeType))
-	x = _bucket__p_resolveConflict(OBJECT(&BucketType), s);
-    else
-	x = _bucket__p_resolveConflict(OBJECT(&SetType), s);
-
-    if (x == NULL)
-	return NULL;
-
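-    /* Wrap the resolved bucket state back up in the one-bucket BTree
-     * form, ((bucket_state,),), matching get_bucket_state() above. */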
-    return Py_BuildValue("((N))", x);
-}
-#endif
-
-/*
- BTree_findRangeEnd -- Find one end, expressed as a bucket and
- position, for a range search.
-
- If low, return bucket and index of the smallest item >= key,
- otherwise return bucket and index of the largest item <= key.
-
- If exclude_equal, exact matches aren't acceptable; if one is found,
- move right if low, or left if !low (this is for range searches exclusive
- of an endpoint).
-
- Return:
-    -1      Error; offset and bucket unchanged
-     0      Not found; offset and bucket unchanged
-     1      Correct bucket and offset stored; the caller owns a new reference
-            to the bucket.
-
- Internal:
-    We do binary searches in BTree nodes downward, at each step following
-    C(i) where K(i) <= key < K(i+1).  As always, K(i) <= C(i) < K(i+1) too.
-    (See Maintainer.txt for the meaning of that notation.)  That eventually
-    leads to a bucket where we do Bucket_findRangeEnd.  That usually works,
-    but there are two cases where it can fail to find the correct answer:
-
-    1. On a low search, we find a bucket with keys >= K(i), but that doesn't
-       imply there are keys in the bucket >= key.  For example, suppose
-       a bucket has keys in 1..100, its successor's keys are in 200..300, and
-       we're doing a low search on 150.  We'll end up in the first bucket,
-       but there are no keys >= 150 in it.  K(i+1) > key, though, and all
-       the keys in C(i+1) >= K(i+1) > key, so the first key in the next
-       bucket (if any) is the correct result.  This is easy to find by
-       following the bucket 'next' pointer.
-
-    2. On a high search, the fact that the keys in the bucket are >= K(i)
-       again doesn't imply that any key in the bucket is <= key, but it's
-       harder for this to fail (and an earlier version of this routine didn't
-       catch it):  if K(i) itself is in the bucket, it works (then
-       K(i) <= key is *a* key in the bucket that's in the desired range).
-       But when keys get deleted from buckets, they aren't also deleted from
-       BTree nodes, so there's no guarantee that K(i) is in the bucket.
-       For example, delete the smallest key S from some bucket, and S
-       remains in the interior BTree nodes.  Do a high search for S, and
-       the BTree nodes direct the search to the bucket S used to be in,
-       but all keys remaining in that bucket are > S.  The largest key in
-       the *preceding* bucket (if any) is < K(i), though, and K(i) <= key,
-       so the largest key in the preceding bucket is < key and so is the
-       proper result.
-
-       This is harder to get at efficiently, as buckets are linked only in
-       the increasing direction.  While we're searching downward,
-       deepest_smaller is set to the node deepest in the tree where
-       we *could* have gone to the left of C(i).  The rightmost bucket in
-       deepest_smaller's subtree is the bucket preceding the one we first
-       find.  This is clumsy to get at, but efficient.
-*/
-static int
-BTree_findRangeEnd(BTree *self, PyObject *keyarg, int low, int exclude_equal,
-                   Bucket **bucket, int *offset) {
-    Sized *deepest_smaller = NULL;      /* last possibility to move left */
-    int deepest_smaller_is_btree = 0;   /* Boolean; if false, it's a bucket */
-    Bucket *pbucket;
-    int self_got_rebound = 0;   /* Boolean; when true, deactivate self */
-    int result = -1;            /* Until proven innocent */
-    int i;
-    KEY_TYPE key;
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    UNLESS (copied) return -1;
-
-    /* We don't need to: PER_USE_OR_RETURN(self, -1);
-       because the caller does. */
-    UNLESS (self->data && self->len) return 0;
-
-    /* Search downward until hitting a bucket, stored in pbucket. */
-    for (;;) {
-        Sized *pchild;
-        int pchild_is_btree;
-
-        BTREE_SEARCH(i, self, key, goto Done);
-        pchild = self->data[i].child;
-        pchild_is_btree = SameType_Check(self, pchild);
-        if (i) {
-            deepest_smaller = self->data[i-1].child;
-            deepest_smaller_is_btree = pchild_is_btree;
-        }
-
-        if (pchild_is_btree) {
-            if (self_got_rebound) {
-                PER_UNUSE(self);
-            }
-            self = BTREE(pchild);
-            self_got_rebound = 1;
-            PER_USE_OR_RETURN(self, -1);
-        }
-        else {
-            pbucket = BUCKET(pchild);
-            break;
-        }
-    }
-
-    /* Search the bucket for a suitable key. */
-    i = Bucket_findRangeEnd(pbucket, keyarg, low, exclude_equal, offset);
-    if (i < 0)
-        goto Done;
-    if (i > 0) {
-        Py_INCREF(pbucket);
-        *bucket = pbucket;
-        result = 1;
-        goto Done;
-    }
-    /* This may be one of the two difficult cases detailed in the comments. */
-    if (low) {
-        Bucket *next;
-
-        UNLESS(PER_USE(pbucket)) goto Done;
-        next = pbucket->next;
-        if (next) {
-                result = 1;
-                Py_INCREF(next);
-                *bucket = next;
-                *offset = 0;
-        }
-        else
-                result = 0;
-        PER_UNUSE(pbucket);
-    }
-    /* High-end search:  if it's possible to go left, do so. */
-    else if (deepest_smaller) {
-        if (deepest_smaller_is_btree) {
-            UNLESS(PER_USE(deepest_smaller)) goto Done;
-            /* We own the reference this returns. */
-            pbucket = BTree_lastBucket(BTREE(deepest_smaller));
-            PER_UNUSE(deepest_smaller);
-            if (pbucket == NULL) goto Done;   /* error */
-        }
-        else {
-            pbucket = BUCKET(deepest_smaller);
-            Py_INCREF(pbucket);
-        }
-        UNLESS(PER_USE(pbucket)) goto Done;
-        result = 1;
-        *bucket = pbucket;  /* transfer ownership to caller */
-        *offset = pbucket->len - 1;
-        PER_UNUSE(pbucket);
-    }
-    else
-        result = 0;     /* simply not found */
-
-Done:
-    if (self_got_rebound) {
-        PER_UNUSE(self);
-    }
-    return result;
-}
-
-static PyObject *
-BTree_maxminKey(BTree *self, PyObject *args, int min)
-{
-  PyObject *key=0;
-  Bucket *bucket = NULL;
-  int offset, rc;
-
-  UNLESS (PyArg_ParseTuple(args, "|O", &key)) return NULL;
-
-  UNLESS (PER_USE(self)) return NULL;
-
-  UNLESS (self->data && self->len) goto empty;
-
-  /* Find the range */
-
-  if (key)
-    {
-      if ((rc = BTree_findRangeEnd(self, key, min, 0, &bucket, &offset)) <= 0)
-        {
-          if (rc < 0) goto err;
-          goto empty;
-        }
-      PER_UNUSE(self);
-      UNLESS (PER_USE(bucket))
-        {
-          Py_DECREF(bucket);
-          return NULL;
-        }
-    }
-  else if (min)
-    {
-      bucket = self->firstbucket;
-      PER_UNUSE(self);
-      PER_USE_OR_RETURN(bucket, NULL);
-      Py_INCREF(bucket);
-      offset = 0;
-    }
-  else
-    {
-      bucket = BTree_lastBucket(self);
-      PER_UNUSE(self);
-      UNLESS (PER_USE(bucket))
-        {
-          Py_DECREF(bucket);
-          return NULL;
-        }
-      assert(bucket->len);
-      offset = bucket->len - 1;
-    }
-
-  COPY_KEY_TO_OBJECT(key, bucket->keys[offset]);
-  PER_UNUSE(bucket);
-  Py_DECREF(bucket);
-
-  return key;
-
- empty:
-  PyErr_SetString(PyExc_ValueError, "empty tree");
-
- err:
-  PER_UNUSE(self);
-  if (bucket)
-    {
-      PER_UNUSE(bucket);
-      Py_DECREF(bucket);
-    }
-  return NULL;
-}
-
-static PyObject *
-BTree_minKey(BTree *self, PyObject *args)
-{
-  return BTree_maxminKey(self, args, 1);
-}
-
-static PyObject *
-BTree_maxKey(BTree *self, PyObject *args)
-{
-  return BTree_maxminKey(self, args, 0);
-}
-
-/*
-** BTree_rangeSearch
-**
-** Generates a BTreeItems object based on the two indexes passed in,
-** being the range between them.
-**
-*/
-static PyObject *
-BTree_rangeSearch(BTree *self, PyObject *args, PyObject *kw, char type)
-{
-    PyObject *min = Py_None;
-    PyObject *max = Py_None;
-    int excludemin = 0;
-    int excludemax = 0;
-    int rc;
-    Bucket *lowbucket = NULL;
-    Bucket *highbucket = NULL;
-    int lowoffset;
-    int highoffset;
-    PyObject *result;
-
-    if (args) {
-        if (! PyArg_ParseTupleAndKeywords(args, kw, "|OOii", search_keywords,
-  					  &min,
-  					  &max,
-  					  &excludemin,
-  					  &excludemax))
-	    return NULL;
-    }
-
-    UNLESS (PER_USE(self)) return NULL;
-
-    UNLESS (self->data && self->len) goto empty;
-
-    /* Find the low range */
-    if (min != Py_None) {
-        if ((rc = BTree_findRangeEnd(self, min, 1, excludemin,
-        			      &lowbucket, &lowoffset)) <= 0) {
-            if (rc < 0) goto err;
-            goto empty;
-        }
-    }
-    else {
-        lowbucket = self->firstbucket;
-        lowoffset = 0;
-        if (excludemin) {
-            int bucketlen;
-            UNLESS (PER_USE(lowbucket)) goto err;
-            bucketlen = lowbucket->len;
-            PER_UNUSE(lowbucket);
-            if (bucketlen > 1)
-            	lowoffset = 1;
-	    else if (self->len < 2)
-	    	goto empty;
-            else {	/* move to first item in next bucket */
-                Bucket *next;
-                UNLESS (PER_USE(lowbucket)) goto err;
-                next = lowbucket->next;
-                PER_UNUSE(lowbucket);
-                assert(next != NULL);
-                lowbucket = next;
-                /* and lowoffset is still 0 */
-                assert(lowoffset == 0);
-            }
-	}
-        Py_INCREF(lowbucket);
-    }
-
-    /* Find the high range */
-    if (max != Py_None) {
-        if ((rc = BTree_findRangeEnd(self, max, 0, excludemax,
-        			      &highbucket, &highoffset)) <= 0) {
-            Py_DECREF(lowbucket);
-            if (rc < 0) goto err;
-            goto empty;
-        }
-    }
-    else {
-    	int bucketlen;
-        highbucket = BTree_lastBucket(self);
-        assert(highbucket != NULL);  /* we know self isn't empty */
-        UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets;
-        bucketlen = highbucket->len;
-    	PER_UNUSE(highbucket);
-        highoffset = bucketlen - 1;
-        if (excludemax) {
-	    if (highoffset > 0)
-	    	--highoffset;
-	    else if (self->len < 2)
-	    	goto empty_and_decref_buckets;
-	    else {	/* move to last item of preceding bucket */
-	    	int status;
-	    	assert(highbucket != self->firstbucket);
-	    	Py_DECREF(highbucket);
-	    	status = PreviousBucket(&highbucket, self->firstbucket);
-	    	if (status < 0) {
-	    	    Py_DECREF(lowbucket);
-	    	    goto err;
-	    	}
-	    	assert(status > 0);
-	    	Py_INCREF(highbucket);
-	        UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets;
-	        highoffset = highbucket->len - 1;
-    	    	PER_UNUSE(highbucket);
-	    }
-        }
-    	assert(highoffset >= 0);
-    }
-
-    /* It's still possible that the range is empty, even if min < max.  For
-     * example, if min=3 and max=4, and 3 and 4 aren't in the BTree, but 2 and
-     * 5 are, then the low position points to the 5 now and the high position
-     * points to the 2 now.  They're not necessarily even in the same bucket,
-     * so there's no trick we can play with pointer compares to get out
-     * cheap in general.
-     */
-    if (lowbucket == highbucket && lowoffset > highoffset)
-	goto empty_and_decref_buckets;      /* definitely empty */
-
-    /* The buckets differ, or they're the same and the offsets show a non-
-     * empty range.
-     */
-    if (min != Py_None && max != Py_None && /* both args user-supplied */
-        lowbucket != highbucket)   /* and different buckets */ {
-        KEY_TYPE first;
-        KEY_TYPE last;
-        int cmp;
-
-        /* Have to check the hard way:  see how the endpoints compare. */
-        UNLESS (PER_USE(lowbucket)) goto err_and_decref_buckets;
-        COPY_KEY(first, lowbucket->keys[lowoffset]);
-        PER_UNUSE(lowbucket);
-
-        UNLESS (PER_USE(highbucket)) goto err_and_decref_buckets;
-        COPY_KEY(last, highbucket->keys[highoffset]);
-        PER_UNUSE(highbucket);
-
-        TEST_KEY_SET_OR(cmp, first, last) goto err_and_decref_buckets;
-        if (cmp > 0) goto empty_and_decref_buckets;
-    }
-
-    PER_UNUSE(self);
-
-    result = newBTreeItems(type, lowbucket, lowoffset, highbucket, highoffset);
-    Py_DECREF(lowbucket);
-    Py_DECREF(highbucket);
-    return result;
-
- err_and_decref_buckets:
-    Py_DECREF(lowbucket);
-    Py_DECREF(highbucket);
-
- err:
-    PER_UNUSE(self);
-    return NULL;
-
- empty_and_decref_buckets:
-    Py_DECREF(lowbucket);
-    Py_DECREF(highbucket);
-
- empty:
-    PER_UNUSE(self);
-    return newBTreeItems(type, 0, 0, 0, 0);
-}
-
-/*
-** BTree_keys
-*/
-static PyObject *
-BTree_keys(BTree *self, PyObject *args, PyObject *kw)
-{
-    return BTree_rangeSearch(self, args, kw, 'k');
-}
-
-/*
-** BTree_values
-*/
-static PyObject *
-BTree_values(BTree *self, PyObject *args, PyObject *kw)
-{
-    return BTree_rangeSearch(self, args, kw, 'v');
-}
-
-/*
-** BTree_items
-*/
-static PyObject *
-BTree_items(BTree *self, PyObject *args, PyObject *kw)
-{
-    return BTree_rangeSearch(self, args, kw, 'i');
-}
-
-static PyObject *
-BTree_byValue(BTree *self, PyObject *omin)
-{
-  PyObject *r=0, *o=0, *item=0;
-  VALUE_TYPE min;
-  VALUE_TYPE v;
-  int copied=1;
-  SetIteration it = {0, 0, 1};
-
-  /* Copy min before activating self, so a conversion error can't leave
-   * self activated.
-   */
-  COPY_VALUE_FROM_ARG(min, omin, copied);
-  UNLESS(copied) return NULL;
-
-  UNLESS (PER_USE(self)) return NULL;
-
-  UNLESS (r=PyList_New(0)) goto err;
-
-  it.set=BTree_rangeSearch(self, NULL, NULL, 'i');
-  UNLESS(it.set) goto err;
-
-  if (nextBTreeItems(&it) < 0) goto err;
-
-  while (it.position >= 0)
-    {
-      if (TEST_VALUE(it.value, min) >= 0)
-        {
-          UNLESS (item = PyTuple_New(2)) goto err;
-
-          COPY_KEY_TO_OBJECT(o, it.key);
-          UNLESS (o) goto err;
-          PyTuple_SET_ITEM(item, 1, o);
-
-          COPY_VALUE(v, it.value);
-          NORMALIZE_VALUE(v, min);
-          COPY_VALUE_TO_OBJECT(o, v);
-          DECREF_VALUE(v);
-          UNLESS (o) goto err;
-          PyTuple_SET_ITEM(item, 0, o);
-
-          if (PyList_Append(r, item) < 0) goto err;
-          Py_DECREF(item);
-          item = 0;
-        }
-      if (nextBTreeItems(&it) < 0) goto err;
-    }
-
-  item=PyObject_GetAttr(r,sort_str);
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_CallObject(item, NULL));
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_GetAttr(r, reverse_str));
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_CallObject(item, NULL));
-  UNLESS (item) goto err;
-  Py_DECREF(item);
-
-  finiSetIteration(&it);
-  PER_UNUSE(self);
-  return r;
-
- err:
-  PER_UNUSE(self);
-  Py_XDECREF(r);
-  finiSetIteration(&it);
-  Py_XDECREF(item);
-  return NULL;
-}
-
-/*
-** BTree_getm
-*/
-static PyObject *
-BTree_getm(BTree *self, PyObject *args)
-{
-  PyObject *key, *d=Py_None, *r;
-
-  UNLESS (PyArg_ParseTuple(args, "O|O", &key, &d)) return NULL;
-  if ((r=_BTree_get(self, key, 0))) return r;
-  UNLESS (PyErr_ExceptionMatches(PyExc_KeyError)) return NULL;
-  PyErr_Clear();
-  Py_INCREF(d);
-  return d;
-}
-
-static PyObject *
-BTree_has_key(BTree *self, PyObject *key)
-{
-    return _BTree_get(self, key, 1);
-}
-
-/* Search BTree self for key.  This is the sq_contains slot of the
- * PySequenceMethods.
- *
- * Return:
- *     -1     error
- *      0     not found
- *      1     found
- */
-static int
-BTree_contains(BTree *self, PyObject *key)
-{
-    PyObject *asobj = _BTree_get(self, key, 1);
-    int result = -1;
-
-    if (asobj != NULL) {
-        result = PyInt_AsLong(asobj) ? 1 : 0;
-        Py_DECREF(asobj);
-    }
-    return result;
-}
-
-static PyObject *
-BTree_addUnique(BTree *self, PyObject *args)
-{
-  int grew;
-  PyObject *key, *v;
-
-  UNLESS (PyArg_ParseTuple(args, "OO", &key, &v)) return NULL;
-
-  if ((grew=_BTree_set(self, key, v, 1, 0)) < 0) return NULL;
-  return PyInt_FromLong(grew);
-}
-
-/**************************************************************************/
-/* Iterator support. */
-
-/* A helper to build all the iterators for BTrees and TreeSets.
- * If args is NULL, the iterator spans the entire structure.  Else it's an
- * argument tuple, with optional low and high arguments.
- * kind is 'k', 'v' or 'i'.
- * Returns a BTreeIter object, or NULL if error.
- */
-static PyObject *
-buildBTreeIter(BTree *self, PyObject *args, PyObject *kw, char kind)
-{
-    BTreeIter *result = NULL;
-    BTreeItems *items = (BTreeItems *)BTree_rangeSearch(self, args, kw, kind);
-
-    if (items) {
-        result = BTreeIter_new(items);
-        Py_DECREF(items);
-    }
-    return (PyObject *)result;
-}
-
-/* The implementation of iter(BTree_or_TreeSet); the BTree tp_iter slot. */
-static PyObject *
-BTree_getiter(BTree *self)
-{
-    return buildBTreeIter(self, NULL, NULL, 'k');
-}
-
-/* The implementation of BTree.iterkeys(). */
-static PyObject *
-BTree_iterkeys(BTree *self, PyObject *args, PyObject *kw)
-{
-    return buildBTreeIter(self, args, kw, 'k');
-}
-
-/* The implementation of BTree.itervalues(). */
-static PyObject *
-BTree_itervalues(BTree *self, PyObject *args, PyObject *kw)
-{
-    return buildBTreeIter(self, args, kw, 'v');
-}
-
-/* The implementation of BTree.iteritems(). */
-static PyObject *
-BTree_iteritems(BTree *self, PyObject *args, PyObject *kw)
-{
-    return buildBTreeIter(self, args, kw, 'i');
-}
-
-/* End of iterator support. */
-
-
-/* Caution:  Even though the _firstbucket attribute is read-only, a program
-   could do arbitrary damage to the btree internals.  For example, it could
-   call clear() on a bucket inside a BTree.
-
-   We need to decide if the convenience for inspecting BTrees is worth
-   the risk.
-*/
-
-static struct PyMemberDef BTree_members[] = {
-    {"_firstbucket", T_OBJECT, offsetof(BTree, firstbucket), RO},
-    {NULL}
-};
-
-static struct PyMethodDef BTree_methods[] = {
-    {"__getstate__", (PyCFunction) BTree_getstate,	METH_NOARGS,
-     "__getstate__() -> state\n\n"
-     "Return the picklable state of the BTree."},
-
-    {"__setstate__", (PyCFunction) BTree_setstate,	METH_O,
-     "__setstate__(state)\n\n"
-     "Set the state of the BTree."},
-
-    {"has_key",	(PyCFunction) BTree_has_key,	METH_O,
-     "has_key(key)\n\n"
-     "Return true if the BTree contains the given key."},
-
-    {"keys",	(PyCFunction) BTree_keys,	METH_KEYWORDS,
-     "keys([min, max]) -> list of keys\n\n"
-     "Returns the keys of the BTree.  If min and max are supplied, only\n"
-     "keys greater than min and less than max are returned."},
-
-    {"values",	(PyCFunction) BTree_values,	METH_KEYWORDS,
-     "values([min, max]) -> list of values\n\n"
-     "Returns the values of the BTree.  If min and max are supplied, only\n"
-     "values corresponding to keys greater than min and less than max are\n"
-     "returned."},
-
-    {"items",	(PyCFunction) BTree_items,	METH_KEYWORDS,
-     "items([min, max]) -> -- list of key, value pairs\n\n"
-     "Returns the items of the BTree.  If min and max are supplied, only\n"
-     "items with keys greater than min and less than max are returned."},
-
-    {"byValue",	(PyCFunction) BTree_byValue,	METH_O,
-     "byValue(min) ->  list of value, key pairs\n\n"
-     "Returns list of value, key pairs where the value is >= min.  The\n"
-     "list is sorted by value.  Note that items() returns keys in the\n"
-     "opposite order."},
-
-    {"get",	(PyCFunction) BTree_getm,	METH_VARARGS,
-     "get(key[, default=None]) -> Value for key or default\n\n"
-     "Return the value or the default if the key is not found."},
-
-    {"maxKey", (PyCFunction) BTree_maxKey,	METH_VARARGS,
-     "maxKey([max]) -> key\n\n"
-     "Return the largest key in the BTree.  If max is specified, return\n"
-     "the largest key <= max."},
-
-    {"minKey", (PyCFunction) BTree_minKey,	METH_VARARGS,
-     "minKey([mi]) -> key\n\n"
-     "Return the smallest key in the BTree.  If min is specified, return\n"
-     "the smallest key >= min."},
-
-    {"clear",	(PyCFunction) BTree_clear,	METH_NOARGS,
-     "clear()\n\nRemove all of the items from the BTree."},
-
-    {"insert", (PyCFunction)BTree_addUnique, METH_VARARGS,
-     "insert(key, value) -> 0 or 1\n\n"
-     "Add an item if the key is not already used. Return 1 if the item was\n"
-     "added, or 0 otherwise."},
-
-    {"update",	(PyCFunction) Mapping_update,	METH_O,
-     "update(collection)\n\n Add the items from the given collection."},
-
-    {"iterkeys", (PyCFunction) BTree_iterkeys,  METH_KEYWORDS,
-     "B.iterkeys([min[,max]]) -> an iterator over the keys of B"},
-
-    {"itervalues", (PyCFunction) BTree_itervalues,  METH_KEYWORDS,
-     "B.itervalues([min[,max]]) -> an iterator over the values of B"},
-
-    {"iteritems", (PyCFunction) BTree_iteritems,    METH_KEYWORDS,
-     "B.iteritems([min[,max]]) -> an iterator over the (key, value) items of B"},
-
-    {"_check", (PyCFunction) BTree_check,       METH_NOARGS,
-     "Perform sanity check on BTree, and raise exception if flawed."},
-
-#ifdef PERSISTENT
-    {"_p_resolveConflict", (PyCFunction) BTree__p_resolveConflict,
-     METH_VARARGS,
-     "_p_resolveConflict() -- Reinitialize from a newly created copy"},
-
-    {"_p_deactivate", (PyCFunction) BTree__p_deactivate,	METH_KEYWORDS,
-     "_p_deactivate()\n\nReinitialize from a newly created copy."},
-#endif
-    {NULL, NULL}
-};
-
-static int
-BTree_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    PyObject *v = NULL;
-
-    if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "BTree", &v))
-	return -1;
-
-    if (v)
-	return update_from_seq(self, v);
-    else
-	return 0;
-}
-
-static void
-BTree_dealloc(BTree *self)
-{
-    if (self->state != cPersistent_GHOST_STATE)
-	_BTree_clear(self);
-    cPersistenceCAPI->pertype->tp_dealloc((PyObject *)self);
-}
-
-static int
-BTree_traverse(BTree *self, visitproc visit, void *arg)
-{
-    int err = 0;
-    int i, len;
-
-#define VISIT(SLOT)                             \
-    if (SLOT) {                                 \
-        err = visit((PyObject *)(SLOT), arg);   \
-        if (err)                                \
-            goto Done;                          \
-    }
-
-    if (self->ob_type == &BTreeType)
-	assert(self->ob_type->tp_dictoffset == 0);
-
-    /* Call our base type's traverse function.  Because BTrees are
-     * subclasses of Persistent, there must be one.
-     */
-    err = cPersistenceCAPI->pertype->tp_traverse((PyObject *)self, visit, arg);
-    if (err)
-	goto Done;
-
-    /* If this is registered with the persistence system, cleaning up cycles
-     * is the database's problem.  It would be horrid to unghostify BTree
-     * nodes here just to chase pointers every time gc runs.
-     */
-    if (self->state == cPersistent_GHOST_STATE)
-	goto Done;
-
-    len = self->len;
-#ifdef KEY_TYPE_IS_PYOBJECT
-    /* Keys are Python objects so need to be traversed.  Note that the
-     * key 0 slot is unused and should not be traversed.
-     */
-    for (i = 1; i < len; i++)
-        VISIT(self->data[i].key);
-#endif
-
-    /* Children are always pointers, and child 0 is legit. */
-    for (i = 0; i < len; i++)
-        VISIT(self->data[i].child);
-
-    VISIT(self->firstbucket);
-
-Done:
-    return err;
-
-#undef VISIT
-}
-
-static int
-BTree_tp_clear(BTree *self)
-{
-    if (self->state != cPersistent_GHOST_STATE)
-	_BTree_clear(self);
-    return 0;
-}
-
-/*
- * Return the number of elements in a BTree.  nonzero is a Boolean, and
- * when true requests just a non-empty/empty result.  Testing for emptiness
- * is efficient (constant-time).  Getting the true length takes time
- * proportional to the number of leaves (buckets).
- *
- * Return:
- *     When nonzero true:
- *          -1  error
- *           0  empty
- *           1  not empty
- *     When nonzero false (possibly expensive!):
- *          -1  error
- *        >= 0  number of elements.
- */
-static int
-BTree_length_or_nonzero(BTree *self, int nonzero)
-{
-    int result;
-    Bucket *b;
-    Bucket *next;
-
-    PER_USE_OR_RETURN(self, -1);
-    b = self->firstbucket;
-    PER_UNUSE(self);
-    if (nonzero)
-        return b != NULL;
-
-    result = 0;
-    while (b) {
-        PER_USE_OR_RETURN(b, -1);
-        result += b->len;
-        next = b->next;
-        PER_UNUSE(b);
-        b = next;
-    }
-    return result;
-}
-
-static int
-BTree_length( BTree *self)
-{
-  return BTree_length_or_nonzero(self, 0);
-}
-
-static PyMappingMethods BTree_as_mapping = {
-  (inquiry)BTree_length,		/*mp_length*/
-  (binaryfunc)BTree_get,		/*mp_subscript*/
-  (objobjargproc)BTree_setitem,	        /*mp_ass_subscript*/
-};
-
-static PySequenceMethods BTree_as_sequence = {
-    (inquiry)0,                     /* sq_length */
-    (binaryfunc)0,                  /* sq_concat */
-    (intargfunc)0,                  /* sq_repeat */
-    (intargfunc)0,                  /* sq_item */
-    (intintargfunc)0,               /* sq_slice */
-    (intobjargproc)0,               /* sq_ass_item */
-    (intintobjargproc)0,            /* sq_ass_slice */
-    (objobjproc)BTree_contains,     /* sq_contains */
-    0,                              /* sq_inplace_concat */
-    0,                              /* sq_inplace_repeat */
-};
-
-static int
-BTree_nonzero(BTree *self)
-{
-  return BTree_length_or_nonzero(self, 1);
-}
-
-static PyNumberMethods BTree_as_number_for_nonzero = {
-  0,0,0,0,0,0,0,0,0,0,
-  (inquiry)BTree_nonzero};
-
-static PyTypeObject BTreeType = {
-    PyObject_HEAD_INIT(NULL) /* PyPersist_Type */
-    0,					/* ob_size */
-    MODULE_NAME MOD_NAME_PREFIX "BTree",/* tp_name */
-    sizeof(BTree),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)BTree_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    &BTree_as_number_for_nonzero,	/* tp_as_number */
-    &BTree_as_sequence,			/* tp_as_sequence */
-    &BTree_as_mapping,			/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
-	    Py_TPFLAGS_BASETYPE, 	/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)BTree_traverse,	/* tp_traverse */
-    (inquiry)BTree_tp_clear,		/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    (getiterfunc)BTree_getiter,		/* tp_iter */
-    0,					/* tp_iternext */
-    BTree_methods,			/* tp_methods */
-    BTree_members,			/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    BTree_init,				/* tp_init */
-    0,					/* tp_alloc */
-    0, /*PyType_GenericNew,*/		/* tp_new */
-};
diff --git a/branches/bug1734/src/BTrees/BucketTemplate.c b/branches/bug1734/src/BTrees/BucketTemplate.c
deleted file mode 100755
index ba530d13..00000000
--- a/branches/bug1734/src/BTrees/BucketTemplate.c
+++ /dev/null
@@ -1,1722 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define BUCKETTEMPLATE_C "$Id$\n"
-
-/* Use BUCKET_SEARCH to find the index at which a key belongs.
- * INDEX    An int lvalue to hold the index i such that KEY belongs at
- *          SELF->keys[i].  Note that this will equal SELF->len if KEY
- *          is larger than the bucket's largest key.  Else it's the
- *          smallest i such that SELF->keys[i] >= KEY.
- * ABSENT   An int lvalue to hold a Boolean result, true (!= 0) if the
- *          key is absent, false (== 0) if the key is at INDEX.
- * SELF     A pointer to a Bucket node.
- * KEY      The key you're looking for, of type KEY_TYPE.
- * ONERROR  What to do if key comparison raises an exception; for example,
- *          perhaps 'return NULL'.
- *
- * See Maintainer.txt for discussion:  this is optimized in subtle ways.
- * It's recommended that you call this at the start of a routine, and check
- * for self->len == 0 afterward only if an empty bucket is special in
- * context (INDEX becomes 0 and ABSENT becomes true if this macro is run
- * with an empty SELF, and that may be all the invoker needs to know).
- */
-#define BUCKET_SEARCH(INDEX, ABSENT, SELF, KEY, ONERROR) {  \
-    int _lo = 0;                                            \
-    int _hi = (SELF)->len;                                  \
-    int _i;                                                 \
-    int _cmp = 1;                                           \
-    for (_i = _hi >> 1; _lo < _hi; _i = (_lo + _hi) >> 1) { \
-        TEST_KEY_SET_OR(_cmp, (SELF)->keys[_i], (KEY))      \
-            ONERROR;                                        \
-        if      (_cmp < 0)  _lo = _i + 1;                   \
-        else if (_cmp == 0) break;                          \
-        else                _hi = _i;                       \
-    }                                                       \
-    (INDEX) = _i;                                           \
-    (ABSENT) = _cmp;                                        \
-}
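-
-/* Typical call pattern, as used throughout this file (sketch only):
- *
- *     int i, cmp;
- *     BUCKET_SEARCH(i, cmp, self, key, goto Done);
- *     if (cmp == 0)
- *         ...exact match at self->keys[i]...
- *     else
- *         ...key absent; it belongs at index i...
- */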
-
-/*
-** _bucket_get
-**
-** Search a bucket for a given key.
-**
-** Arguments
-**     self	The bucket
-**     keyarg	The key to look for
-**     has_key	Boolean; if true, return a true/false result; else return
-**              the value associated with the key.
-**
-** Return
-**     If has_key:
-**         Returns the Python int 0 if the key is absent, else returns
-**         has_key itself as a Python int.  A BTree caller generally passes
-**         the depth of the bucket for has_key, so a true result returns
-**         the bucket depth then.
-**         Note that has_key should be true when searching set buckets.
-**     If not has_key:
-**         If the key is present, returns the associated value, and the
-**         caller owns the reference.  Else returns NULL and sets KeyError.
-**     Whether or not has_key:
-**         If a comparison sets an exception, returns NULL.
-*/
-static PyObject *
-_bucket_get(Bucket *self, PyObject *keyarg, int has_key)
-{
-    int i, cmp;
-    KEY_TYPE key;
-    PyObject *r = NULL;
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    UNLESS (copied) return NULL;
-
-    UNLESS (PER_USE(self)) return NULL;
-
-    BUCKET_SEARCH(i, cmp, self, key, goto Done);
-    if (has_key)
-    	r = PyInt_FromLong(cmp ? 0 : has_key);
-    else {
-        if (cmp == 0) {
-            COPY_VALUE_TO_OBJECT(r, self->values[i]);
-        }
-        else
-            PyErr_SetObject(PyExc_KeyError, keyarg);
-    }
-
-Done:
-    PER_UNUSE(self);
-    return r;
-
-}
-
-static PyObject *
-bucket_getitem(Bucket *self, PyObject *key)
-{
-  return _bucket_get(self, key, 0);
-}
-
-/*
-** Bucket_grow
-**
-** Resize a bucket.
-**
-** Arguments:   self    The bucket.
-**              newsize The new maximum capacity.  If < 0, double the
-**                      current size unless the bucket is currently empty,
-**                      in which case use MIN_BUCKET_ALLOC.
-**              noval   Boolean; if true, allocate only key space and not
-**                      value space
-**
-** Returns:     -1      on error, and MemoryError exception is set
-**               0      on success
-*/
-static int
-Bucket_grow(Bucket *self, int newsize, int noval)
-{
-    KEY_TYPE *keys;
-    VALUE_TYPE *values;
-
-    if (self->size) {
-        if (newsize < 0)
-            newsize = self->size * 2;
-        if (newsize < 0)    /* int overflow */
-            goto Overflow;
-        UNLESS (keys = BTree_Realloc(self->keys, sizeof(KEY_TYPE) * newsize))
-            return -1;
-
-        UNLESS (noval) {
-            values = BTree_Realloc(self->values, sizeof(VALUE_TYPE) * newsize);
-            if (values == NULL) {
-                free(keys);
-                return -1;
-            }
-            self->values = values;
-        }
-        self->keys = keys;
-    }
-    else {
-        if (newsize < 0)
-            newsize = MIN_BUCKET_ALLOC;
-        UNLESS (self->keys = BTree_Malloc(sizeof(KEY_TYPE) * newsize))
-            return -1;
-        UNLESS (noval) {
-            self->values = BTree_Malloc(sizeof(VALUE_TYPE) * newsize);
-            if (self->values == NULL) {
-                free(self->keys);
-                self->keys = NULL;
-                return -1;
-            }
-        }
-    }
-    self->size = newsize;
-    return 0;
-
-Overflow:
-  PyErr_NoMemory();
-  return -1;
-}
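-
-/* Growth sketch (assuming the usual small MIN_BUCKET_ALLOC defined
- * elsewhere in this package):  passing newsize < 0 takes an empty bucket
- * to MIN_BUCKET_ALLOC slots, then doubles the size on each later call
- * until the doubling overflows an int.
- */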
-
-/* So far, bucket_append is called only by multiunion_m(), so is called
- * only when MULTI_INT_UNION is defined.  Flavors of BTree/Bucket that
- * don't support MULTI_INT_UNION don't call bucket_append (yet), and
- * gcc complains if bucket_append is compiled in those cases.  So only
- * compile bucket_append if it's going to be used.
- */
-#ifdef MULTI_INT_UNION
-/*
- * Append a slice of the "from" bucket to self.
- *
- * self         Append (at least keys) to this bucket.  self must be activated
- *              upon entry, and remains activated at exit.  If copyValues
- *              is true, self must be empty or already have a non-NULL values
- *              pointer.  self's access and modification times aren't updated.
- * from         The bucket from which to take keys, and possibly values.  from
- *              must be activated upon entry, and remains activated at exit.
- *              If copyValues is true, from must have a non-NULL values
- *              pointer.  self and from must not be the same.  from's access
- *              time isn't updated.
- * i, n         The slice from[i : i+n] is appended to self.  Must have
- *              i >= 0, n > 0 and i+n <= from->len.
- * copyValues   Boolean.  If true, copy values from the slice as well as keys.
- *              In this case, from must have a non-NULL values pointer, and
- *              self must too (unless self is empty, in which case a values
- *              vector will be allocated for it).
- * overallocate Boolean.  If self doesn't have enough room upon entry to hold
- *              all the appended stuff, then if overallocate is false exactly
- *              enough room will be allocated to hold the new stuff, else if
- *              overallocate is true an excess will be allocated.  overallocate
- *              may be a good idea if you expect to append more stuff to self
- *              later; else overallocate should be false.
- *
- * CAUTION:  If self is empty upon entry (self->size == 0), and copyValues is
- * false, then no space for values will get allocated.  This can be a trap if
- * the caller intends to copy values itself.
- *
- * Return
- *    -1        Error.
- *     0        OK.
- */
-static int
-bucket_append(Bucket *self, Bucket *from, int i, int n,
-              int copyValues, int overallocate)
-{
-    int newlen;
-
-    assert(self && from && self != from);
-    assert(i >= 0);
-    assert(n > 0);
-    assert(i+n <= from->len);
-
-    /* Make room. */
-    newlen = self->len + n;
-    if (newlen > self->size) {
-        int newsize = newlen;
-        if (overallocate)   /* boost by 25% -- pretty arbitrary */
-            newsize += newsize >> 2;
-        if (Bucket_grow(self, newsize, ! copyValues) < 0)
-            return -1;
-    }
-    assert(newlen <= self->size);
-
-    /* Copy stuff. */
-    memcpy(self->keys + self->len, from->keys + i, n * sizeof(KEY_TYPE));
-    if (copyValues) {
-        assert(self->values);
-        assert(from->values);
-        memcpy(self->values + self->len, from->values + i,
-                n * sizeof(VALUE_TYPE));
-    }
-    self->len = newlen;
-
-    /* Bump refcounts. */
-#ifdef KEY_TYPE_IS_PYOBJECT
-    {
-        int j;
-        PyObject **p = from->keys + i;
-        for (j = 0; j < n; ++j, ++p) {
-            Py_INCREF(*p);
-        }
-    }
-#endif
-#ifdef VALUE_TYPE_IS_PYOBJECT
-    if (copyValues) {
-        int j;
-        PyObject **p = from->values + i;
-        for (j = 0; j < n; ++j, ++p) {
-            Py_INCREF(*p);
-        }
-    }
-#endif
-    return 0;
-}
-#endif /* MULTI_INT_UNION */
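-
-/* Hypothetical call, for illustration only (multiunion_m is the sole
- * real caller):  append all of `from` to self, keys only, overallocating
- * because more appends are expected:
- *
- *     if (bucket_append(self, from, 0, from->len, 0, 1) < 0)
- *         return -1;
- */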
-
-/*
-** _bucket_set: Assign a value to a key in a bucket, delete a key+value
-**  pair, or just insert a key.
-**
-** Arguments
-**     self     The bucket
-**     keyarg   The key to look for
-**     v        The value to associate with key; NULL means delete the key.
-**              If NULL, it's an error (KeyError) if the key isn't present.
-**              Note that if this is a set bucket, and you want to insert
-**              a new set element, v must be non-NULL although its exact
-**              value will be ignored.  Passing Py_None is good for this.
-**     unique   Boolean; when true, don't replace the value if the key is
-**              already present.
-**     noval    Boolean; when true, operate on keys only (ignore values)
-**     changed  ignored on input
-**
-** Return
-**     -1       on error
-**      0       on success and the # of bucket entries didn't change
-**      1       on success and the # of bucket entries did change
-**  *changed    If non-NULL, set to 1 on any mutation of the bucket.
-*/
-static int
-_bucket_set(Bucket *self, PyObject *keyarg, PyObject *v,
-            int unique, int noval, int *changed)
-{
-    int i, cmp;
-    KEY_TYPE key;
-
-    /* Subtle:  there may or may not be a value.  If there is, we need to
-     * check its type early, so that in case of error we can get out before
-     * mutating the bucket.  But because value isn't used on all paths, if
-     * we don't initialize value then gcc gives a nuisance complaint that
-     * value may be used initialized (it can't be, but gcc doesn't know
-     * that).  So we initialize it.  However, VALUE_TYPE can be various types,
-     * including int, PyObject*, and char[6], so it's a puzzle to spell
-     * initialization.  It so happens that {0} is a valid initializer for all
-     * these types.
-     */
-    VALUE_TYPE value = {0};	/* squash nuisance warning */
-    int result = -1;    /* until proven innocent */
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    UNLESS(copied) return -1;
-
-    /* Copy the value early (if needed), so that in case of error a
-     * pile of bucket mutations don't need to be undone.
-     */
-    if (v && !noval) {
-    	COPY_VALUE_FROM_ARG(value, v, copied);
-    	UNLESS(copied) return -1;
-    }
-
-    UNLESS (PER_USE(self)) return -1;
-
-    BUCKET_SEARCH(i, cmp, self, key, goto Done);
-    if (cmp == 0) {
-        /* The key exists, at index i. */
-
-        if (v) {
-            /* The key exists at index i, and there's a new value.
-             * If unique, we're not supposed to replace it.  If noval, or this
-             * is a set bucket (self->values is NULL), there's nothing to do.
-             */
-            if (unique || noval || self->values == NULL) {
-                result = 0;
-                goto Done;
-            }
-
-            /* The key exists at index i, and we need to replace the value. */
-#ifdef VALUE_SAME
-            /* short-circuit if no change */
-            if (VALUE_SAME(self->values[i], value)) {
-                result = 0;
-                goto Done;
-            }
-#endif
-            if (changed)
-                *changed = 1;
-            DECREF_VALUE(self->values[i]);
-            COPY_VALUE(self->values[i], value);
-            INCREF_VALUE(self->values[i]);
-            if (PER_CHANGED(self) >= 0)
-                result = 0;
-            goto Done;
-        }
-
-        /* The key exists at index i, and should be deleted. */
-        DECREF_KEY(self->keys[i]);
-        self->len--;
-        if (i < self->len)
-            memmove(self->keys + i, self->keys + i+1,
-                    sizeof(KEY_TYPE)*(self->len - i));
-
-        if (self->values) {
-            DECREF_VALUE(self->values[i]);
-            if (i < self->len)
-                memmove(self->values + i, self->values + i+1,
-                        sizeof(VALUE_TYPE)*(self->len - i));
-        }
-
-        if (! self->len) {
-            self->size = 0;
-            free(self->keys);
-            self->keys = NULL;
-            if (self->values) {
-                free(self->values);
-                self->values = NULL;
-            }
-        }
-
-        if (changed)
-            *changed = 1;
-        if (PER_CHANGED(self) >= 0)
-            result = 1;
-        goto Done;
-    }
-
-    /* The key doesn't exist, and belongs at index i. */
-    if (!v) {
-        /* Can't delete a non-existent key. */
-        PyErr_SetObject(PyExc_KeyError, keyarg);
-        goto Done;
-    }
-
-    /* The key doesn't exist and should be inserted at index i. */
-    if (self->len == self->size && Bucket_grow(self, -1, noval) < 0)
-        goto Done;
-
-    if (self->len > i) {
-        memmove(self->keys + i + 1, self->keys + i,
-                sizeof(KEY_TYPE) * (self->len - i));
-        if (self->values) {
-            memmove(self->values + i + 1, self->values + i,
-                    sizeof(VALUE_TYPE) * (self->len - i));
-        }
-    }
-
-    COPY_KEY(self->keys[i], key);
-    INCREF_KEY(self->keys[i]);
-
-    if (! noval) {
-        COPY_VALUE(self->values[i], value);
-        INCREF_VALUE(self->values[i]);
-    }
-
-    self->len++;
-    if (changed)
-        *changed = 1;
-    if (PER_CHANGED(self) >= 0)
-        result = 1;
-
-Done:
-    PER_UNUSE(self);
-    return result;
-}
-
-/*
-** bucket_setitem
-**
-** wrapper for _bucket_set (eliminates the +1 return code)
-**
-** Arguments:	self	The bucket
-**		key	The key to insert under
-**		v	The value to insert
-**
-** Returns	 0 	on success
-**		-1	on failure
-*/
-static int
-bucket_setitem(Bucket *self, PyObject *key, PyObject *v)
-{
-    if (_bucket_set(self, key, v, 0, 0, 0) < 0)
-	return -1;
-    return 0;
-}
-
-/**
- ** Accepts a sequence of 2-tuples, or any object with an items()
- ** method that returns an iterable object producing 2-tuples.
- */
-static int
-update_from_seq(PyObject *map, PyObject *seq)
-{
-    PyObject *iter, *o, *k, *v;
-    int err = -1;
-
-    /* One path creates a new seq object.  The other path has an
-       INCREF of the seq argument.  So seq must always be DECREFed on
-       the way out.
-     */
-    if (!PySequence_Check(seq)) {
-	PyObject *items;
-	items = PyObject_GetAttrString(seq, "items");
-	if (items == NULL)
-	    return -1;
-	seq = PyObject_CallObject(items, NULL);
-	Py_DECREF(items);
-	if (seq == NULL)
-	    return -1;
-    } else
-	Py_INCREF(seq);
-
-    iter = PyObject_GetIter(seq);
-    if (iter == NULL)
-	goto err;
-    while (1) {
-	o = PyIter_Next(iter);
-	if (o == NULL) {
-	    if (PyErr_Occurred())
-		goto err;
-	    else
-		break;
-	}
-	if (!PyTuple_Check(o) || PyTuple_GET_SIZE(o) != 2) {
-	    Py_DECREF(o);
-	    PyErr_SetString(PyExc_TypeError,
-			    "Sequence must contain 2-item tuples");
-	    goto err;
-	}
-	k = PyTuple_GET_ITEM(o, 0);
-	v = PyTuple_GET_ITEM(o, 1);
-	if (PyObject_SetItem(map, k, v) < 0) {
-	    Py_DECREF(o);
-	    goto err;
-	}
-	Py_DECREF(o);
-    }
-
-    err = 0;
- err:
-    Py_DECREF(iter);
-    Py_DECREF(seq);
-    return err;
-}
-
-static PyObject *
-Mapping_update(PyObject *self, PyObject *seq)
-{
-    if (update_from_seq(self, seq) < 0)
-	return NULL;
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
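-/* Python-level usage sketch (illustrative):
- *
- *     b.update([(1, 'one'), (2, 'two')])   # a sequence of 2-tuples
- *     b.update(other_mapping)              # anything with .items()
- */
-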
-/*
-** bucket_split
-**
-** Splits one bucket into two
-**
-** Arguments:	self	The bucket
-**		index	the index of the key to split at (out of bounds: use midpoint)
-**		next	the new bucket to split into
-**
-** Returns:	 0	on success
-**		-1	on failure
-*/
-static int
-bucket_split(Bucket *self, int index, Bucket *next)
-{
-    int next_size;
-
-    ASSERT(self->len > 1, "split of bucket with fewer than 2 items", -1);
-
-    if (index < 0 || index >= self->len)
-	index = self->len / 2;
-
-    next_size = self->len - index;
-
-    next->keys = BTree_Malloc(sizeof(KEY_TYPE) * next_size);
-    if (!next->keys)
-	return -1;
-    memcpy(next->keys, self->keys + index, sizeof(KEY_TYPE) * next_size);
-    if (self->values) {
-	next->values = BTree_Malloc(sizeof(VALUE_TYPE) * next_size);
-	if (!next->values) {
-            free(next->keys);
-            next->keys = NULL;
-            return -1;
-        }
-	memcpy(next->values, self->values + index,
-	       sizeof(VALUE_TYPE) * next_size);
-    }
-    next->size = next_size;
-    next->len = next_size;
-    self->len = index;
-
-    next->next = self->next;
-
-    Py_INCREF(next);
-    self->next = next;
-
-    if (PER_CHANGED(self) < 0)
-        return -1;
-
-    return 0;
-}
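-
-/* Illustrative sketch (hypothetical keys):  splitting a bucket holding
- * [1, 2, 3, 4] at the default midpoint (index 2) leaves self with [1, 2],
- * gives next [3, 4], and splices next into the bucket chain directly
- * after self.
- */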
-
-/* Set self->next to self->next->next, i.e. unlink self's successor from
- * the chain.
- *
- * Return:
- *     -1       error
- *      0       OK
- */
-static int
-Bucket_deleteNextBucket(Bucket *self)
-{
-    int result = -1;    /* until proven innocent */
-    Bucket *successor;
-
-    PER_USE_OR_RETURN(self, -1);
-    successor = self->next;
-    if (successor) {
-        Bucket *next;
-        /* Before:  self -> successor -> next
-         * After:   self --------------> next
-         */
-        UNLESS (PER_USE(successor)) goto Done;
-        next = successor->next;
-        PER_UNUSE(successor);
-
-        Py_XINCREF(next);       /* it may be NULL, of course */
-        self->next = next;
-        Py_DECREF(successor);
-	if (PER_CHANGED(self) < 0)
-	    goto Done;
-    }
-    result = 0;
-
-Done:
-    PER_UNUSE(self);
-    return result;
-}
-
-/*
- Bucket_findRangeEnd -- Find the index of a range endpoint
- (possibly) contained in a bucket.
-
- Arguments:     self        The bucket
-                keyarg      The key to match against
-                low         Boolean; true for low end of range, false for high
-                exclude_equal  Boolean; if true, don't accept an exact match,
-                	       and if there is one then move right if low and
-                	       left if !low.
-                offset      The output offset
-
- If low true, *offset <- index of the smallest item >= key,
- if low false the index of the largest item <= key.  In either case, if there
- is no such index, *offset is left alone and 0 is returned.
-
- Return:
-      0     No suitable index exists; *offset has not been changed
-      1     The correct index was stored into *offset
-     -1     Error
-
- Example:  Suppose the keys are [2, 4], and exclude_equal is false.  Searching
- for 2 sets *offset to 0 and returns 1 regardless of low.  Searching for 4
- sets *offset to 1 and returns 1 regardless of low.
- Searching for 1:
-     If low true, sets *offset to 0, returns 1.
-     If low false, returns 0.
- Searching for 3:
-     If low true, sets *offset to 1, returns 1.
-     If low false, sets *offset to 0, returns 1.
- Searching for 5:
-     If low true, returns 0.
-     If low false, sets *offset to 1, returns 1.
-
- The 1, 3 and 5 examples are the same when exclude_equal is true.
- */
-static int
-Bucket_findRangeEnd(Bucket *self, PyObject *keyarg, int low, int exclude_equal,
-		    int *offset)
-{
-    int i, cmp;
-    int result = -1;    /* until proven innocent */
-    KEY_TYPE key;
-    int copied = 1;
-
-    COPY_KEY_FROM_ARG(key, keyarg, copied);
-    UNLESS (copied) return -1;
-
-    UNLESS (PER_USE(self)) return -1;
-
-    BUCKET_SEARCH(i, cmp, self, key, goto Done);
-    if (cmp == 0) {
-    	/* exact match at index i */
-    	if (exclude_equal) {
-	    /* but we don't want an exact match */
-            if (low)
-                ++i;
-            else
-                --i;
-        }
-    }
-    /* Else keys[i-1] < key < keys[i], picturing infinities at OOB indices,
-     * and i has the smallest item > key, which is correct for low.
-     */
-    else if (! low)
-        /* i-1 has the largest item < key (unless i-1 is OOB) */
-        --i;
-
-    result = 0 <= i && i < self->len;
-    if (result)
-        *offset = i;
-
-Done:
-  PER_UNUSE(self);
-  return result;
-}
-
-static PyObject *
-Bucket_maxminKey(Bucket *self, PyObject *args, int min)
-{
-  PyObject *key=0;
-  int rc, offset;
-
-  if (args && ! PyArg_ParseTuple(args, "|O", &key)) return NULL;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  UNLESS (self->len) goto empty;
-
-  /* Find the low range */
-  if (key)
-    {
-      if ((rc = Bucket_findRangeEnd(self, key, min, 0, &offset)) <= 0)
-        {
-          if (rc < 0)
-            {
-              PER_UNUSE(self);  /* balance PER_USE_OR_RETURN before failing */
-              return NULL;
-            }
-          goto empty;
-        }
-    }
-  else if (min) offset = 0;
-  else offset = self->len -1;
-
-  COPY_KEY_TO_OBJECT(key, self->keys[offset]);
-  PER_UNUSE(self);
-
-  return key;
-
- empty:
-  PyErr_SetString(PyExc_ValueError, "empty bucket");
-  PER_UNUSE(self);
-  return NULL;
-}
-
-static PyObject *
-Bucket_minKey(Bucket *self, PyObject *args)
-{
-  return Bucket_maxminKey(self, args, 1);
-}
-
-static PyObject *
-Bucket_maxKey(Bucket *self, PyObject *args)
-{
-  return Bucket_maxminKey(self, args, 0);
-}
-
-static int
-Bucket_rangeSearch(Bucket *self, PyObject *args, PyObject *kw,
-		   int *low, int *high)
-{
-    PyObject *min = Py_None;
-    PyObject *max = Py_None;
-    int excludemin = 0;
-    int excludemax = 0;
-    int rc;
-
-    if (args) {
-        if (! PyArg_ParseTupleAndKeywords(args, kw, "|OOii", search_keywords,
-        				  &min,
-        				  &max,
-        				  &excludemin,
-        				  &excludemax))
-	    return -1;
-    }
-
-    UNLESS (self->len) goto empty;
-
-    /* Find the low range */
-    if (min != Py_None) {
-        if ((rc = Bucket_findRangeEnd(self, min, 1, excludemin, low)) <= 0) {
-            if (rc < 0) return -1;
-            goto empty;
-        }
-    }
-    else {
-    	*low = 0;
-    	if (excludemin) {
-    	    if (self->len < 2)
-    	    	goto empty;
-    	    ++*low;
-    	}
-    }
-
-    /* Find the high range */
-    if (max != Py_None) {
-        if ((rc = Bucket_findRangeEnd(self, max, 0, excludemax, high)) <= 0) {
-            if (rc < 0) return -1;
-            goto empty;
-        }
-    }
-    else {
-	*high = self->len - 1;
-	if (excludemax) {
-	    if (self->len < 2)
-	    	goto empty;
-	    --*high;
-	}
-    }
-
-    /* If min < max to begin with, it's quite possible that low > high now. */
-    if (*low <= *high)
-        return 0;
-
- empty:
-    *low = 0;
-    *high = -1;
-    return 0;
-}
-
-/*
-** bucket_keys
-**
-** Generate a list of all keys in the bucket
-**
-** Arguments:	self	The Bucket
-**		args/kw	Optional min/max range arguments
-**
-** Returns:	list of bucket keys
-*/
-static PyObject *
-bucket_keys(Bucket *self, PyObject *args, PyObject *kw)
-{
-  PyObject *r = NULL, *key;
-  int i, low, high;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0)
-      goto err;
-
-  r = PyList_New(high-low+1);
-  if (r == NULL)
-      goto err;
-
-  for (i=low; i <= high; i++) {
-      COPY_KEY_TO_OBJECT(key, self->keys[i]);
-      if (PyList_SetItem(r, i-low , key) < 0)
-	  goto err;
-  }
-
-  PER_UNUSE(self);
-  return r;
-
- err:
-  PER_UNUSE(self);
-  Py_XDECREF(r);
-  return NULL;
-}
-
-/*
-** bucket_values
-**
-** Generate a list of all values in the bucket
-**
-** Arguments:	self	The Bucket
-**		args/kw	Optional min/max range arguments
-**
-** Returns:	list of values
-*/
-static PyObject *
-bucket_values(Bucket *self, PyObject *args, PyObject *kw)
-{
-  PyObject *r=0, *v;
-  int i, low, high;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0) goto err;
-
-  UNLESS (r=PyList_New(high-low+1)) goto err;
-
-  for (i=low; i <= high; i++)
-    {
-      COPY_VALUE_TO_OBJECT(v, self->values[i]);
-      UNLESS (v) goto err;
-      if (PyList_SetItem(r, i-low, v) < 0) goto err;
-    }
-
-  PER_UNUSE(self);
-  return r;
-
- err:
-  PER_UNUSE(self);
-  Py_XDECREF(r);
-  return NULL;
-}
-
-/*
-** bucket_items
-**
-** Returns a list of all items in a bucket
-**
-** Arguments:	self	The Bucket
-**		args/kw	Optional min/max range arguments
-**
-** Returns:	list of all items in the bucket
-*/
-static PyObject *
-bucket_items(Bucket *self, PyObject *args, PyObject *kw)
-{
-  PyObject *r=0, *o=0, *item=0;
-  int i, low, high;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  if (Bucket_rangeSearch(self, args, kw, &low, &high) < 0) goto err;
-
-  UNLESS (r=PyList_New(high-low+1)) goto err;
-
-  for (i=low; i <= high; i++)
-    {
-      UNLESS (item = PyTuple_New(2)) goto err;
-
-      COPY_KEY_TO_OBJECT(o, self->keys[i]);
-      UNLESS (o) goto err;
-      PyTuple_SET_ITEM(item, 0, o);
-
-      COPY_VALUE_TO_OBJECT(o, self->values[i]);
-      UNLESS (o) goto err;
-      PyTuple_SET_ITEM(item, 1, o);
-
-      if (PyList_SetItem(r, i-low, item) < 0) goto err;
-
-      item = 0;
-    }
-
-  PER_UNUSE(self);
-  return r;
-
- err:
-  PER_UNUSE(self);
-  Py_XDECREF(r);
-  Py_XDECREF(item);
-  return NULL;
-}
-
-static PyObject *
-bucket_byValue(Bucket *self, PyObject *omin)
-{
-  PyObject *r=0, *o=0, *item=0;
-  VALUE_TYPE min;
-  VALUE_TYPE v;
-  int i, l, copied=1;
-
-  PER_USE_OR_RETURN(self, NULL);
-
-  COPY_VALUE_FROM_ARG(min, omin, copied);
-  UNLESS(copied) return NULL;
-
-  for (i=0, l=0; i < self->len; i++)
-    if (TEST_VALUE(self->values[i], min) >= 0)
-      l++;
-
-  UNLESS (r=PyList_New(l)) goto err;
-
-  for (i=0, l=0; i < self->len; i++)
-    {
-      if (TEST_VALUE(self->values[i], min) < 0) continue;
-
-      UNLESS (item = PyTuple_New(2)) goto err;
-
-      COPY_KEY_TO_OBJECT(o, self->keys[i]);
-      UNLESS (o) goto err;
-      PyTuple_SET_ITEM(item, 1, o);
-
-      COPY_VALUE(v, self->values[i]);
-      NORMALIZE_VALUE(v, min);
-      COPY_VALUE_TO_OBJECT(o, v);
-      DECREF_VALUE(v);
-      UNLESS (o) goto err;
-      PyTuple_SET_ITEM(item, 0, o);
-
-      if (PyList_SetItem(r, l, item) < 0) goto err;
-      l++;
-
-      item = 0;
-    }
-
-  item=PyObject_GetAttr(r,sort_str);
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_CallObject(item, NULL));
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_GetAttr(r, reverse_str));
-  UNLESS (item) goto err;
-  ASSIGN(item, PyObject_CallObject(item, NULL));
-  UNLESS (item) goto err;
-  Py_DECREF(item);
-
-  PER_UNUSE(self);
-  return r;
-
- err:
-  PER_UNUSE(self);
-  Py_XDECREF(r);
-  Py_XDECREF(item);
-  return NULL;
-}
-
-static int
-_bucket_clear(Bucket *self)
-{
-    const int len = self->len;
-    /* Don't declare i at this level.  If neither keys nor values are
-     * PyObject*, i won't be referenced, and you'll get a nuisance compiler
- * warning for declaring it here.
-     */
-    self->len = self->size = 0;
-
-    if (self->next) {
-        Py_DECREF(self->next);
-        self->next = NULL;
-    }
-
-    /* Silence compiler warning about unused variable len for the case
-       when neither key nor value is an object, i.e. II. */
-    (void)len;
-
-    if (self->keys) {
-#ifdef KEY_TYPE_IS_PYOBJECT
-        int i;
-        for (i = 0; i < len; ++i)
-            DECREF_KEY(self->keys[i]);
-#endif
-        free(self->keys);
-        self->keys = NULL;
-    }
-
-    if (self->values) {
-#ifdef VALUE_TYPE_IS_PYOBJECT
-        int i;
-        for (i = 0; i < len; ++i)
-            DECREF_VALUE(self->values[i]);
-#endif
-        free(self->values);
-        self->values = NULL;
-    }
-    return 0;
-}
-
-#ifdef PERSISTENT
-static PyObject *
-bucket__p_deactivate(Bucket *self, PyObject *args, PyObject *keywords)
-{
-    int ghostify = 1;
-    PyObject *force = NULL;
-
-    if (args && PyTuple_GET_SIZE(args) > 0) {
-        PyErr_SetString(PyExc_TypeError,
-                        "_p_deactivate takes no positional arguments");
-        return NULL;
-    }
-    if (keywords) {
-	int size = PyDict_Size(keywords);
-	force = PyDict_GetItemString(keywords, "force");
-	if (force)
-	    size--;
-	if (size) {
-	    PyErr_SetString(PyExc_TypeError,
-			    "_p_deactivate only accepts keyword arg force");
-	    return NULL;
-	}
-    }
-
-    if (self->jar && self->oid) {
-	ghostify = self->state == cPersistent_UPTODATE_STATE;
-	if (!ghostify && force) {
-	    if (PyObject_IsTrue(force))
-		ghostify = 1;
-	    if (PyErr_Occurred())
-		return NULL;
-	}
-	if (ghostify) {
-	    if (_bucket_clear(self) < 0)
-		return NULL;
-	    PER_GHOSTIFY(self);
-	}
-    }
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-#endif
-
-static PyObject *
-bucket_clear(Bucket *self, PyObject *args)
-{
-  PER_USE_OR_RETURN(self, NULL);
-
-  if (self->len) {
-      if (_bucket_clear(self) < 0)
-	  return NULL;
-      if (PER_CHANGED(self) < 0)
-	  goto err;
-  }
-  PER_UNUSE(self);
-  Py_INCREF(Py_None);
-  return Py_None;
-
-err:
-  PER_UNUSE(self);
-  return NULL;
-}
-
-/*
- * Return:
- *
- * For a set bucket (self->values is NULL), a one-tuple or two-tuple.  The
- * first element is a tuple of keys, of length self->len.  The second element
- * is the next bucket, present if and only if next is non-NULL:
- *
- *     (
- *          (keys[0], keys[1], ..., keys[len-1]),
- *          <self->next iff non-NULL>
- *     )
- *
- * For a mapping bucket (self->values is not NULL), a one-tuple or two-tuple.
- * The first element is a tuple interleaving keys and values, of length
- * 2 * self->len.  The second element is the next bucket, present iff next is
- * non-NULL:
- *
- *     (
- *          (keys[0], values[0], keys[1], values[1], ...,
- *                               keys[len-1], values[len-1]),
- *          <self->next iff non-NULL>
- *     )
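- *
- * For example (illustrative):  a mapping bucket holding {1: 10, 2: 20}
- * with no successor has state ((1, 10, 2, 20),), while a set bucket
- * holding the keys 1 and 2 has state ((1, 2),).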
- */
-static PyObject *
-bucket_getstate(Bucket *self)
-{
-    PyObject *o = NULL, *items = NULL, *state;
-    int i, len, l;
-
-    PER_USE_OR_RETURN(self, NULL);
-
-    len = self->len;
-
-    if (self->values) { /* Bucket */
-	items = PyTuple_New(len * 2);
-	if (items == NULL)
-	    goto err;
-	for (i = 0, l = 0; i < len; i++) {
-	    COPY_KEY_TO_OBJECT(o, self->keys[i]);
-	    if (o == NULL)
-		goto err;
-	    PyTuple_SET_ITEM(items, l, o);
-	    l++;
-
-	    COPY_VALUE_TO_OBJECT(o, self->values[i]);
-	    if (o == NULL)
-		goto err;
-	    PyTuple_SET_ITEM(items, l, o);
-	    l++;
-        }
-    } else { /* Set */
-	items = PyTuple_New(len);
-	if (items == NULL)
-	    goto err;
-	for (i = 0; i < len; i++) {
-	    COPY_KEY_TO_OBJECT(o, self->keys[i]);
-	    if (o == NULL)
-		goto err;
-	    PyTuple_SET_ITEM(items, i, o);
-        }
-    }
-
-    if (self->next)
-	state = Py_BuildValue("OO", items, self->next);
-    else
-	state = Py_BuildValue("(O)", items);
-    Py_DECREF(items);
-
-    PER_UNUSE(self);
-    return state;
-
- err:
-    PER_UNUSE(self);
-    Py_XDECREF(items);
-    return NULL;
-}
-
-static int
-_bucket_setstate(Bucket *self, PyObject *state)
-{
-    PyObject *k, *v, *items;
-    Bucket *next = NULL;
-    int i, l, len, copied=1;
-    KEY_TYPE *keys;
-    VALUE_TYPE *values;
-
-    if (!PyArg_ParseTuple(state, "O|O:__setstate__", &items, &next))
-	return -1;
-
-    len = PyTuple_Size(items);
-    if (len < 0)
-	return -1;
-    len /= 2;
-
-    for (i = self->len; --i >= 0; ) {
-	DECREF_KEY(self->keys[i]);
-	DECREF_VALUE(self->values[i]);
-    }
-    self->len = 0;
-
-    if (self->next) {
-	Py_DECREF(self->next);
-	self->next = NULL;
-    }
-
-    if (len > self->size) {
-	keys = BTree_Realloc(self->keys, sizeof(KEY_TYPE)*len);
-	if (keys == NULL)
-	    return -1;
-	values = BTree_Realloc(self->values, sizeof(VALUE_TYPE)*len);
-	if (values == NULL)
-	    return -1;
-	self->keys = keys;
-	self->values = values;
-	self->size = len;
-    }
-
-    for (i=0, l=0; i < len; i++) {
-	k = PyTuple_GET_ITEM(items, l);
-	l++;
-	v = PyTuple_GET_ITEM(items, l);
-	l++;
-
-	COPY_KEY_FROM_ARG(self->keys[i], k, copied);
-	if (!copied)
-	    return -1;
-	COPY_VALUE_FROM_ARG(self->values[i], v, copied);
-	if (!copied)
-	    return -1;
-	INCREF_KEY(self->keys[i]);
-	INCREF_VALUE(self->values[i]);
-    }
-
-    self->len = len;
-
-    if (next) {
-	self->next = next;
-	Py_INCREF(next);
-    }
-
-    return 0;
-}
-
-static PyObject *
-bucket_setstate(Bucket *self, PyObject *state)
-{
-    int r;
-
-    PER_PREVENT_DEACTIVATION(self);
-    r = _bucket_setstate(self, state);
-    PER_UNUSE(self);
-
-    if (r < 0)
-	return NULL;
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static PyObject *
-bucket_has_key(Bucket *self, PyObject *key)
-{
-    return _bucket_get(self, key, 1);
-}
-
-
-/* Search bucket self for key.  This is the sq_contains slot of the
- * PySequenceMethods.
- *
- * Return:
- *     -1     error
- *      0     not found
- *      1     found
- */
-static int
-bucket_contains(Bucket *self, PyObject *key)
-{
-    PyObject *asobj = _bucket_get(self, key, 1);
-    int result = -1;
-
-    if (asobj != NULL) {
-        result = PyInt_AsLong(asobj) ? 1 : 0;
-        Py_DECREF(asobj);
-    }
-    return result;
-}
-
-/*
-** bucket_getm
-**
-** Implements Bucket.get(key[, default]):  look the key up and return
-** its value, or the default (None if not given) when the key is absent.
-*/
-static PyObject *
-bucket_getm(Bucket *self, PyObject *args)
-{
-    PyObject *key, *d=Py_None, *r;
-
-    if (!PyArg_ParseTuple(args, "O|O:get", &key, &d))
-	return NULL;
-    r = _bucket_get(self, key, 0);
-    if (r)
-	return r;
-    if (!PyErr_ExceptionMatches(PyExc_KeyError))
-	return NULL;
-    PyErr_Clear();
-    Py_INCREF(d);
-    return d;
-}
-
-/**************************************************************************/
-/* Iterator support. */
-
-/* A helper to build all the iterators for Buckets and Sets.
- * If args is NULL, the iterator spans the entire structure.  Else it's an
- * argument tuple, with optional low and high arguments.
- * kind is 'k', 'v' or 'i'.
- * Returns a BTreeIter object, or NULL if error.
- */
-static PyObject *
-buildBucketIter(Bucket *self, PyObject *args, PyObject *kw, char kind)
-{
-    BTreeItems *items;
-    int lowoffset, highoffset;
-    BTreeIter *result = NULL;
-
-    PER_USE_OR_RETURN(self, NULL);
-    if (Bucket_rangeSearch(self, args, kw, &lowoffset, &highoffset) < 0)
-        goto Done;
-
-    items = (BTreeItems *)newBTreeItems(kind, self, lowoffset,
-                                              self, highoffset);
-    if (items == NULL) goto Done;
-
-    result = BTreeIter_new(items);      /* win or lose, we're done */
-    Py_DECREF(items);
-
-Done:
-    PER_UNUSE(self);
-    return (PyObject *)result;
-}
-
-/* The implementation of iter(Bucket_or_Set); the Bucket tp_iter slot. */
-static PyObject *
-Bucket_getiter(Bucket *self)
-{
-    return buildBucketIter(self, NULL, NULL, 'k');
-}
-
-/* The implementation of Bucket.iterkeys(). */
-static PyObject *
-Bucket_iterkeys(Bucket *self, PyObject *args, PyObject *kw)
-{
-    return buildBucketIter(self, args, kw, 'k');
-}
-
-/* The implementation of Bucket.itervalues(). */
-static PyObject *
-Bucket_itervalues(Bucket *self, PyObject *args, PyObject *kw)
-{
-    return buildBucketIter(self, args, kw, 'v');
-}
-
-/* The implementation of Bucket.iteritems(). */
-static PyObject *
-Bucket_iteritems(Bucket *self, PyObject *args, PyObject *kw)
-{
-    return buildBucketIter(self, args, kw, 'i');
-}
-
-/* End of iterator support. */
-
-#ifdef PERSISTENT
-static PyObject *merge_error(int p1, int p2, int p3, int reason);
-static PyObject *bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3);
-
-static PyObject *
-_bucket__p_resolveConflict(PyObject *ob_type, PyObject *s[3])
-{
-    PyObject *result = NULL;	/* guilty until proved innocent */
-    Bucket *b[3] = {NULL, NULL, NULL};
-    PyObject *meth = NULL;
-    PyObject *a = NULL;
-    int i;
-
-    for (i = 0; i < 3; i++) {
-	PyObject *r;
-
-	b[i] = (Bucket*)PyObject_CallObject((PyObject *)ob_type, NULL);
-	if (b[i] == NULL)
-	    goto Done;
-	if (s[i] == Py_None) /* None is equivalent to empty, for BTrees */
-	    continue;
-	meth = PyObject_GetAttr((PyObject *)b[i], __setstate___str);
-	if (meth == NULL)
-	    goto Done;
-	a = PyTuple_New(1);
-	if (a == NULL)
-	    goto Done;
-	PyTuple_SET_ITEM(a, 0, s[i]);
-	Py_INCREF(s[i]);
-	r = PyObject_CallObject(meth, a);  /* b[i].__setstate__(s[i]) */
-	if (r == NULL)
-	    goto Done;
-	Py_DECREF(r);
-	Py_DECREF(a);
-	Py_DECREF(meth);
-	a = meth = NULL;
-    }
-
-    if (b[0]->next != b[1]->next || b[0]->next != b[2]->next)
-	merge_error(-1, -1, -1, 0);
-    else
-	result = bucket_merge(b[0], b[1], b[2]);
-
-Done:
-    Py_XDECREF(meth);
-    Py_XDECREF(a);
-    Py_XDECREF(b[0]);
-    Py_XDECREF(b[1]);
-    Py_XDECREF(b[2]);
-
-    return result;
-}
-
-static PyObject *
-bucket__p_resolveConflict(Bucket *self, PyObject *args)
-{
-    PyObject *s[3];
-
-    if (!PyArg_ParseTuple(args, "OOO", &s[0], &s[1], &s[2]))
-	return NULL;
-
-    return _bucket__p_resolveConflict((PyObject *)self->ob_type, s);
-}
-#endif
-
-/* Caution:  Even though the _next attribute is read-only, a program could
-   do arbitrary damage to the btree internals.  For example, it could call
-   clear() on a bucket inside a BTree.
-
-   We need to decide if the convenience for inspecting BTrees is worth
-   the risk.
-*/
-
-static struct PyMemberDef Bucket_members[] = {
-    {"_next", T_OBJECT, offsetof(Bucket, next)},
-    {NULL}
-};
-
-static struct PyMethodDef Bucket_methods[] = {
-    {"__getstate__", (PyCFunction) bucket_getstate,	METH_NOARGS,
-     "__getstate__() -- Return the picklable state of the object"},
-
-    {"__setstate__", (PyCFunction) bucket_setstate,	METH_O,
-     "__setstate__() -- Set the state of the object"},
-
-    {"keys",	(PyCFunction) bucket_keys,	METH_KEYWORDS,
-     "keys([min, max]) -- Return the keys"},
-
-    {"has_key",	(PyCFunction) bucket_has_key,	METH_O,
-     "has_key(key) -- Test whether the bucket contains the given key"},
-
-    {"clear",	(PyCFunction) bucket_clear,	METH_VARARGS,
-     "clear() -- Remove all of the items from the bucket"},
-
-    {"update",	(PyCFunction) Mapping_update,	METH_O,
-     "update(collection) -- Add the items from the given collection"},
-
-    {"maxKey", (PyCFunction) Bucket_maxKey,	METH_VARARGS,
-     "maxKey([key]) -- Find the maximum key\n\n"
-     "If an argument is given, find the maximum <= the argument"},
-
-    {"minKey", (PyCFunction) Bucket_minKey,	METH_VARARGS,
-     "minKey([key]) -- Find the minimum key\n\n"
-     "If an argument is given, find the minimum >= the argument"},
-
-    {"values",	(PyCFunction) bucket_values,	METH_KEYWORDS,
-     "values([min, max]) -- Return the values"},
-
-    {"items",	(PyCFunction) bucket_items,	METH_KEYWORDS,
-     "items([min, max])) -- Return the items"},
-
-    {"byValue",	(PyCFunction) bucket_byValue,	METH_O,
-     "byValue(min) -- "
-     "Return value-keys with values >= min and reverse sorted by values"},
-
-    {"get",	(PyCFunction) bucket_getm,	METH_VARARGS,
-     "get(key[,default]) -- Look up a value\n\n"
-     "Return the default (or None) if the key is not found."},
-
-    {"iterkeys", (PyCFunction) Bucket_iterkeys,  METH_KEYWORDS,
-     "B.iterkeys([min[,max]]) -> an iterator over the keys of B"},
-
-    {"itervalues", (PyCFunction) Bucket_itervalues,  METH_KEYWORDS,
-     "B.itervalues([min[,max]]) -> an iterator over the values of B"},
-
-    {"iteritems", (PyCFunction) Bucket_iteritems,    METH_KEYWORDS,
-     "B.iteritems([min[,max]]) -> an iterator over the (key, value) items of B"},
-
-#ifdef PERSISTENT
-    {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict,
-     METH_VARARGS,
-     "_p_resolveConflict() -- Reinitialize from a newly created copy"},
-
-    {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_KEYWORDS,
-     "_p_deactivate() -- Reinitialize from a newly created copy"},
-#endif
-    {NULL, NULL}
-};
-
-static int
-Bucket_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    PyObject *v = NULL;
-
-    if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "Bucket", &v))
-	return -1;
-
-    if (v)
-	return update_from_seq(self, v);
-    else
-	return 0;
-}
-
-static void
-bucket_dealloc(Bucket *self)
-{
-    if (self->state != cPersistent_GHOST_STATE)
-	_bucket_clear(self);
-
-    cPersistenceCAPI->pertype->tp_dealloc((PyObject *)self);
-}
-
-static int
-bucket_traverse(Bucket *self, visitproc visit, void *arg)
-{
-    int err = 0;
-    int i, len;
-
-#define VISIT(SLOT)                             \
-    if (SLOT) {                                 \
-        err = visit((PyObject *)(SLOT), arg);   \
-        if (err)                                \
-            goto Done;                          \
-    }
-
-    /* Call our base type's traverse function.  Because buckets are
-     * subclasses of Persistent, there must be one.
-     */
-    err = cPersistenceCAPI->pertype->tp_traverse((PyObject *)self, visit, arg);
-    if (err)
-	goto Done;
-
-    /* If this is registered with the persistence system, cleaning up cycles
-     * is the database's problem.  It would be horrid to unghostify buckets
-     * here just to chase pointers every time gc runs.
-     */
-    if (self->state == cPersistent_GHOST_STATE)
-        goto Done;
-
-    len = self->len;
-    (void)i;    /* if neither keys nor values are PyObject*, "i" is otherwise
-                   unreferenced and we get a nuisance compiler warning */
-#ifdef KEY_TYPE_IS_PYOBJECT
-    /* Keys are Python objects so need to be traversed. */
-    for (i = 0; i < len; i++)
-        VISIT(self->keys[i]);
-#endif
-
-#ifdef VALUE_TYPE_IS_PYOBJECT
-    if (self->values != NULL) {
-        /* self->values exists (this is a mapping bucket, not a set bucket),
-         * and are Python objects, so need to be traversed. */
-        for (i = 0; i < len; i++)
-            VISIT(self->values[i]);
-    }
-#endif
-
-    VISIT(self->next);
-
-Done:
-    return err;
-
-#undef VISIT
-}
-
-static int
-bucket_tp_clear(Bucket *self)
-{
-    if (self->state != cPersistent_GHOST_STATE)
-	_bucket_clear(self);
-    return 0;
-}
-
-/* Code to access Bucket objects as mappings */
-static int
-Bucket_length(Bucket *self)
-{
-    int r;
-    UNLESS (PER_USE(self)) return -1;
-    r = self->len;
-    PER_UNUSE(self);
-    return r;
-}
-
-static PyMappingMethods Bucket_as_mapping = {
-  (inquiry)Bucket_length,		/*mp_length*/
-  (binaryfunc)bucket_getitem,		/*mp_subscript*/
-  (objobjargproc)bucket_setitem,	/*mp_ass_subscript*/
-};
-
-static PySequenceMethods Bucket_as_sequence = {
-    (inquiry)0,                     /* sq_length */
-    (binaryfunc)0,                  /* sq_concat */
-    (intargfunc)0,                  /* sq_repeat */
-    (intargfunc)0,                  /* sq_item */
-    (intintargfunc)0,               /* sq_slice */
-    (intobjargproc)0,               /* sq_ass_item */
-    (intintobjargproc)0,            /* sq_ass_slice */
-    (objobjproc)bucket_contains,    /* sq_contains */
-    0,                              /* sq_inplace_concat */
-    0,                              /* sq_inplace_repeat */
-};
-
-static PyObject *
-bucket_repr(Bucket *self)
-{
-    PyObject *i, *r;
-    char repr[10000];
-    int rv;
-
-    i = bucket_items(self, NULL, NULL);
-    if (!i)
-	return NULL;
-    r = PyObject_Repr(i);
-    Py_DECREF(i);
-    if (!r) {
-	return NULL;
-    }
-    rv = PyOS_snprintf(repr, sizeof(repr),
-		       "%s(%s)", self->ob_type->tp_name,
-		       PyString_AS_STRING(r));
-    if (rv > 0 && rv < sizeof(repr)) {
-	Py_DECREF(r);
-	return PyString_FromStringAndSize(repr, strlen(repr));
-    }
-    else {
-        /* The static buffer wasn't big enough */
-        int size;
-        PyObject *s;
-
-        /* 3 for the parens and the null byte */
-        size = strlen(self->ob_type->tp_name) + PyString_GET_SIZE(r) + 3;
-        s = PyString_FromStringAndSize(NULL, size);
-        if (!s) {
-            Py_DECREF(r);
-            return NULL;        /* r was just released; don't return it */
-        }
-	PyOS_snprintf(PyString_AS_STRING(s), size,
-		      "%s(%s)", self->ob_type->tp_name, PyString_AS_STRING(r));
-	Py_DECREF(r);
-	return s;
-    }
-}
-
-static PyTypeObject BucketType = {
-    PyObject_HEAD_INIT(NULL) /* PyPersist_Type */
-    0,					/* ob_size */
-    MODULE_NAME MOD_NAME_PREFIX "Bucket",/* tp_name */
-    sizeof(Bucket),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)bucket_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    (reprfunc)bucket_repr,		/* tp_repr */
-    0,					/* tp_as_number */
-    &Bucket_as_sequence,		/* tp_as_sequence */
-    &Bucket_as_mapping,			/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
-	    Py_TPFLAGS_BASETYPE, 	/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)bucket_traverse,	/* tp_traverse */
-    (inquiry)bucket_tp_clear,		/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    (getiterfunc)Bucket_getiter,	/* tp_iter */
-    0,					/* tp_iternext */
-    Bucket_methods,			/* tp_methods */
-    Bucket_members,			/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    Bucket_init,			/* tp_init */
-    0,					/* tp_alloc */
-    0, /*PyType_GenericNew,*/		/* tp_new */
-};
-
-static int
-nextBucket(SetIteration *i)
-{
-  if (i->position >= 0)
-    {
-      UNLESS(PER_USE(BUCKET(i->set))) return -1;
-
-      if (i->position)
-        {
-          DECREF_KEY(i->key);
-          DECREF_VALUE(i->value);
-        }
-
-      if (i->position < BUCKET(i->set)->len)
-        {
-          COPY_KEY(i->key, BUCKET(i->set)->keys[i->position]);
-          INCREF_KEY(i->key);
-          COPY_VALUE(i->value, BUCKET(i->set)->values[i->position]);
-          INCREF_VALUE(i->value);
-          i->position ++;
-        }
-      else
-        {
-          i->position = -1;
-          PER_ACCESSED(BUCKET(i->set));
-        }
-
-      PER_ALLOW_DEACTIVATION(BUCKET(i->set));
-    }
-
-  return 0;
-}
diff --git a/branches/bug1734/src/BTrees/DEPENDENCIES.cfg b/branches/bug1734/src/BTrees/DEPENDENCIES.cfg
deleted file mode 100644
index 29d8d652..00000000
--- a/branches/bug1734/src/BTrees/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-persistent
-transaction
diff --git a/branches/bug1734/src/BTrees/IFBTree.py b/branches/bug1734/src/BTrees/IFBTree.py
deleted file mode 100644
index 7afe0e77..00000000
--- a/branches/bug1734/src/BTrees/IFBTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# hack to overcome dynamic-linking headache.
-from _IFBTree import *
diff --git a/branches/bug1734/src/BTrees/IIBTree.py b/branches/bug1734/src/BTrees/IIBTree.py
deleted file mode 100644
index ababe719..00000000
--- a/branches/bug1734/src/BTrees/IIBTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# hack to overcome dynamic-linking headache.
-from _IIBTree import *
diff --git a/branches/bug1734/src/BTrees/IOBTree.py b/branches/bug1734/src/BTrees/IOBTree.py
deleted file mode 100644
index 79771306..00000000
--- a/branches/bug1734/src/BTrees/IOBTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# hack to overcome dynamic-linking headache.
-from _IOBTree import *
diff --git a/branches/bug1734/src/BTrees/Interfaces.py b/branches/bug1734/src/BTrees/Interfaces.py
deleted file mode 100644
index c4711cdd..00000000
--- a/branches/bug1734/src/BTrees/Interfaces.py
+++ /dev/null
@@ -1,402 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-from zope.interface import Interface
-
-
-class ICollection(Interface):
-
-    def clear():
-        """Remove all of the items from the collection"""
-
-    def __nonzero__():
-        """Check if the collection is non-empty.
-
-        Return a true value if the collection is non-empty, and a
-        false value otherwise.
-        """
-
-
-class IReadSequence(Interface):
-
-    def __getitem__(index):
-        """Return a value at the given index
-
-        An IndexError is raised if the index is out of range.
-        """
-
-    def __getslice__(index1, index2):
-        """Return a subsequence from the original sequence
-
-        The subsequence includes the items from index1 up to, but not
-        including, index2.
-        """
-
-
-class IKeyed(ICollection):
-
-    def has_key(key):
-        """Check whether the object has an item with the given key.
-
-        Return a true value if the key is present, else a false value.
-        """
-
-    def keys(min=None, max=None, excludemin=False, excludemax=False):
-        """Return an IReadSequence containing the keys in the collection.
-
-        The type of the IReadSequence is not specified. It could be a list
-        or a tuple or some other type.
-
-        All arguments are optional, and may be specified as keyword
-        arguments, or by position.
-
-        If a min is specified, then output is constrained to keys greater
-        than or equal to the given min, and, if excludemin is specified and
-        true, is further constrained to keys strictly greater than min.  A
-        min value of None is ignored.  If min is None or not specified, and
-        excludemin is true, the smallest key is excluded.
-
-        If a max is specified, then output is constrained to keys less than
-        or equal to the given max, and, if excludemax is specified and
-        true, is further constrained to keys strictly less than max.  A max
-        value of None is ignored.  If max is None or not specified, and
-        excludemax is true, the largest key is excluded.
-        """
-
-    def maxKey(key=None):
-        """Return the maximum key
-
-        If a key argument is provided, return the largest key that is
-        less than or equal to the argument.
-        """
-
-    def minKey(key=None):
-        """Return the minimum key
-
-        If a key argument is provided, return the smallest key that is
-        greater than or equal to the argument.
-        """
-
-
-class ISetMutable(IKeyed):
-
-    def insert(key):
-        """Add the key (value) to the set.
-
-        If the key was already in the set, return 0, otherwise return 1.
-        """
-
-    def remove(key):
-        """Remove the key from the set."""
-
-    def update(seq):
-        """Add the items from the given sequence to the set."""
-
-
-class ISized(Interface):
-    """An object that supports __len__."""
-
-    def __len__():
-        """Return the number of items in the container."""
-
-
-class IKeySequence(IKeyed, ISized):
-
-    def __getitem__(index):
-        """Return the key in the given index position.
-
-        This allows iteration with for loops and use in functions,
-        like map and list, that read sequences.
-        """
-
-
-class ISet(IKeySequence, ISetMutable):
-    pass
-
-
-class ITreeSet(IKeyed, ISetMutable):
-    pass
-
-class IMinimalDictionary(ISized, IKeyed):
-
-    def get(key, default):
-        """Get the value for the given key
-
-        Return the default if the key is not in the collection.
-        """
-
-    def __setitem__(key, value):
-        """Set the value for the given key."""
-
-    def __delitem__(key):
-        """Delete the value for the given key.
-
-        Raise KeyError if the key is not in the collection.
-        """
-
-    def values(min=None, max=None, excludemin=False, excludemax=False):
-        """Return an IReadSequence containing the values in the collection.
-
-        The type of the IReadSequence is not specified. It could be a list
-        or a tuple or some other type.
-
-        All arguments are optional, and may be specified as keyword
-        arguments, or by position.
-
-        If a min is specified, then output is constrained to values whose
-        keys are greater than or equal to the given min, and, if excludemin
-        is specified and true, is further constrained to values whose keys
-        are strictly greater than min.  A min value of None is ignored.  If
-        min is None or not specified, and excludemin is true, the value
-        corresponding to the smallest key is excluded.
-
-        If a max is specified, then output is constrained to values whose
-        keys are less than or equal to the given max, and, if excludemax is
-        specified and true, is further constrained to values whose keys are
-        strictly less than max.  A max value of None is ignored.  If max is
-        None or not specified, and excludemax is true, the value
-        corresponding to the largest key is excluded.
-        """
-
-    def items(min=None, max=None, excludemin=False, excludemax=False):
-        """Return an IReadSequence containing the items in the collection.
-
-        An item is a 2-tuple, a (key, value) pair.
-
-        The type of the IReadSequence is not specified.  It could be a list
-        or a tuple or some other type.
-
-        All arguments are optional, and may be specified as keyword
-        arguments, or by position.
-
-        If a min is specified, then output is constrained to items whose
-        keys are greater than or equal to the given min, and, if excludemin
-        is specified and true, is further constrained to items whose keys
-        are strictly greater than min.  A min value of None is ignored.  If
-        min is None or not specified, and excludemin is true, the item with
-        the smallest key is excluded.
-
-        If a max is specified, then output is constrained to items whose
-        keys are less than or equal to the given max, and, if excludemax is
-        specified and true, is further constrained to items whose keys are
-        strictly less than max.  A max value of None is ignored.  If max is
-        None or not specified, and excludemax is true, the item with the
-        largest key is excluded.
-        """
-
-class IDictionaryIsh(IMinimalDictionary):
-
-    def update(collection):
-        """Add the items from the given collection object to the collection.
-
-        The input collection must be a sequence of key-value tuples,
-        or an object with an 'items' method that returns a sequence of
-        key-value tuples.
-        """
-
-    def byValue(minValue):
-        """Return a sequence of value-key pairs, sorted by value
-
-        Values < min are omitted and other values are "normalized" by
-        the minimum value.  This normalization may be a no-op, but, for
-        integer values, the normalization is division.
-        """
-
-
-class IBTree(IDictionaryIsh):
-
-    def insert(key, value):
-        """Insert a key and value into the collection.
-
-        If the key was already in the collection, then there is no
-        change and 0 is returned.
-
-        If the key was not already in the collection, then the item is
-        added and 1 is returned.
-
-        This method is here to allow one to generate random keys and
-        to insert and test whether the key was there in one operation.
-
-        A standard idiom for generating new keys will be::
-
-          key=generate_key()
-          while not t.insert(key, value):
-              key=generate_key()
-        """
-
-
-class IMerge(Interface):
-    """Object with methods for merging sets, buckets, and trees.
-
-    These methods are supplied in modules that define collection
-    classes with particular key and value types. The operations apply
-    only to collections from the same module.  For example, the
-    IIBTree.union can only be used with IIBTree.IIBTree,
-    IIBTree.IIBucket, IIBTree.IISet, and IIBTree.IITreeSet.
-
-    The implementing module has a value type. The IOBTree and OOBTree
-    modules have object value type. The IIBTree and OIBTree modules
-    have integer value types. Other modules may be defined in the
-    future that have other value types.
-
-    The individual types are classified into set (Set and TreeSet) and
-    mapping (Bucket and BTree) types.
-    """
-
-    def difference(c1, c2):
-        """Return the keys or items in c1 for which there is no key in
-        c2.
-
-        If c1 is None, then None is returned.  If c2 is None, then c1
-        is returned.
-
-        If neither c1 nor c2 is None, the output is a Set if c1 is a Set or
-        TreeSet, and is a Bucket if c1 is a Bucket or BTree.
-        """
-
-    def union(c1, c2):
-        """Compute the Union of c1 and c2.
-
-        If c1 is None, then c2 is returned, otherwise, if c2 is None,
-        then c1 is returned.
-
-        The output is a Set containing keys from the input
-        collections.
-        """
-
-    def intersection(c1, c2):
-        """Compute the intersection of c1 and c2.
-
-        If c1 is None, then c2 is returned, otherwise, if c2 is None,
-        then c1 is returned.
-
-        The output is a Set containing matching keys from the input
-        collections.
-        """
-
-
-class IIMerge(IMerge):
-    """Merge collections with integer value type.
-
-    A primary intent is to support operations on collections with no
-    values (sets) or integer values, which are used as "scores" to
-    rate individual keys.  That is, in this context, a BTree or Bucket
-    is viewed as a set with scored keys, using integer scores.
-    """
-
-    def weightedUnion(c1, c2, weight1=1, weight2=1):
-        """Compute the weighted union of c1 and c2.
-
-        If c1 and c2 are None, the output is (0, None).
-
-        If c1 is None and c2 is not None, the output is (weight2, c2).
-
-        If c1 is not None and c2 is None, the output is (weight1, c1).
-
-        Else, and hereafter, c1 is not None and c2 is not None.
-
-        If c1 and c2 are both sets, the output is 1 and the (unweighted)
-        union of the sets.
-
-        Else the output is 1 and a Bucket whose keys are the union of c1 and
-        c2's keys, and whose values are::
-
-          v1*weight1 + v2*weight2
-
-          where:
-
-            v1 is 0        if the key is not in c1
-                  1        if the key is in c1 and c1 is a set
-                  c1[key]  if the key is in c1 and c1 is a mapping
-
-            v2 is 0        if the key is not in c2
-                  1        if the key is in c2 and c2 is a set
-                  c2[key]  if the key is in c2 and c2 is a mapping
-
-        Note that c1 and c2 must be collections.
-        """
-
-    def weightedIntersection(c1, c2, weight1=1, weight2=1):
-        """Compute the weighted intersection of c1 and c2.
-
-        If c1 and c2 are None, the output is (0, None).
-
-        If c1 is None and c2 is not None, the output is (weight2, c2).
-
-        If c1 is not None and c2 is None, the output is (weight1, c1).
-
-        Else, and hereafter, c1 is not None and c2 is not None.
-
-        If c1 and c2 are both sets, the output is the sum of the weights
-        and the (unweighted) intersection of the sets.
-
-        Else the output is 1 and a Bucket whose keys are the intersection of
-        c1 and c2's keys, and whose values are::
-
-          v1*weight1 + v2*weight2
-
-          where:
-
-            v1 is 1        if c1 is a set
-                  c1[key]  if c1 is a mapping
-
-            v2 is 1        if c2 is a set
-                  c2[key]  if c2 is a mapping
-
-        Note that c1 and c2 must be collections.
-        """
-
-
-class IMergeIntegerKey(IMerge):
-    """IMerge-able objects with integer keys.
-
-    Concretely, this means the types in IOBTree and IIBTree.
-    """
-
-    def multiunion(seq):
-        """Return union of (zero or more) integer sets, as an integer set.
-
-        seq is a sequence of objects, each convertible to an integer set.
-        The following kinds of objects are convertible to an integer set:
-
-        + An integer, which is added to the union.
-
-        + A Set or TreeSet from the same module (for example, an
-          IIBTree.TreeSet for IIBTree.multiunion()).  The elements of the
-          set are added to the union.
-
-        + A Bucket or BTree from the same module (for example, an
-          IOBTree.IOBTree for IOBTree.multiunion()).  The keys of the
-          mapping are added to the union.
-
-        The union is returned as a Set from the same module (for example,
-        IIBTree.multiunion() returns an IIBTree.IISet).
-
-        The point to this method is that it can run much faster than
-        doing a sequence of two-input union() calls.  Under the covers,
-        all the integers in all the inputs are sorted via a single
-        linear-time radix sort, then duplicates are removed in a second
-        linear-time pass.
-        """
-
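-# Illustrative example of multiunion():  IIBTree.multiunion(
-#     [IISet([1, 2]), 3, IIBucket({4: 0})]) returns IISet([1, 2, 3, 4]).
-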
-###############################################################
-# IMPORTANT NOTE
-#
-# Getting the length of a BTree, TreeSet, or output of keys,
-# values, or items of same is expensive. If you need to get the
-# length, you need to maintain this separately.
-#
-# Eventually, I need to express this through the interfaces.
-#
-################################################################
diff --git a/branches/bug1734/src/BTrees/Length.py b/branches/bug1734/src/BTrees/Length.py
deleted file mode 100644
index 449afd72..00000000
--- a/branches/bug1734/src/BTrees/Length.py
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import persistent
-
-class Length(persistent.Persistent):
-    """BTree lengths are too expensive to compute
-
-    Objects that use BTrees need to keep track of lengths themselves.
-    This class provides an object for doing this.
-
-    As a bonus, the object supports application-level conflict
-    resolution.
-
-    It is tempting to assign length objects to __len__ attributes
-    to provide instance-specific __len__ methods.  However, this no
-    longer works as expected, because new-style classes cache
-    class-defined slot methods (like __len__) in C type slots.  Thus,
-    instance-defined slot fillers are ignored.
-
-    """
-
-    def __init__(self, v=0):
-        self.value = v
-
-    def __getstate__(self):
-        return self.value
-
-    def __setstate__(self, v):
-        self.value = v
-
-    def set(self, v):
-        self.value = v
-
-    def _p_resolveConflict(self, old, s1, s2):
-        return s1 + s2 - old
-
-    def _p_independent(self):
-        # My state doesn't depend on or materially affect the state of
-        # other objects.
-        return 1
-
-    def change(self, delta):
-        self.value += delta
-
-    def __call__(self, *args):
-        return self.value
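-
-# Illustrative sketch of the conflict resolution above:  if two
-# transactions both start from old == 0 and each calls change(+1), the
-# competing states are s1 == 1 and s2 == 1, and _p_resolveConflict
-# returns s1 + s2 - old == 2, so neither increment is lost.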
diff --git a/branches/bug1734/src/BTrees/Maintainer.txt b/branches/bug1734/src/BTrees/Maintainer.txt
deleted file mode 100644
index b6cd92ee..00000000
--- a/branches/bug1734/src/BTrees/Maintainer.txt
+++ /dev/null
@@ -1,374 +0,0 @@
-This document provides information for developers who maintain or
-extend BTrees.
-
-Macros
-======
-BTrees are defined using a "template", roughly akin to a a C++
-template.  To create a new family of BTrees, create a source file that
-defines macros used to handle differences in key and value types:
-
-
-Configuration Macros
-
-MASTER_ID
-A string to hold an RCS/CVS Id key to be included in compiled binaries.
-
-MOD_NAME_PREFIX
-A string (like "IO" or "OO") that provides the prefix used for the
-module.  This gets used to generate type names and the internal module
-name string.
-
-DEFAULT_MAX_BUCKET_SIZE
-An int giving the maximum bucket size (number of key/value pairs).
-When a bucket gets larger than this due to an insertion *into a BTREE*,
-it splits.  Inserting into a bucket directly doesn't split, and
-functions that produce a bucket output (e.g., union()) also have no
-bound on how large a bucket may get.  Someday this will be tunable
-on BTree instances.
-
-DEFAULT_MAX_BTREE_SIZE
-An int giving the maximum size (number of children) of an internal
-btree node.  Someday this will be tunable on BTree instances.
-
-Macros for Keys
-
-KEY_TYPE
-The C type declaration for keys (e.g., int or PyObject*).
-
-KEY_TYPE_IS_PYOBJECT
-Define if KEY_TYPE is a PyObject*, else undef.
-
-KEY_CHECK(K)
-Tests whether the PyObject* K can be converted to the (C) key type
-(KEY_TYPE).  The macro should return a boolean (zero for false,
-non-zero for true).  When it returns false, its caller should probably
-set a TypeError exception.
-
-TEST_KEY_SET_OR(V, K, T)
-Like Python's cmp().  Compares K(ey) to T(arget), where K & T are C
-values of type KEY_TYPE.  V is assigned an int value depending on
-the outcome:
-   < 0 if K < T
-  == 0 if K == T
-   > 0 if K > T
-This macro acts like an 'if', where the following statement is
-executed only if a Python exception has been raised because the
-values could not be compared.
-
-DECREF_KEY(K)
-K is a value of KEY_TYPE.  If KEY_TYPE is a flavor of PyObject*, write
-this to do Py_DECREF(K).  Else (e.g., KEY_TYPE is int) make it a nop.
-
-INCREF_KEY(K)
-K is a value of KEY_TYPE.  If KEY_TYPE is a flavor of PyObject*, write
-this to do Py_INCREF(K).  Else (e.g., KEY_TYPE is int) make it a nop.
-
-COPY_KEY(K, E)
-Like K=E.  Copy a key from E to K, both of KEY_TYPE.  Note that this
-doesn't decref K or incref E when KEY_TYPE is a PyObject*; the caller
-is responsible for keeping refcounts straight.
-
-COPY_KEY_TO_OBJECT(O, K)
-Roughly like O=K.  O is a PyObject*, and the macro must build a Python
-object form of K, assign it to O, and ensure that O owns the reference
-to its new value.  It may do this by creating a new Python object based
-on K (e.g., PyInt_FromLong(K) when KEY_TYPE is int), or simply by doing
-Py_INCREF(K) if KEY_TYPE is a PyObject*.
-
-COPY_KEY_FROM_ARG(TARGET, ARG, STATUS)
-Copy an argument to the target without creating a new reference to ARG.
-ARG is a PyObject*, and TARGET is of type KEY_TYPE.  If this can't be
-done (for example, KEY_CHECK(ARG) returns false), set a Python error
-and set status to 0.  If there is no error, leave status alone.
-
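-As a concrete illustration, an integer-keyed module might define the
-key macros roughly like this (a sketch, not the verbatim source):
-
-    #define KEY_TYPE int
-    #undef  KEY_TYPE_IS_PYOBJECT
-    #define KEY_CHECK(K) PyInt_Check(K)
-    /* int comparison can't fail, so the trailing ", 0" keeps the error
-       statement following the macro from ever executing. */
-    #define TEST_KEY_SET_OR(V, K, T) \
-        if (((V) = ((K) < (T) ? -1 : ((K) > (T) ? 1 : 0))), 0)
-    #define DECREF_KEY(K)                   /* no-op for ints */
-    #define INCREF_KEY(K)                   /* no-op for ints */
-    #define COPY_KEY(K, E) ((K) = (E))
-    #define COPY_KEY_TO_OBJECT(O, K) O = PyInt_FromLong(K)
-    #define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \
-        if (PyInt_Check(ARG)) \
-            (TARGET) = PyInt_AS_LONG(ARG); \
-        else { \
-            PyErr_SetString(PyExc_TypeError, "expected an integer key"); \
-            (STATUS) = 0; \
-        }
-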
-
-Macros for Values
-
-VALUE_TYPE
-The C type declaration for values (e.g., int or PyObject*).
-
-VALUE_TYPE_IS_PYOBJECT
-Define if VALUE_TYPE is a PyObject*, else undef.
-
-TEST_VALUE(X, Y)
-Like Python's cmp().  Compares X to Y, where X & Y are C values of
-type VALUE_TYPE.  The macro returns an int, with value
-   < 0 if X < Y
-  == 0 if X == Y
-   > 0 if X > Y
-Bug:  There is no provision for determining whether the comparison
-attempt failed (set a Python exception).
-
-DECREF_VALUE(K)
-Like DECREF_KEY, except applied to values of VALUE_TYPE.
-
-INCREF_VALUE(K)
-Like INCREF_KEY, except applied to values of VALUE_TYPE.
-
-COPY_VALUE(K, E)
-Like COPY_KEY, except applied to values of VALUE_TYPE.
-
-COPY_VALUE_TO_OBJECT(O, K)
-Like COPY_KEY_TO_OBJECT, except applied to values of VALUE_TYPE.
-
-COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS)
-Like COPY_KEY_FROM_ARG, except applied to values of VALUE_TYPE.
-
-NORMALIZE_VALUE(V, MIN)
-Normalize the value, V, using the parameter MIN.  This is almost
-certainly a YAGNI.  It is a no-op for most types.  For integers, V is
-replaced by V/MIN only if MIN > 0.
-
-
-Macros for Set Operations
-
-MERGE_DEFAULT
-A value of VALUE_TYPE specifying the value to associate with set
-elements when sets are merged with mappings via weighted union or
-weighted intersection.
-
-MERGE(O1, w1, O2, w2)
-Performs a weighted merge of two values, O1 and O2, using weights w1
-and w2.  The result must be of VALUE_TYPE.  Note that weighted unions
-and weighted intersections are not enabled if this macro is left
-undefined.
-
-MERGE_WEIGHT(O, w)
-Computes a weighted value for O.  The result must be of VALUE_TYPE.
-This is used for "filling out" weighted unions, i.e. to compute a
-weighted value for keys that appear in only one of the input
-mappings.  If left undefined, MERGE_WEIGHT defaults to
-
-    #define MERGE_WEIGHT(O, w) (O)
-
-MULTI_INT_UNION
-The value doesn't matter.  If defined, SetOpTemplate.c compiles
-code for a multiunion() function (compute a union of many input sets
-at high speed).  This currently makes sense only for structures with
-integer keys.
-
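-For integer values, the merge macros might look roughly like this (an
-illustrative sketch):
-
-    #define MERGE_DEFAULT 1
-    #define MERGE(O1, w1, O2, w2) ((O1) * (w1) + (O2) * (w2))
-    #define MERGE_WEIGHT(O, w)    ((O) * (w))
-
-so that a key with value 2 in one input and 3 in the other, merged
-with weights 5 and 7, gets the value 2*5 + 3*7 == 31.
-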
-
-BTree Clues
-===========
-More or less random bits of helpful info.
-
-+ In papers and textbooks, this flavor of BTree is usually called
-  a B+-Tree, where "+" is a superscript.
-
-+ All keys and all values live in the bucket leaf nodes.  Keys in
-  interior (BTree) nodes merely serve to guide a search efficiently
-  toward the correct leaf.
-
-+ When a key is deleted, it's physically removed from the bucket
-  it's in, but this doesn't propagate back up the tree:  since keys
-  in interior nodes only serve to guide searches, it's OK -- and
-  saves time -- to leave "stale" keys in interior nodes.
-
-+ No attempt is made to rebalance the tree after a deletion, unless
-  a bucket thereby becomes entirely empty.  "Classic BTrees" do
-  rebalance, keeping all buckets at least half full (provided there
-  are enough keys in the entire tree to fill half a bucket).  The
-  tradeoffs are murky.  Pathological cases in the presence of
-  deletion do exist.  Pathologies include trees tending toward only
-  one key per bucket, and buckets at differing depths (all buckets
-  are at the same depth in a classic BTree).
-
-+ DEFAULT_MAX_BUCKET_SIZE and DEFAULT_MAX_BTREE_SIZE are chosen
-  mostly to "even out" pickle sizes in storage.  That's why, e.g.,
-  an IIBTree has larger values than an OOBTree:  pickles store ints
-  more efficiently than they can store arbitrary Python objects.
-
-+ In a non-empty BTree, every bucket node contains at least one key,
-  and every BTree node contains at least one child and a non-NULL
-  firstbucket pointer.  However, a BTree node may not contain any keys.
-
-+ An empty BTree consists solely of a BTree node with len==0 and
-  firstbucket==NULL.
-
-+ Although a BTree can become unbalanced under a mix of inserts and
-  deletes (meaning both that there's nothing stronger that can be
-  said about buckets than that they're not empty, and that buckets
-  can appear at different depths), a BTree node always has children
-  of the same kind:  they're all buckets, or they're all BTree nodes.
-
-
-The BTREE_SEARCH Macro
-======================
-For notational ease, consider a fixed BTree node x, and let
-
-    K(i) mean x->data.key[i]
-    C(i) mean all the keys reachable from x->data.child[i]
-
-For each i in 0 to x->len-1 inclusive,
-
-    K(i) <= C(i) < K(i+1)
-
-is a BTree node invariant, where we pretend that K(0) holds a key
-smaller than any possible key, and K(x->len) holds a key larger
-than any possible key.  (Note that K(x->len) doesn't actually exist,
-and K(0) is never used although space for it exists in non-empty
-BTree nodes.)
-
-When searching for a key k, then, the child pointer we want to follow
-is the one at index i such that K(i) <= k < K(i+1).  There can be
-at most one such i, since the K(i) are strictly increasing.  And there
-is at least one such i provided the tree isn't empty (so that 0 < len).
-For the moment, assume the tree isn't empty (we'll get back to that
-later).
-
-The macro's chief loop invariant is
-
-    K(lo) < k < K(hi)
-
-This holds trivially at the start, since lo is set to 0, and hi to
-x->len, and we pretend K(0) is minus infinity and K(len) is plus
-infinity.  Inside the loop, if K(i) < k we set lo to i, and if
-K(i) > k we set hi to i.  These obviously preserve the invariant.
-If K(i) == k, the loop breaks and sets the result to i, and since
-K(i) == k in that case i is obviously the correct result.
-
-Other cases depend on how i = floor((lo + hi)/2) works, exactly.
-Suppose lo + d = hi for some d >= 0.  Then i = floor((lo + lo + d)/2) =
-floor(lo + d/2) = lo + floor(d/2).  So:
-
-a. [d == 0] (lo == i == hi) if and only if (lo   == hi).
-b. [d == 1] (lo == i  < hi) if and only if (lo+1 == hi).
-c. [d  > 1] (lo  < i  < hi) if and only if (lo+1  < hi).
-
-If the node is empty (x->len == 0), then lo==i==hi==0 at the start,
-and the loop exits immediately (the first "i > lo" test fails),
-without entering the body.
-
-Else lo < hi at the start, and the invariant K(lo) < k < K(hi) holds.
-
-If lo+1 < hi, we're in case #c:  i is strictly between lo and hi,
-so the loop body is entered, and regardless of whether the body sets
-the new lo or the new hi to i, the new lo is strictly less than the
-new hi, and the difference between the new lo and new hi is strictly
-less than the difference between the old lo and old hi.  So long as
-the new lo + 1 remains < the new hi, we stay in this case.  We can't
-stay in this case forever, though:  because hi-lo decreases on each
-trip but remains > 0, lo+1 == hi must eventually become true.  (In
-fact, it becomes true quickly, in about log2(x->len) trips; the
-point is more that lo doesn't equal hi when the loop ends, it has to
-end with lo+1==hi and i==lo).
-
-Then we're in case #b:  i==lo==hi-1 then, and the loop exits.  The
-invariant still holds, with lo==i and hi==lo+1==i+1:
-
-    K(i) < k < K(i+1)
-
-so i is again the correct answer.
-
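-In code form, the loop just analyzed looks roughly like this (a sketch
-of the macro body, not the verbatim source):
-
-    _lo = 0;
-    _hi = x->len;
-    for (_i = _hi >> 1; _i > _lo; _i = (_lo + _hi) >> 1) {
-        TEST_KEY_SET_OR(_cmp, x->data[_i].key, k)
-            goto Error;                /* the comparison raised */
-        if (_cmp < 0)       _lo = _i;  /* K(i) < k:  raise the floor */
-        else if (_cmp > 0)  _hi = _i;  /* K(i) > k:  lower the ceiling */
-        else                break;     /* exact match at i */
-    }
-    RESULT = _i;
-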
-Optimization points:
-
-+ Division by 2 is done via shift rather than via "/2".  These are
-  signed ints, and almost all C compilers treat signed int division
-  as truncating, and shifting is not the same as truncation for
-  signed int division.  The compiler has no way to know these values
-  aren't negative, so has to generate longer-winded code for "/2".
-  But we know these values aren't negative, and exploit it.
-
-+ The order of _cmp comparisons matters.  We're in an interior
-  BTree node, and are looking at only a tiny fraction of all the
-  keys that exist.  So finding the key exactly in this node is
-  unlikely, and checking _cmp == 0 is a waste of time to the same
-  extent.  It doesn't matter whether we check for _cmp < 0 or
-  _cmp > 0 first, so long as we do both before worrying about
-  equality.
-
-+ At the start of a routine, it's better to run this macro even
-  if x->len is 0 (check for that afterwards).  We just called a
-  function and so probably drained the pipeline.  If the first thing
-  we do then is load x->len and check it against 0, we just sit
-  there waiting for the data to arrive, followed by another
-  immediate test-and-branch, all for a very unlikely case (BTree
-  nodes are rarely empty).  It's better to get into the loop right
-  away so the normal case makes progress ASAP.
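-
-To illustrate the first point with a standalone (hypothetical)
-fragment:  the two forms agree only for nonnegative operands, and
-the compiler can't prove nonnegativity on its own:
-
-    int mid1 = (lo + hi) / 2;   /* truncates toward 0:  -3/2 == -1 */
-    int mid2 = (lo + hi) >> 1;  /* floors (arithmetic shift, the
-                                   usual C behavior):  -3 >> 1 == -2 */
-
-For lo, hi >= 0 both compute floor((lo+hi)/2), but only the shift
-lets the compiler skip the sign-fixup code.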
-
-
-The BUCKET_SEARCH Macro
-=======================
-This has a different job than BTREE_SEARCH:  the key 0 slot is
-legitimate in a bucket, and we want to find the index at which the
-key belongs.  If the key is larger than the bucket's largest key, a
-new slot at index len is where it belongs, else it belongs at the
-smallest i with keys[i] >= the key we're looking for.  We also need
-to know whether or not the key is present (BTREE_SEARCH didn't care;
-it only wanted to find the next node to search).
-
-The mechanics of the search are quite similar, though.  The primary
-loop invariant changes to (say we're searching for key k):
-
-    K(lo-1) < k < K(hi)
-
-where K(i) means keys[i], and we pretend K(-1) is minus infinity and
-K(len) is plus infinity.
-
-If the bucket is empty, lo=hi=i=0 at the start, the loop body is never
-entered, and the macro sets INDEX to 0 and ABSENT to true.  That's why
-_cmp is initialized to 1 (_cmp becomes ABSENT).
-
-Else the bucket is not empty, lo<hi at the start, and the loop body
-is entered.  The invariant is obviously satisfied then, as lo=0 and
-hi=len.
-
-If K(i) < k, lo is set to i+1, preserving that K(lo-1) = K(i) < k.
-If K(i) > k, hi is set to i, preserving that K(hi) = K(i) > k.
-If the loop exits after either of those, _cmp != 0, so ABSENT becomes
-true.
-If K(i) == k, the loop breaks, so that INDEX becomes i, and ABSENT
-becomes false (_cmp=0 in this case).
-
-The same case analysis for BTREE_SEARCH on lo and hi holds here:
-
-a. (lo == i == hi) if and only if (lo   == hi).
-b. (lo == i  < hi) if and only if (lo+1 == hi).
-c. (lo  < i  < hi) if and only if (lo+1  < hi).
-
-So long as lo+1 < hi, we're in case #c, and either break with
-equality (in which case the right results are obviously computed) or
-narrow the range.  If equality doesn't obtain, the range eventually
-narrows to cases #a or #b.
-
-To go from #c to #a, we must have lo+2==hi at the start, and
-K(i) = K(lo+1) < k.  Then the new lo gets set to i+1 = lo+2 = hi, and the
-loop exits with lo=hi=i and _cmp<0.  This is correct, because we
-know that k != K(i) (loop invariant! we actually know something
-stronger, that k < K(hi); since i=hi, this implies k != K(i)).
-
-Else #c eventually falls into case #b, lo+1==hi and i==lo.  The
-invariant tells us K(lo-1) < k < K(hi) = K(lo+1), so if the key
-is present it must be at K(lo).  i==lo in this case, so we test
-K(lo) against k.  As always, if equality obtains we do the right
-thing, else case #b becomes case #a.
-
-When #b becomes #a, the last comparison was non-equal, so _cmp is
-non-zero, and the loop exits because lo==hi==i in case #a.  The
-invariant then tells us K(lo-1) < k < K(lo), so the key is in fact
-not present, it's correct to exit with _cmp non-zero, and i==lo is
-again the index at which k belongs.
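-
-A sketch of this loop, in the same spirit as (and with the same
-caveats as) the BTREE_SEARCH sketch above:
-
-    lo = 0;
-    hi = len;
-    _cmp = 1;       /* so ABSENT is true if the loop is never entered */
-    for (i = hi >> 1; lo < hi; i = (lo + hi) >> 1) {
-        _cmp = compare(K(i), k);
-        if      (_cmp < 0)  lo = i + 1;  /* K(i) < k */
-        else if (_cmp == 0) break;       /* found:  key present */
-        else                hi = i;      /* K(i) > k */
-    }
-    INDEX = i;
-    ABSENT = _cmp;  /* non-zero iff the key is absent */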
-
-Optimization points:
-
-+ As for BTREE_SEARCH, shifting of signed ints is cheaper than
-  division.
-
-+ Unlike as for BTREE_SEARCH, there's nothing special about searching
-  an empty bucket, and the macro computes thoroughly sensible results
-  in that case.
-
-+ The order of _cmp comparisons differs from BTREE_SEARCH.  When
-  searching a bucket, it's much more likely (than when searching a
-  BTree node) that the key is present, so testing _cmp==0 isn't a
-  systematic waste of cycles.  At the extreme, if all searches are
-  successful (key present), on average this saves one comparison per
-  search, against leaving the determination of _cmp==0 implicit (as
-  BTREE_SEARCH does).  But even on successful searches, _cmp != 0 is
-  a more popular outcome than _cmp == 0 across iterations (unless
-  the bucket has only a few keys), so it's important to check one
-  of the inequality cases first.  It turns out it's better on average
-  to check K(i) < key (than to check K(i) > key), because when it
-  pays it narrows the range more (we get a little boost from setting
-  lo=i+1 in this case; the other case sets hi=i, which isn't as much
-  of a narrowing).
diff --git a/branches/bug1734/src/BTrees/MergeTemplate.c b/branches/bug1734/src/BTrees/MergeTemplate.c
deleted file mode 100644
index ae672125..00000000
--- a/branches/bug1734/src/BTrees/MergeTemplate.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define MERGETEMPLATE_C "$Id$\n"
-
-/****************************************************************************
- Set operations
- ****************************************************************************/
-
-static int
-merge_output(Bucket *r, SetIteration *i, int mapping)
-{
-    if (r->len >= r->size && Bucket_grow(r, -1, !mapping) < 0)
-	return -1;
-    COPY_KEY(r->keys[r->len], i->key);
-    INCREF_KEY(r->keys[r->len]);
-    if (mapping) {
-	COPY_VALUE(r->values[r->len], i->value);
-	INCREF_VALUE(r->values[r->len]);
-    }
-    r->len++;
-    return 0;
-}
-
-/* The "reason" argument is a little integer giving "a reason" for the
- * error.  In the Zope3 codebase, these are mapped to explanatory strings
- * via zodb/btrees/interfaces.py.
- */
-static PyObject *
-merge_error(int p1, int p2, int p3, int reason)
-{
-  PyObject *r;
-
-  UNLESS (r=Py_BuildValue("iiii", p1, p2, p3, reason)) r=Py_None;
-  if (ConflictError == NULL) {
-  	ConflictError = PyExc_ValueError;
-	Py_INCREF(ConflictError);
-  }
-  PyErr_SetObject(ConflictError, r);
-  if (r != Py_None)
-    {
-      Py_DECREF(r);
-    }
-
-  return NULL;
-}
-
-/* It's hard to explain "the rules" for bucket_merge, in large part because
- * any automatic conflict-resolution scheme is going to be incorrect for
- * some endcases of *some* app.  The scheme here is pretty conservative,
- * and should be OK for most apps.  It's easier to explain what the code
- * allows than what it forbids:
- *
- * Leaving things alone:  it's OK if both s2 and s3 leave a piece of s1
- * alone (don't delete the key, and don't change the value).
- *
- * Key deletion:  a transaction (s2 or s3) can delete a key (from s1), but
- * only if the other transaction (of s2 and s3) doesn't delete the same key.
- * However, it's not OK for s2 and s3 to, between them, end up deleting all
- * the keys.  This is a higher-level constraint:  the caller of
- * bucket_merge() doesn't have enough info to unlink the resulting empty
- * bucket from its BTree correctly.  It's also not OK if s2 or s3 are empty,
- * because the transaction that emptied the bucket unlinked the bucket from
- * the tree, and nothing we do here can get it linked back in again.
- *
- * Key insertion:  s2 or s3 can add a new key, provided the other transaction
- * doesn't insert the same key.  It's not OK even if they insert the same
- * <key, value> pair.
- *
- * Mapping value modification:  s2 or s3 can modify the value associated
- * with a key in s1, provided the other transaction doesn't make a
- * modification of the same key to a different value.  It's OK if s2 and s3
- * both give the same new value to the key (while it's hard to be
- * precise about why, this doesn't seem consistent with the rule that
- * it's *not* OK for both to add a new key mapping to the same value).
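- *
- * A concrete (hypothetical) illustration:  starting from s1 = {k: 1},
- * the pair s2 = {k: 2}, s3 = {k: 1} (only s2 changed k's value) merges
- * to {k: 2}; but s2 = {k: 2}, s3 = {k: 3} is a conflicting pair of
- * value changes, and resolution fails (reason 1 below).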
- */
-static PyObject *
-bucket_merge(Bucket *s1, Bucket *s2, Bucket *s3)
-{
-  Bucket *r=0;
-  PyObject *s;
-  SetIteration i1 = {0,0,0}, i2 = {0,0,0}, i3 = {0,0,0};
-  int cmp12, cmp13, cmp23, mapping, set;
-
-  /* If either "after" bucket is empty, punt. */
-  if (s2->len == 0 || s3->len == 0)
-    {
-      merge_error(-1, -1, -1, 12);
-      goto err;
-    }
-
-  if (initSetIteration(&i1, OBJECT(s1), 1) < 0)
-      goto err;
-  if (initSetIteration(&i2, OBJECT(s2), 1) < 0)
-      goto err;
-  if (initSetIteration(&i3, OBJECT(s3), 1) < 0)
-      goto err;
-
-  mapping = i1.usesValue | i2.usesValue | i3.usesValue;
-  set = !mapping;
-
-  if (mapping)
-      r = (Bucket *)PyObject_CallObject((PyObject *)&BucketType, NULL);
-  else
-      r = (Bucket *)PyObject_CallObject((PyObject *)&SetType, NULL);
-  if (r == NULL)
-      goto err;
-
-  if (i1.next(&i1) < 0)
-      goto err;
-  if (i2.next(&i2) < 0)
-      goto err;
-  if (i3.next(&i3) < 0)
-      goto err;
-
-  /* Consult zodb/btrees/interfaces.py for the meaning of the last
-   * argument passed to merge_error().
-   */
-  /* TODO:  This isn't passing on errors raised by value comparisons. */
-  while (i1.position >= 0 && i2.position >= 0 && i3.position >= 0)
-    {
-      TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err;
-      TEST_KEY_SET_OR(cmp13, i1.key, i3.key) goto err;
-      if (cmp12==0)
-        {
-          if (cmp13==0)
-            {
-              if (set || (TEST_VALUE(i1.value, i2.value) == 0))
-                {               /* change in i3 value or all same */
-                  if (merge_output(r, &i3, mapping) < 0) goto err;
-                }
-              else if (set || (TEST_VALUE(i1.value, i3.value) == 0))
-                {               /* change in i2 value */
-                  if (merge_output(r, &i2, mapping) < 0) goto err;
-                }
-              else
-                {               /* conflicting value changes in i2 and i3 */
-                  merge_error(i1.position, i2.position, i3.position, 1);
-                  goto err;
-                }
-              if (i1.next(&i1) < 0) goto err;
-              if (i2.next(&i2) < 0) goto err;
-              if (i3.next(&i3) < 0) goto err;
-            }
-          else if (cmp13 > 0)
-            {                   /* insert i3 */
-              if (merge_output(r, &i3, mapping) < 0) goto err;
-              if (i3.next(&i3) < 0) goto err;
-            }
-          else if (set || (TEST_VALUE(i1.value, i2.value) == 0))
-            {                   /* deleted in i3 */
-              if (i1.next(&i1) < 0) goto err;
-              if (i2.next(&i2) < 0) goto err;
-            }
-          else
-            {                   /* conflicting del in i3 and change in i2 */
-              merge_error(i1.position, i2.position, i3.position, 2);
-              goto err;
-            }
-        }
-      else if (cmp13 == 0)
-        {
-          if (cmp12 > 0)
-            {                   /* insert i2 */
-              if (merge_output(r, &i2, mapping) < 0) goto err;
-              if (i2.next(&i2) < 0) goto err;
-            }
-          else if (set || (TEST_VALUE(i1.value, i3.value) == 0))
-            {                   /* deleted in i2 */
-              if (i1.next(&i1) < 0) goto err;
-              if (i3.next(&i3) < 0) goto err;
-            }
-          else
-            {                   /* conflicting del in i2 and change in i3 */
-              merge_error(i1.position, i2.position, i3.position, 3);
-              goto err;
-            }
-        }
-      else
-        {                       /* Both keys changed */
-          TEST_KEY_SET_OR(cmp23, i2.key, i3.key) goto err;
-          if (cmp23==0)
-            {                   /* dueling inserts or deletes */
-              merge_error(i1.position, i2.position, i3.position, 4);
-              goto err;
-            }
-          if (cmp12 > 0)
-            {                   /* insert i2 */
-              if (cmp23 > 0)
-                {               /* insert i3 first */
-                  if (merge_output(r, &i3, mapping) < 0) goto err;
-                  if (i3.next(&i3) < 0) goto err;
-                }
-              else
-                {               /* insert i2 first */
-                  if (merge_output(r, &i2, mapping) < 0) goto err;
-                  if (i2.next(&i2) < 0) goto err;
-                }
-            }
-          else if (cmp13 > 0)
-            {                   /* Insert i3 */
-              if (merge_output(r, &i3, mapping) < 0) goto err;
-              if (i3.next(&i3) < 0) goto err;
-            }
-          else
-            {                   /* 1<2 and 1<3:  both deleted 1.key */
-	      merge_error(i1.position, i2.position, i3.position, 5);
-              goto err;
-            }
-        }
-    }
-
-  while (i2.position >= 0 && i3.position >= 0)
-    {                           /* New inserts */
-      TEST_KEY_SET_OR(cmp23, i2.key, i3.key) goto err;
-      if (cmp23==0)
-        {                       /* dueling inserts */
-          merge_error(i1.position, i2.position, i3.position, 6);
-          goto err;
-        }
-      if (cmp23 > 0)
-        {                       /* insert i3 */
-          if (merge_output(r, &i3, mapping) < 0) goto err;
-          if (i3.next(&i3) < 0) goto err;
-        }
-      else
-        {                       /* insert i2 */
-          if (merge_output(r, &i2, mapping) < 0) goto err;
-          if (i2.next(&i2) < 0) goto err;
-        }
-    }
-
-  while (i1.position >= 0 && i2.position >= 0)
-    {                           /* remainder of i1 deleted in i3 */
-      TEST_KEY_SET_OR(cmp12, i1.key, i2.key) goto err;
-      if (cmp12 > 0)
-        {                       /* insert i2 */
-          if (merge_output(r, &i2, mapping) < 0) goto err;
-          if (i2.next(&i2) < 0) goto err;
-        }
-      else if (cmp12==0 && (set || (TEST_VALUE(i1.value, i2.value) == 0)))
-        {                       /* delete i3 */
-          if (i1.next(&i1) < 0) goto err;
-          if (i2.next(&i2) < 0) goto err;
-        }
-      else
-        {                       /* Dueling deletes or delete and change */
-          merge_error(i1.position, i2.position, i3.position, 7);
-          goto err;
-        }
-    }
-
-  while (i1.position >= 0 && i3.position >= 0)
-    {                           /* remainder of i1 deleted in i2 */
-      TEST_KEY_SET_OR(cmp13, i1.key, i3.key) goto err;
-      if (cmp13 > 0)
-        {                       /* insert i3 */
-          if (merge_output(r, &i3, mapping) < 0) goto err;
-          if (i3.next(&i3) < 0) goto err;
-        }
-      else if (cmp13==0 && (set || (TEST_VALUE(i1.value, i3.value) == 0)))
-        {                       /* delete i2 */
-          if (i1.next(&i1) < 0) goto err;
-          if (i3.next(&i3) < 0) goto err;
-        }
-      else
-        {                       /* Dueling deletes or delete and change */
-          merge_error(i1.position, i2.position, i3.position, 8);
-          goto err;
-        }
-    }
-
-  if (i1.position >= 0)
-    {                           /* Dueling deletes */
-      merge_error(i1.position, i2.position, i3.position, 9);
-      goto err;
-    }
-
-  while (i2.position >= 0)
-    {                           /* Inserting i2 at end */
-      if (merge_output(r, &i2, mapping) < 0) goto err;
-      if (i2.next(&i2) < 0) goto err;
-    }
-
-  while (i3.position >= 0)
-    {                           /* Inserting i3 at end */
-      if (merge_output(r, &i3, mapping) < 0) goto err;
-      if (i3.next(&i3) < 0) goto err;
-    }
-
-  /* If the output bucket is empty, conflict resolution doesn't have
-   * enough info to unlink it from its containing BTree correctly.
-   */
-  if (r->len == 0)
-    {
-      merge_error(-1, -1, -1, 10);
-      goto err;
-    }
-
-  finiSetIteration(&i1);
-  finiSetIteration(&i2);
-  finiSetIteration(&i3);
-
-  if (s1->next)
-    {
-      Py_INCREF(s1->next);
-      r->next = s1->next;
-    }
-  s = bucket_getstate(r);
-  Py_DECREF(r);
-
-  return s;
-
- err:
-  finiSetIteration(&i1);
-  finiSetIteration(&i2);
-  finiSetIteration(&i3);
-  Py_XDECREF(r);
-  return NULL;
-}
diff --git a/branches/bug1734/src/BTrees/OIBTree.py b/branches/bug1734/src/BTrees/OIBTree.py
deleted file mode 100644
index 5ff6cbb5..00000000
--- a/branches/bug1734/src/BTrees/OIBTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# hack to overcome dynamic-linking headache.
-from _OIBTree import *
diff --git a/branches/bug1734/src/BTrees/OOBTree.py b/branches/bug1734/src/BTrees/OOBTree.py
deleted file mode 100644
index d52a1b48..00000000
--- a/branches/bug1734/src/BTrees/OOBTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# hack to overcome dynamic-linking headache.
-from _OOBTree import *
diff --git a/branches/bug1734/src/BTrees/SETUP.cfg b/branches/bug1734/src/BTrees/SETUP.cfg
deleted file mode 100644
index e2d46a80..00000000
--- a/branches/bug1734/src/BTrees/SETUP.cfg
+++ /dev/null
@@ -1,120 +0,0 @@
-# Extension information for zpkg.
-
-# These extensions depend on a header provided by another component;
-# this header is named using depends-on in the sections for each
-# extension that requires it, but must also be identified as a public
-# header in the component that needs it.
-
-<extension _fsBTree>
-  source     _fsBTree.c
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
-
-
-<extension _IIBTree>
-  source     _IIBTree.c
-
-  # Specialization:
-  depends-on intkeymacros.h
-  depends-on intvaluemacros.h
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
-
-<extension _IFBTree>
-  source     _IFBTree.c
-
-  # Specialization:
-  depends-on intkeymacros.h
-  depends-on floatvaluemacros.h
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
-
-
-<extension _IOBTree>
-  source     _IOBTree.c
-
-  # Specialization:
-  depends-on intkeymacros.h
-  depends-on objectvaluemacros.h
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
-
-
-<extension _OIBTree>
-  source     _OIBTree.c
-
-  # Specialization:
-  depends-on objectkeymacros.h
-  depends-on intvaluemacros.h
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
-
-
-<extension _OOBTree>
-  source     _OOBTree.c
-
-  # Specialization:
-  depends-on objectkeymacros.h
-  depends-on objectvaluemacros.h
-
-  # Common btree includes:
-  depends-on BTreeItemsTemplate.c
-  depends-on BTreeModuleTemplate.c
-  depends-on BTreeTemplate.c
-  depends-on BucketTemplate.c
-  depends-on MergeTemplate.c
-  depends-on SetOpTemplate.c
-  depends-on SetTemplate.c
-  depends-on TreeSetTemplate.c
-  depends-on sorters.c
-</extension>
diff --git a/branches/bug1734/src/BTrees/SetOpTemplate.c b/branches/bug1734/src/BTrees/SetOpTemplate.c
deleted file mode 100644
index 9f2bdc47..00000000
--- a/branches/bug1734/src/BTrees/SetOpTemplate.c
+++ /dev/null
@@ -1,557 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-/****************************************************************************
- Set operations
- ****************************************************************************/
-
-#define SETOPTEMPLATE_C "$Id$\n"
-
-#ifdef KEY_CHECK
-static int
-nextKeyAsSet(SetIteration *i)
-{
-    if (i->position >= 0) {
-        if (i->position) {
-            DECREF_KEY(i->key);
-            i->position = -1;
-        }
-        else
-            i->position = 1;
-    }
-    return 0;
-}
-#endif
-
-/* initSetIteration
- *
- * Start the set iteration protocol.  See the comments at struct SetIteration.
- *
- * Arguments
- *      i           The address of a SetIteration control struct.
- *      s           The address of the set, bucket, BTree, ..., to be iterated.
- *      useValues   Boolean; if true, and s has values (is a mapping), copy
- *                  them into i->value each time i->next() is called; else
- *                  ignore s's values even if s is a mapping.
- *
- * Return
- *      0 on success; -1 and an exception set if error.
- *      i.usesValue is set to 1 (true) if s has values and useValues was
- *          true; else usesValue is set to 0 (false).
- *      i.set gets a new reference to s, or to some other object used to
- *          iterate over s.
- *      i.position is set to 0.
- *      i.next is set to an appropriate iteration function.
- *      i.key and i.value are left alone.
- *
- * Internal
- *      i.position < 0 means iteration terminated.
- *      i.position = 0 means iteration hasn't yet begun (next() hasn't
- *          been called yet).
- *      In all other cases, i.key, and possibly i.value, own references.
- *          These must be cleaned up, either by next() routines, or by
- *          finiSetIteration.
- *      next() routines must ensure the above.  They should return without
- *          doing anything when i.position < 0.
- *      It's the responsibility of {init, fini}SetIteration to clean up
- *          the reference in i.set, and to ensure that no stale references
- *          live in i.key or i.value if iteration terminates abnormally.
- *          A SetIteration struct has been cleaned up iff i.set is NULL.
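- *
- * Typical use (as in set_operation() below)
- *      Call initSetIteration(&i, s, useValues), then i.next(&i) once
- *      to load the first element.  Loop while i.position >= 0,
- *      consuming i.key (and i.value when i.usesValue).  Call
- *      finiSetIteration(&i) on all exit paths.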
- */
-static int
-initSetIteration(SetIteration *i, PyObject *s, int useValues)
-{
-  i->set = NULL;
-  i->position = -1;     /* set to 0 only on normal return */
-  i->usesValue = 0;     /* assume it's a set or that values aren't iterated */
-
-  if (PyObject_IsInstance(s, (PyObject *)&BucketType))
-    {
-      i->set = s;
-      Py_INCREF(s);
-
-      if (useValues)
-        {
-          i->usesValue = 1;
-          i->next = nextBucket;
-        }
-      else
-        i->next = nextSet;
-    }
-  else if (PyObject_IsInstance(s, (PyObject *)&SetType))
-    {
-      i->set = s;
-      Py_INCREF(s);
-      i->next = nextSet;
-    }
-  else if (PyObject_IsInstance(s, (PyObject *)&BTreeType))
-    {
-      i->set = BTree_rangeSearch(BTREE(s), NULL, NULL, 'i');
-      UNLESS(i->set) return -1;
-
-      if (useValues)
-        {
-          i->usesValue = 1;
-          i->next = nextBTreeItems;
-        }
-      else
-        i->next = nextTreeSetItems;
-    }
-  else if (PyObject_IsInstance(s, (PyObject *)&TreeSetType))
-    {
-      i->set = BTree_rangeSearch(BTREE(s), NULL, NULL, 'k');
-      UNLESS(i->set) return -1;
-      i->next = nextTreeSetItems;
-    }
-#ifdef KEY_CHECK
-  else if (KEY_CHECK(s))
-    {
-      int copied = 1;
-      COPY_KEY_FROM_ARG(i->key, s, copied);
-      UNLESS (copied) return -1;
-
-      INCREF_KEY(i->key);
-      i->set = s;
-      Py_INCREF(s);
-      i->next = nextKeyAsSet;
-    }
-#endif
-  else
-    {
-      PyErr_SetString(PyExc_TypeError, "invalid argument");
-      return -1;
-    }
-
-  i->position = 0;
-
-  return 0;
-}
-
-#ifndef MERGE_WEIGHT
-#define MERGE_WEIGHT(O, w) (O)
-#endif
-
-static int
-copyRemaining(Bucket *r, SetIteration *i, int merge,
-/* See comment # 42 */
-#ifdef MERGE
-              VALUE_TYPE w)
-#else
-              int w)
-#endif
-{
-  while (i->position >= 0)
-    {
-      if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) return -1;
-      COPY_KEY(r->keys[r->len], i->key);
-      INCREF_KEY(r->keys[r->len]);
-
-      if (merge)
-        {
-          COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i->value, w));
-          INCREF_VALUE(r->values[r->len]);
-        }
-      r->len++;
-      if (i->next(i) < 0) return -1;
-    }
-
-  return 0;
-}
-
-/* This is the workhorse for all set merge operations:  the weighted and
- * unweighted flavors of union and intersection, and set difference.  The
- * algorithm is conceptually simple but the code is complicated due to all
- * the options.
- *
- * s1, s2
- *     The input collections to be merged.
- *
- * usevalues1, usevalues2
- *     Booleans.  In the output, should values from s1 (or s2) be used?  This
- *     only makes sense when an operation intends to support mapping outputs;
- *     these should both be false for operations that want pure set outputs.
- *
- * w1, w2
- *     If usevalues1(2) are true, these are the weights to apply to the
- *     input values.
- *
- * c1
- *     Boolean.  Should keys that appear in s1 but not s2 appear in the output?
- * c12
- *     Boolean.  Should keys that appear in both inputs appear in the output?
- * c2
- *     Boolean.  Should keys that appear in s2 but not s1 appear in the output?
- *
- * Returns NULL if error, else a Set or Bucket, depending on whether a set or
- * mapping was requested.
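- *
- * For example, union_m() below requests a pure set result via
- * set_operation(o1, o2, 0, 0, 1, 1, 1, 1, 1), while difference_m()
- * keeps o1's values and takes only the keys unique to o1 via
- * set_operation(o1, o2, 1, 0, 1, 0, 1, 0, 0).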
- */
-static PyObject *
-set_operation(PyObject *s1, PyObject *s2,
-              int usevalues1, int usevalues2,
-
-/* Comment # 42
-
-The following ifdef works around a template/type problem
-
-Weights are passed as integers. In particular, the weight passed by
-difference is one.  This works fine in the int value and float value
-cases but makes no sense in the object value case.  In the object
-value case, we don't do merging, so we don't use the weights, so it
-doesn't matter what they are. 
-*/
-#ifdef MERGE
-              VALUE_TYPE w1, VALUE_TYPE w2,
-#else
-              int w1, int w2,
-#endif
-              int c1, int c12, int c2)
-
-
-{
-  Bucket *r=0;
-  SetIteration i1 = {0,0,0}, i2 = {0,0,0};
-  int cmp, merge;
-
-  if (initSetIteration(&i1, s1, usevalues1) < 0) goto err;
-  if (initSetIteration(&i2, s2, usevalues2) < 0) goto err;
-  merge = i1.usesValue | i2.usesValue;
-
-  if (merge)
-    {
-#ifndef MERGE
-      if (c12 && i1.usesValue && i2.usesValue) goto invalid_set_operation;
-#endif
-      if (! i1.usesValue && i2.usesValue)
-        {
-          SetIteration t;
-          int i;
-
-/* See comment # 42 above */
-#ifdef MERGE
-          VALUE_TYPE v;
-#else
-          int v;
-#endif
-
-          t=i1; i1=i2; i2=t;
-          i=c1; c1=c2; c2=i;
-          v=w1; w1=w2; w2=v;
-        }
-#ifdef MERGE_DEFAULT
-      i1.value=MERGE_DEFAULT;
-      i2.value=MERGE_DEFAULT;
-#else
-      if (i1.usesValue)
-        {
-          if (! i2.usesValue && c2) goto invalid_set_operation;
-        }
-      else
-        {
-          if (c1 || c12) goto invalid_set_operation;
-        }
-#endif
-
-      UNLESS(r=BUCKET(PyObject_CallObject(OBJECT(&BucketType), NULL)))
-        goto err;
-    }
-  else
-    {
-      UNLESS(r=BUCKET(PyObject_CallObject(OBJECT(&SetType), NULL)))
-        goto err;
-    }
-
-  if (i1.next(&i1) < 0) goto err;
-  if (i2.next(&i2) < 0) goto err;
-
-  while (i1.position >= 0 && i2.position >= 0)
-    {
-      TEST_KEY_SET_OR(cmp, i1.key, i2.key) goto err;
-      if(cmp < 0)
-	{
-	  if(c1)
-	    {
-	      if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) goto err;
-              COPY_KEY(r->keys[r->len], i1.key);
-              INCREF_KEY(r->keys[r->len]);
-              if (merge)
-                {
-                  COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i1.value, w1));
-                  INCREF_VALUE(r->values[r->len]);
-                }
-	      r->len++;
-	    }
-          if (i1.next(&i1) < 0) goto err;
-	}
-      else if(cmp==0)
-	{
-	  if(c12)
-	    {
-	      if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) goto err;
-              COPY_KEY(r->keys[r->len], i1.key);
-              INCREF_KEY(r->keys[r->len]);
-              if (merge)
-                {
-#ifdef MERGE
-                  r->values[r->len] = MERGE(i1.value, w1, i2.value, w2);
-#else
-                  COPY_VALUE(r->values[r->len], i1.value);
-                  INCREF_VALUE(r->values[r->len]);
-#endif
-                }
-	      r->len++;
-	    }
-          if (i1.next(&i1) < 0) goto err;
-          if (i2.next(&i2) < 0) goto err;
-	}
-      else
-	{
-	  if(c2)
-	    {
-	      if(r->len >= r->size && Bucket_grow(r, -1, ! merge) < 0) goto err;
-              COPY_KEY(r->keys[r->len], i2.key);
-              INCREF_KEY(r->keys[r->len]);
-              if (merge)
-                {
-                  COPY_VALUE(r->values[r->len], MERGE_WEIGHT(i2.value, w2));
-                  INCREF_VALUE(r->values[r->len]);
-                }
-	      r->len++;
-	    }
-          if (i2.next(&i2) < 0) goto err;
-	}
-    }
-  if(c1 && copyRemaining(r, &i1, merge, w1) < 0) goto err;
-  if(c2 && copyRemaining(r, &i2, merge, w2) < 0) goto err;
-
-
-  finiSetIteration(&i1);
-  finiSetIteration(&i2);
-
-  return OBJECT(r);
-
-#ifndef MERGE_DEFAULT
-invalid_set_operation:
-  PyErr_SetString(PyExc_TypeError, "invalid set operation");
-#endif
-
-err:
-  finiSetIteration(&i1);
-  finiSetIteration(&i2);
-  Py_XDECREF(r);
-  return NULL;
-}
-
-static PyObject *
-difference_m(PyObject *ignored, PyObject *args)
-{
-  PyObject *o1, *o2;
-
-  UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL;
-
-
-  if (o1 == Py_None || o2 == Py_None)
-    {
-      /* difference(None, X) -> None; difference(X, None) -> X */
-      Py_INCREF(o1);
-      return o1;
-    }
-
-  return set_operation(o1, o2, 1, 0, /* preserve values from o1, ignore o2's */
-                       1, 0,         /* o1's values multiplied by 1 */
-                       1, 0, 0);     /* take only keys unique to o1 */
-}
-
-static PyObject *
-union_m(PyObject *ignored, PyObject *args)
-{
-  PyObject *o1, *o2;
-
-  UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL;
-
-  if (o1 == Py_None)
-    {
-      Py_INCREF(o2);
-      return o2;
-    }
-  else if (o2 == Py_None)
-    {
-      Py_INCREF(o1);
-      return o1;
-    }
-
-  return set_operation(o1, o2, 0, 0,    /* ignore values in both */
-                       1, 1,            /* the weights are irrelevant */
-                       1, 1, 1);        /* take all keys */
-}
-
-static PyObject *
-intersection_m(PyObject *ignored, PyObject *args)
-{
-  PyObject *o1, *o2;
-
-  UNLESS(PyArg_ParseTuple(args, "OO", &o1, &o2)) return NULL;
-
-  if (o1 == Py_None)
-    {
-      Py_INCREF(o2);
-      return o2;
-    }
-  else if (o2 == Py_None)
-    {
-      Py_INCREF(o1);
-      return o1;
-    }
-
-  return set_operation(o1, o2, 0, 0,    /* ignore values in both */
-                       1, 1,            /* the weights are irrelevant */
-                       0, 1, 0);        /* take only keys common to both */
-}
-
-#ifdef MERGE
-
-static PyObject *
-wunion_m(PyObject *ignored, PyObject *args)
-{
-  PyObject *o1, *o2;
-  VALUE_TYPE w1 = 1, w2 = 1;
-
-  UNLESS(PyArg_ParseTuple(args, "OO|" VALUE_PARSE VALUE_PARSE, 
-                          &o1, &o2, &w1, &w2)
-         ) return NULL;
-
-  if (o1 == Py_None)
-    return Py_BuildValue(VALUE_PARSE "O", (o2 == Py_None ? 0 : w2), o2);
-  else if (o2 == Py_None)
-    return Py_BuildValue(VALUE_PARSE "O", w1, o1);
-
-  o1 = set_operation(o1, o2, 1, 1, w1, w2, 1, 1, 1);
-  if (o1) 
-    ASSIGN(o1, Py_BuildValue(VALUE_PARSE "O", (VALUE_TYPE)1, o1));
-
-  return o1;
-}
-
-static PyObject *
-wintersection_m(PyObject *ignored, PyObject *args)
-{
-  PyObject *o1, *o2;
-  VALUE_TYPE w1 = 1, w2 = 1;
-
-  UNLESS(PyArg_ParseTuple(args, "OO|" VALUE_PARSE VALUE_PARSE, 
-                          &o1, &o2, &w1, &w2)
-         ) return NULL;
-
-  if (o1 == Py_None)
-    return Py_BuildValue(VALUE_PARSE "O", (o2 == Py_None ? 0 : w2), o2);
-  else if (o2 == Py_None)
-    return Py_BuildValue(VALUE_PARSE "O", w1, o1);
-
-  o1 = set_operation(o1, o2, 1, 1, w1, w2, 0, 1, 0);
-  if (o1)
-    ASSIGN(o1, Py_BuildValue(VALUE_PARSE "O",
-            ((o1->ob_type == (PyTypeObject*)(&SetType)) ? w2+w1 : 1),
-                             o1));
-
-  return o1;
-}
-
-#endif
-
-#ifdef MULTI_INT_UNION
-#include "sorters.c"
-
-/* Input is a sequence of integer sets (or convertible to sets by the
-   set iteration protocol).  Output is the union of the sets.  The point
-   is to run much faster than doing pairs of unions.
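-
-   For example (hypothetical Python usage; multiunion is compiled
-   only for the int-key flavors):
-   multiunion([IISet([1, 2]), IISet([2, 3])]) yields the same keys
-   as union(IISet([1, 2]), IISet([2, 3])).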
-*/
-static PyObject *
-multiunion_m(PyObject *ignored, PyObject *args)
-{
-    PyObject *seq;          /* input sequence */
-    int n;                  /* length of input sequence */
-    PyObject *set = NULL;   /* an element of the input sequence */
-    Bucket *result;         /* result set */
-    SetIteration setiter = {0};
-    int i;
-
-    UNLESS(PyArg_ParseTuple(args, "O", &seq))
-        return NULL;
-
-    n = PyObject_Length(seq);
-    if (n < 0)
-        return NULL;
-
-    /* Construct an empty result set. */
-    result = BUCKET(PyObject_CallObject(OBJECT(&SetType), NULL));
-    if (result == NULL)
-        return NULL;
-
-    /* For each set in the input sequence, append its elements to the result
-       set.  At this point, we ignore the possibility of duplicates. */
-    for (i = 0; i < n; ++i) {
-        set = PySequence_GetItem(seq, i);
-        if (set == NULL)
-            goto Error;
-
-        /* If set is a bucket, do a straight resize + memcpy. */
-        if (set->ob_type == (PyTypeObject*)&SetType ||
-            set->ob_type == (PyTypeObject*)&BucketType)
-        {
-            Bucket *b = BUCKET(set);
-            int status = 0;
-
-            UNLESS (PER_USE(b)) goto Error;
-            if (b->len)
-                status = bucket_append(result, b, 0, b->len, 0, i < n-1);
-            PER_UNUSE(b);
-            if (status < 0) goto Error;
-        }
-        else {
-            /* No cheap way:  iterate over set's elements one at a time. */
-            if (initSetIteration(&setiter, set, 0) < 0) goto Error;
-            if (setiter.next(&setiter) < 0) goto Error;
-            while (setiter.position >= 0) {
-                if (result->len >= result->size && Bucket_grow(result, -1, 1) < 0)
-                    goto Error;
-                COPY_KEY(result->keys[result->len], setiter.key);
-                ++result->len;
-                /* We know the key is an int, so no need to incref it. */
-                if (setiter.next(&setiter) < 0) goto Error;
-            }
-            finiSetIteration(&setiter);
-        }
-        Py_DECREF(set);
-        set = NULL;
-    }
-
-    /* Combine, sort, remove duplicates, and reset the result's len.
-       If the set shrinks (which happens if and only if there are
-       duplicates), no point to realloc'ing the set smaller, as we
-       expect the result set to be short-lived.
-    */
-    if (result->len > 0) {
-        size_t newlen;          /* number of elements in final result set */
-        newlen = sort_int4_nodups(result->keys, (size_t)result->len);
-        result->len = (int)newlen;
-    }
-    return (PyObject *)result;
-
-Error:
-    Py_DECREF(result);
-    Py_XDECREF(set);
-    finiSetIteration(&setiter);
-    return NULL;
-}
-#endif
diff --git a/branches/bug1734/src/BTrees/SetTemplate.c b/branches/bug1734/src/BTrees/SetTemplate.c
deleted file mode 100644
index 740ca549..00000000
--- a/branches/bug1734/src/BTrees/SetTemplate.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define SETTEMPLATE_C "$Id$\n"
-
-static PyObject *
-Set_insert(Bucket *self, PyObject *args)
-{
-  PyObject *key;
-  int i;
-
-  UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
-  if ( (i=_bucket_set(self, key, Py_None, 1, 1, 0)) < 0) return NULL;
-  return PyInt_FromLong(i);
-}
-
-/* _Set_update and _TreeSet_update are identical except for the
-   function they call to add the element to the set.
-*/
-
-static int
-_Set_update(Bucket *self, PyObject *seq)
-{
-    int n = -1;
-    PyObject *iter, *v;
-    int ind;
-
-    iter = PyObject_GetIter(seq);
-    if (iter == NULL)
-	return -1;
-
-    while (1) {
-	v = PyIter_Next(iter);
-	if (v == NULL) {
-	    if (PyErr_Occurred())
-		goto err;
-	    else
-		break;
-	}
-	ind = _bucket_set(self, v, Py_None, 1, 1, 0);
-	Py_DECREF(v);
-	if (ind < 0)
-	    goto err;
-	else
-	    n += ind;
-    }
-    /* n starts out at -1, which is the error return value.  If
-       this point is reached, then there is no error.  n must be
-       incremented to account for the initial value of -1 instead of
-       0.
-    */
-    n++;
-
- err:
-    Py_DECREF(iter);
-    return n;
-}
-
-static PyObject *
-Set_update(Bucket *self, PyObject *args)
-{
-    PyObject *seq = NULL;
-    int n = 0;
-
-    if (!PyArg_ParseTuple(args, "|O:update", &seq))
-	return NULL;
-
-    if (seq) {
-	n = _Set_update(self, seq);
-	if (n < 0)
-	    return NULL;
-    }
-
-    return PyInt_FromLong(n);
-}
-
-static PyObject *
-Set_remove(Bucket *self, PyObject *args)
-{
-  PyObject *key;
-
-  UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
-  if (_bucket_set(self, key, NULL, 0, 1, 0) < 0) return NULL;
-
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static int
-_set_setstate(Bucket *self, PyObject *args)
-{
-  PyObject *k, *items;
-  Bucket *next=0;
-  int i, l, copied=1;
-  KEY_TYPE *keys;
-
-  UNLESS (PyArg_ParseTuple(args, "O|O", &items, &next))
-    return -1;
-
-  if ((l=PyTuple_Size(items)) < 0) return -1;
-
-  for (i=self->len; --i >= 0; )
-    {
-      DECREF_KEY(self->keys[i]);
-    }
-  self->len=0;
-
-  if (self->next)
-    {
-      Py_DECREF(self->next);
-      self->next=0;
-    }
-
-  if (l > self->size)
-    {
-      UNLESS (keys=BTree_Realloc(self->keys, sizeof(KEY_TYPE)*l)) return -1;
-      self->keys=keys;
-      self->size=l;
-    }
-
-  for (i=0; i<l; i++)
-    {
-      k=PyTuple_GET_ITEM(items, i);
-      COPY_KEY_FROM_ARG(self->keys[i], k, copied);
-      UNLESS (copied) return -1;
-      INCREF_KEY(self->keys[i]);
-    }
-
-  self->len=l;
-
-  if (next)
-    {
-      self->next=next;
-      Py_INCREF(next);
-    }
-
-  return 0;
-}
-
-static PyObject *
-set_setstate(Bucket *self, PyObject *args)
-{
-  int r;
-
-  UNLESS (PyArg_ParseTuple(args, "O", &args)) return NULL;
-
-  PER_PREVENT_DEACTIVATION(self);
-  r=_set_setstate(self, args);
-  PER_UNUSE(self);
-
-  if (r < 0) return NULL;
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static struct PyMethodDef Set_methods[] = {
-  {"__getstate__", (PyCFunction) bucket_getstate,	METH_VARARGS,
-   "__getstate__() -- Return the picklable state of the object"},
-
-  {"__setstate__", (PyCFunction) set_setstate,	METH_VARARGS,
-   "__setstate__() -- Set the state of the object"},
-
-  {"keys",	(PyCFunction) bucket_keys,	METH_KEYWORDS,
-     "keys() -- Return the keys"},
-
-  {"has_key",	(PyCFunction) bucket_has_key,	METH_O,
-     "has_key(key) -- Test whether the bucket contains the given key"},
-
-  {"clear",	(PyCFunction) bucket_clear,	METH_VARARGS,
-   "clear() -- Remove all of the items from the bucket"},
-
-  {"maxKey", (PyCFunction) Bucket_maxKey,	METH_VARARGS,
-   "maxKey([key]) -- Find the maximum key\n\n"
-   "If an argument is given, find the maximum <= the argument"},
-
-  {"minKey", (PyCFunction) Bucket_minKey,	METH_VARARGS,
-   "minKey([key]) -- Find the minimum key\n\n"
-   "If an argument is given, find the minimum >= the argument"},
-
-#ifdef PERSISTENT
-  {"_p_resolveConflict", (PyCFunction) bucket__p_resolveConflict, METH_VARARGS,
-   "_p_resolveConflict() -- Reinitialize from a newly created copy"},
-
-  {"_p_deactivate", (PyCFunction) bucket__p_deactivate, METH_KEYWORDS,
-   "_p_deactivate() -- Reinitialize from a newly created copy"},
-#endif
-
-  {"insert",	(PyCFunction)Set_insert,	METH_VARARGS,
-   "insert(id,[ignored]) -- Add a key to the set"},
-
-  {"update",	(PyCFunction)Set_update,	METH_VARARGS,
-   "update(seq) -- Add the items from the given sequence to the set"},
-
-  {"remove",	(PyCFunction)Set_remove,	METH_VARARGS,
-   "remove(id) -- Remove an id from the set"},
-
-  {NULL,		NULL}		/* sentinel */
-};
-
-static int
-Set_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    PyObject *v = NULL;
-
-    if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "Set", &v))
-	return -1;
-
-    if (v)
-	return _Set_update((Bucket *)self, v);
-    else
-	return 0;
-}
-
-
-
-static PyObject *
-set_repr(Bucket *self)
-{
-  static PyObject *format;
-  PyObject *r, *t;
-
-  if (!format)
-      format = PyString_FromString(MOD_NAME_PREFIX "Set(%s)");
-  UNLESS (t = PyTuple_New(1)) return NULL;
-  UNLESS (r = bucket_keys(self, NULL, NULL)) goto err;
-  PyTuple_SET_ITEM(t, 0, r);
-  r = t;
-  ASSIGN(r, PyString_Format(format, r));
-  return r;
-err:
-  Py_DECREF(t);
-  return NULL;
-}
-
-static int
-set_length(Bucket *self)
-{
-  int r;
-
-  PER_USE_OR_RETURN(self, -1);
-  r = self->len;
-  PER_UNUSE(self);
-
-  return r;
-}
-
-static PyObject *
-set_item(Bucket *self, int index)
-{
-  PyObject *r=0;
-
-  PER_USE_OR_RETURN(self, NULL);
-  if (index >= 0 && index < self->len)
-    {
-      COPY_KEY_TO_OBJECT(r, self->keys[index]);
-    }
-  else
-    IndexError(index);
-
-  PER_UNUSE(self);
-
-  return r;
-}
-
-static PySequenceMethods set_as_sequence = {
-	(inquiry)set_length,		/* sq_length */
-	(binaryfunc)0,                  /* sq_concat */
-	(intargfunc)0,                  /* sq_repeat */
-	(intargfunc)set_item,           /* sq_item */
-	(intintargfunc)0,               /* sq_slice */
-	(intobjargproc)0,               /* sq_ass_item */
-	(intintobjargproc)0,            /* sq_ass_slice */
-        (objobjproc)bucket_contains,    /* sq_contains */
-        0,                              /* sq_inplace_concat */
-        0,                              /* sq_inplace_repeat */
-};
-
-static PyTypeObject SetType = {
-    PyObject_HEAD_INIT(NULL) /* PyPersist_Type */
-    0,					/* ob_size */
-    MODULE_NAME MOD_NAME_PREFIX "Set",	/* tp_name */
-    sizeof(Bucket),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)bucket_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    (reprfunc)set_repr,			/* tp_repr */
-    0,					/* tp_as_number */
-    &set_as_sequence,			/* tp_as_sequence */
-    0,					/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
-	    Py_TPFLAGS_BASETYPE, 	/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)bucket_traverse,	/* tp_traverse */
-    (inquiry)bucket_tp_clear,		/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    (getiterfunc)Bucket_getiter,	/* tp_iter */
-    0,					/* tp_iternext */
-    Set_methods,			/* tp_methods */
-    Bucket_members,			/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    Set_init,				/* tp_init */
-    0,					/* tp_alloc */
-    0, /*PyType_GenericNew,*/		/* tp_new */
-};
-
-static int
-nextSet(SetIteration *i)
-{
-
-  if (i->position >= 0)
-    {
-      UNLESS(PER_USE(BUCKET(i->set))) return -1;
-
-      if (i->position)
-        {
-          DECREF_KEY(i->key);
-        }
-
-      if (i->position < BUCKET(i->set)->len)
-        {
-          COPY_KEY(i->key, BUCKET(i->set)->keys[i->position]);
-          INCREF_KEY(i->key);
-          i->position ++;
-        }
-      else
-        {
-          i->position = -1;
-          PER_ACCESSED(BUCKET(i->set));
-        }
-
-      PER_ALLOW_DEACTIVATION(BUCKET(i->set));
-    }
-
-
-  return 0;
-}
diff --git a/branches/bug1734/src/BTrees/TreeSetTemplate.c b/branches/bug1734/src/BTrees/TreeSetTemplate.c
deleted file mode 100644
index 90b3a3a5..00000000
--- a/branches/bug1734/src/BTrees/TreeSetTemplate.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define TREESETTEMPLATE_C "$Id$\n"
-
-static PyObject *
-TreeSet_insert(BTree *self, PyObject *args)
-{
-    PyObject *key;
-    int i;
-
-    if (!PyArg_ParseTuple(args, "O:insert", &key)) 
-	return NULL;
-    i = _BTree_set(self, key, Py_None, 1, 1);
-    if (i < 0) 
-	return NULL;
-    return PyInt_FromLong(i);
-}
-
-/* _Set_update and _TreeSet_update are identical except for the
-   function they call to add the element to the set.
-*/
-
-static int
-_TreeSet_update(BTree *self, PyObject *seq)
-{
-    int n = -1;
-    PyObject *iter, *v;
-    int ind;
-
-    iter = PyObject_GetIter(seq);
-    if (iter == NULL)
-	return -1;
-
-    while (1) {
-	v = PyIter_Next(iter);
-	if (v == NULL) {
-	    if (PyErr_Occurred())
-		goto err;
-	    else
-		break;
-	}
-	ind = _BTree_set(self, v, Py_None, 1, 1);
-	Py_DECREF(v);
-	if (ind < 0)
-	    goto err;
-	else
-	    n += ind;
-    }
-    /* n starts out at -1, which is the error return value.  If
-       this point is reached, then there is no error.  n must be
-       incremented to account for the initial value of -1 instead of
-       0.
-    */
-    n++;
-
- err:
-    Py_DECREF(iter);
-    return n;
-}
-
-static PyObject *
-TreeSet_update(BTree *self, PyObject *args)
-{
-    PyObject *seq = NULL;
-    int n = 0;
-
-    if (!PyArg_ParseTuple(args, "|O:update", &seq))
-	return NULL;
-
-    if (seq) {
-	n = _TreeSet_update(self, seq);
-	if (n < 0)
-	    return NULL;
-    }
-
-    return PyInt_FromLong(n);
-}
-
-
-static PyObject *
-TreeSet_remove(BTree *self, PyObject *args)
-{
-  PyObject *key;
-
-  UNLESS (PyArg_ParseTuple(args, "O", &key)) return NULL;
-  if (_BTree_set(self, key, NULL, 0, 1) < 0) return NULL;
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static PyObject *
-TreeSet_setstate(BTree *self, PyObject *args)
-{
-  int r;
-
-  if (!PyArg_ParseTuple(args,"O",&args)) return NULL;
-
-  PER_PREVENT_DEACTIVATION(self);
-  r=_BTree_setstate(self, args, 1);
-  PER_UNUSE(self);
-
-  if (r < 0) return NULL;
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static struct PyMethodDef TreeSet_methods[] = {
-  {"__getstate__", (PyCFunction) BTree_getstate,	METH_NOARGS,
-   "__getstate__() -> state\n\n"
-   "Return the picklable state of the TreeSet."},
-
-  {"__setstate__", (PyCFunction) TreeSet_setstate,	METH_VARARGS,
-   "__setstate__(state)\n\n"
-   "Set the state of the TreeSet."},
-
-  {"has_key",	(PyCFunction) BTree_has_key,	METH_O,
-   "has_key(key)\n\n"
-   "Return true if the TreeSet contains the given key."},
-
-  {"keys",	(PyCFunction) BTree_keys,	METH_KEYWORDS,
-   "keys([min, max]) -> list of keys\n\n"
-   "Returns the keys of the TreeSet.  If min and max are supplied, only\n"
-   "keys greater than min and less than max are returned."},
-
-  {"maxKey", (PyCFunction) BTree_maxKey,	METH_VARARGS,
-   "maxKey([max]) -> key\n\n"
-   "Return the largest key in the BTree.  If max is specified, return\n"
-   "the largest key <= max."},
-
-  {"minKey", (PyCFunction) BTree_minKey,	METH_VARARGS,
-   "minKey([mi]) -> key\n\n"
-   "Return the smallest key in the BTree.  If min is specified, return\n"
-   "the smallest key >= min."},
-
-  {"clear",	(PyCFunction) BTree_clear,	METH_NOARGS,
-   "clear()\n\nRemove all of the items from the BTree."},
-
-  {"insert",	(PyCFunction)TreeSet_insert,	METH_VARARGS,
-   "insert(id,[ignored]) -- Add an id to the set"},
-
-  {"update",	(PyCFunction)TreeSet_update,	METH_VARARGS,
-   "update(collection)\n\n Add the items from the given collection."},
-
-  {"remove",	(PyCFunction)TreeSet_remove,	METH_VARARGS,
-   "remove(id) -- Remove a key from the set"},
-
-  {"_check", (PyCFunction) BTree_check,       METH_NOARGS,
-   "Perform sanity check on TreeSet, and raise exception if flawed."},
-
-#ifdef PERSISTENT
-  {"_p_resolveConflict", (PyCFunction) BTree__p_resolveConflict, METH_VARARGS,
-   "_p_resolveConflict() -- Reinitialize from a newly created copy"},
-
-  {"_p_deactivate", (PyCFunction) BTree__p_deactivate,	METH_KEYWORDS,
-   "_p_deactivate()\n\nReinitialize from a newly created copy."},
-#endif
-  {NULL,		NULL}		/* sentinel */
-};
-
-static PyMappingMethods TreeSet_as_mapping = {
-  (inquiry)BTree_length,		/*mp_length*/
-};
-
-static PySequenceMethods TreeSet_as_sequence = {
-    (inquiry)0,                     /* sq_length */
-    (binaryfunc)0,                  /* sq_concat */
-    (intargfunc)0,                  /* sq_repeat */
-    (intargfunc)0,                  /* sq_item */
-    (intintargfunc)0,               /* sq_slice */
-    (intobjargproc)0,               /* sq_ass_item */
-    (intintobjargproc)0,            /* sq_ass_slice */
-    (objobjproc)BTree_contains,     /* sq_contains */
-    0,                              /* sq_inplace_concat */
-    0,                              /* sq_inplace_repeat */
-};
-
-static int
-TreeSet_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    PyObject *v = NULL;
-
-    if (!PyArg_ParseTuple(args, "|O:" MOD_NAME_PREFIX "TreeSet", &v))
-	return -1;
-
-    if (v)
-	return _TreeSet_update((BTree *)self, v);
-    else
-	return 0;
-}
-
-static PyTypeObject TreeSetType = {
-    PyObject_HEAD_INIT(NULL) /* PyPersist_Type */
-    0,					/* ob_size */
-    MODULE_NAME MOD_NAME_PREFIX "TreeSet",/* tp_name */
-    sizeof(BTree),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)BTree_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    &BTree_as_number_for_nonzero,	/* tp_as_number */
-    &TreeSet_as_sequence,		/* tp_as_sequence */
-    &TreeSet_as_mapping,		/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
-	    Py_TPFLAGS_BASETYPE, 	/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)BTree_traverse,	/* tp_traverse */
-    (inquiry)BTree_tp_clear,		/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    (getiterfunc)BTree_getiter,		/* tp_iter */
-    0,					/* tp_iternext */
-    TreeSet_methods,			/* tp_methods */
-    BTree_members,			/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    TreeSet_init,			/* tp_init */
-    0,					/* tp_alloc */
-    0, /*PyType_GenericNew,*/		/* tp_new */
-};
diff --git a/branches/bug1734/src/BTrees/_IFBTree.c b/branches/bug1734/src/BTrees/_IFBTree.c
deleted file mode 100644
index c24d6898..00000000
--- a/branches/bug1734/src/BTrees/_IFBTree.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* IFBTree - int key, float value BTree
-
-   Implements a collection using int type keys
-   and float type values
-*/
-
-/* Setup template macros */
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "IF"
-#define INITMODULE init_IFBTree
-#define DEFAULT_MAX_BUCKET_SIZE 120
-#define DEFAULT_MAX_BTREE_SIZE 500
-
-#include "intkeymacros.h"
-#include "floatvaluemacros.h"
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/_IIBTree.c b/branches/bug1734/src/BTrees/_IIBTree.c
deleted file mode 100644
index b4a00a96..00000000
--- a/branches/bug1734/src/BTrees/_IIBTree.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* IIBTree - int key, int value BTree
-
-   Implements a collection using int type keys
-   and int type values
-*/
-
-/* Setup template macros */
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "II"
-#define INITMODULE init_IIBTree
-#define DEFAULT_MAX_BUCKET_SIZE 120
-#define DEFAULT_MAX_BTREE_SIZE 500
-
-#include "intkeymacros.h"
-#include "intvaluemacros.h"
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/_IOBTree.c b/branches/bug1734/src/BTrees/_IOBTree.c
deleted file mode 100644
index 1876cd88..00000000
--- a/branches/bug1734/src/BTrees/_IOBTree.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* IOBTree - int key, object value BTree
-
-   Implements a collection using int type keys
-   and object type values
-*/
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "IO"
-#define DEFAULT_MAX_BUCKET_SIZE 60
-#define DEFAULT_MAX_BTREE_SIZE 500
-#define INITMODULE init_IOBTree
-                                
-#include "intkeymacros.h"
-#include "objectvaluemacros.h"
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/_OIBTree.c b/branches/bug1734/src/BTrees/_OIBTree.c
deleted file mode 100644
index 4ee4b4a3..00000000
--- a/branches/bug1734/src/BTrees/_OIBTree.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* OIBTree - object key, int value BTree
-
-   Implements a collection using object type keys
-   and int type values
-*/
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "OI"
-#define INITMODULE init_OIBTree
-#define DEFAULT_MAX_BUCKET_SIZE 60
-#define DEFAULT_MAX_BTREE_SIZE 250
-                                
-#include "objectkeymacros.h"
-#include "intvaluemacros.h"
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/_OOBTree.c b/branches/bug1734/src/BTrees/_OOBTree.c
deleted file mode 100644
index 92c639d4..00000000
--- a/branches/bug1734/src/BTrees/_OOBTree.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* OOBTree - object key, object value BTree
-
-   Implements a collection using object type keys
-   and object type values
-*/
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "OO"
-#define INITMODULE init_OOBTree
-#define DEFAULT_MAX_BUCKET_SIZE 30
-#define DEFAULT_MAX_BTREE_SIZE 250
-                                
-#include "objectkeymacros.h"
-#include "objectvaluemacros.h"
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/__init__.py b/branches/bug1734/src/BTrees/__init__.py
deleted file mode 100644
index f8981395..00000000
--- a/branches/bug1734/src/BTrees/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This is a Python package.
diff --git a/branches/bug1734/src/BTrees/_fsBTree.c b/branches/bug1734/src/BTrees/_fsBTree.c
deleted file mode 100644
index 1248f411..00000000
--- a/branches/bug1734/src/BTrees/_fsBTree.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-#define MASTER_ID "$Id$\n"
-
-/* fsBTree - FileStorage index BTree
-
-   This BTree implements a mapping from 2-character strings
-   to six-character strings. This allows us to efficiently store
-   a FileStorage index as a nested mapping of 6-character oid prefix
-   to mapping of 2-character oid suffix to 6-character (byte) file
-   positions.
-*/
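-
-/* Sketch of the intended use:  ZODB's fsIndex splits an 8-byte oid so
-   that the leading 6 bytes select an fsBTree and the trailing 2 bytes
-   are the key within it; the stored 6-byte value is the 8-byte file
-   position with its two (almost always zero) high-order bytes dropped.
-   Roughly:  index[oid[:6]][oid[6:]] -> p64(pos)[2:]
-*/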
-
-typedef unsigned char char2[2];
-typedef unsigned char char6[6];
-
-/* Setup template macros */
-
-#define PERSISTENT
-
-#define MOD_NAME_PREFIX "fs"
-#define INITMODULE init_fsBTree
-#define DEFAULT_MAX_BUCKET_SIZE 500
-#define DEFAULT_MAX_BTREE_SIZE 500
-
-/*#include "intkeymacros.h"*/
-
-#define KEYMACROS_H "$Id$\n"
-#define KEY_TYPE char2
-#undef KEY_TYPE_IS_PYOBJECT
-#define KEY_CHECK(K) (PyString_Check(K) && PyString_GET_SIZE(K)==2)
-#define TEST_KEY_SET_OR(V, K, T) if ( ( (V) = ((*(K) < *(T) || (*(K) == *(T) && (K)[1] < (T)[1])) ? -1 : ((*(K) == *(T) && (K)[1] == (T)[1]) ? 0 : 1)) ), 0 )
-#define DECREF_KEY(KEY)
-#define INCREF_KEY(k)
-#define COPY_KEY(KEY, E) (*(KEY)=*(E), (KEY)[1]=(E)[1])
-#define COPY_KEY_TO_OBJECT(O, K) O=PyString_FromStringAndSize(K,2)
-#define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \
-  if (KEY_CHECK(ARG)) memcpy(TARGET, PyString_AS_STRING(ARG), 2); else { \
-      PyErr_SetString(PyExc_TypeError, "expected two-character string key"); \
-      (STATUS)=0; }
-
-/*#include "intvaluemacros.h"*/
-#define VALUEMACROS_H "$Id$\n"
-#define VALUE_TYPE char6
-#undef VALUE_TYPE_IS_PYOBJECT
-#define TEST_VALUE(K, T) memcmp(K,T,6)
-#define DECREF_VALUE(k)
-#define INCREF_VALUE(k)
-#define COPY_VALUE(V, E) (memcpy(V, E, 6))
-#define COPY_VALUE_TO_OBJECT(O, K) O=PyString_FromStringAndSize(K,6)
-#define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \
-  if ((PyString_Check(ARG) && PyString_GET_SIZE(ARG)==6)) \
-      memcpy(TARGET, PyString_AS_STRING(ARG), 6); else { \
-      PyErr_SetString(PyExc_TypeError, "expected six-character string value"); \
-      (STATUS)=0; }
-
-#define NORMALIZE_VALUE(V, MIN)
-#include "BTreeModuleTemplate.c"
diff --git a/branches/bug1734/src/BTrees/check.py b/branches/bug1734/src/BTrees/check.py
deleted file mode 100644
index bdf8288c..00000000
--- a/branches/bug1734/src/BTrees/check.py
+++ /dev/null
@@ -1,424 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""
-Utilities for working with BTrees (TreeSets, Buckets, and Sets) at a low
-level.
-
-The primary function is check(btree), which performs value-based consistency
-checks of a kind btree._check() does not perform.  See the function docstring
-for details.
-
-display(btree) displays the internal structure of a BTree (TreeSet, etc) to
-stdout.
-
-CAUTION:  When a BTree node has only a single bucket child, it can be
-impossible to get at the bucket from Python code (__getstate__() may squash
-the bucket object out of existence, as a pickling storage optimization).  In
-such a case, the code here synthesizes a temporary bucket with the same keys
-(and values, if the bucket is of a mapping type).  This has no first-order
-consequences, but can mislead if you pay close attention to reported object
-addresses and/or object identity (the synthesized bucket has an address
-that doesn't exist in the actual BTree).
-"""
-
-from types import TupleType
-
-from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet
-from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet
-from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet
-from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet
-from BTrees.IFBTree import IFBTree, IFBucket, IFSet, IFTreeSet
-
-from ZODB.utils import positive_id, oid_repr
-
-TYPE_UNKNOWN, TYPE_BTREE, TYPE_BUCKET = range(3)
-
-_type2kind = {IOBTree: (TYPE_BTREE, True),
-              IIBTree: (TYPE_BTREE, True),
-              IFBTree: (TYPE_BTREE, True),
-              OIBTree: (TYPE_BTREE, True),
-              OOBTree: (TYPE_BTREE, True),
-
-              IOBucket: (TYPE_BUCKET, True),
-              IIBucket: (TYPE_BUCKET, True),
-              IFBucket: (TYPE_BUCKET, True),
-              OIBucket: (TYPE_BUCKET, True),
-              OOBucket: (TYPE_BUCKET, True),
-
-              IOTreeSet: (TYPE_BTREE, False),
-              IITreeSet: (TYPE_BTREE, False),
-              IFTreeSet: (TYPE_BTREE, False),
-              OITreeSet: (TYPE_BTREE, False),
-              OOTreeSet: (TYPE_BTREE, False),
-
-              IOSet: (TYPE_BUCKET, False),
-              IISet: (TYPE_BUCKET, False),
-              IFSet: (TYPE_BUCKET, False),
-              OISet: (TYPE_BUCKET, False),
-              OOSet: (TYPE_BUCKET, False),
-             }
-
-# Return pair
-#
-#     TYPE_BTREE or TYPE_BUCKET, is_mapping
-
-def classify(obj):
-    return _type2kind[type(obj)]
-
-
-BTREE_EMPTY, BTREE_ONE, BTREE_NORMAL = range(3)
-
-# If the BTree is empty, returns
-#
-#     BTREE_EMPTY, [], []
-#
-# If the BTree has only one bucket, sometimes returns
-#
-#     BTREE_ONE, bucket_state, None
-#
-# Else returns
-#
-#     BTREE_NORMAL, list of keys, list of kids
-#
-# and the list of kids has one more entry than the list of keys.
-#
-# BTree.__getstate__() docs:
-#
-# For an empty BTree (self->len == 0), None.
-#
-# For a BTree with one child (self->len == 1), and that child is a bucket,
-# and that bucket has a NULL oid, a one-tuple containing a one-tuple
-# containing the bucket's state:
-#
-#     (
-#         (
-#              child[0].__getstate__(),
-#         ),
-#     )
-#
-# Else a two-tuple.  The first element is a tuple interleaving the BTree's
-# keys and direct children, of size 2*self->len - 1 (key[0] is unused and
-# is not saved).  The second element is the firstbucket:
-#
-#     (
-#          (child[0], key[1], child[1], key[2], child[2], ...,
-#                                       key[len-1], child[len-1]),
-#          self->firstbucket
-#     )
-
-_btree2bucket = {IOBTree: IOBucket,
-                 IOTreeSet: IOSet,
-
-                 IIBTree: IIBucket,
-                 IITreeSet: IISet,
-
-                 IFBTree: IFBucket,
-                 IFTreeSet: IFSet,
-
-                 OIBTree: OIBucket,
-                 OITreeSet: OISet,
-
-                 OOBTree: OOBucket,
-                 OOTreeSet: OOSet}
-
-def crack_btree(t, is_mapping):
-    state = t.__getstate__()
-    if state is None:
-        return BTREE_EMPTY, [], []
-
-    assert isinstance(state, TupleType)
-    if len(state) == 1:
-        state = state[0]
-        assert isinstance(state, TupleType) and len(state) == 1
-        state = state[0]
-        return BTREE_ONE, state, None
-
-    assert len(state) == 2
-    data, firstbucket = state
-    n = len(data)
-    assert n & 1
-    kids = []
-    keys = []
-    i = 0
-    for x in data:
-        if i & 1:
-            keys.append(x)
-        else:
-            kids.append(x)
-        i += 1
-    return BTREE_NORMAL, keys, kids
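-
-# A quick sketch of crack_btree() in action (illustrative; the exact
-# node layout depends on bucket sizes):
-#
-#     >>> t = OOBTree()
-#     >>> crack_btree(t, True)     # empty tree
-#     (0, [], [])                  # BTREE_EMPTY, [], []
-#     >>> t['a'] = 1
-#     >>> crack_btree(t, True)     # single embedded bucket
-#     (1, ('a', 1), None)          # BTREE_ONE, bucket state, None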
-
-# Returns
-#
-#     keys, values  # for a mapping; len(keys) == len(values) in this case
-# or
-#     keys, []      # for a set
-#
-# bucket.__getstate__() docs:
-#
-# For a set bucket (self->values is NULL), a one-tuple or two-tuple.  The
-# first element is a tuple of keys, of length self->len.  The second element
-# is the next bucket, present if and only if next is non-NULL:
-#
-#     (
-#          (keys[0], keys[1], ..., keys[len-1]),
-#          <self->next iff non-NULL>
-#     )
-#
-# For a mapping bucket (self->values is not NULL), a one-tuple or two-tuple.
-# The first element is a tuple interleaving keys and values, of length
-# 2 * self->len.  The second element is the next bucket, present iff next is
-# non-NULL:
-#
-#     (
-#          (keys[0], values[0], keys[1], values[1], ...,
-#                               keys[len-1], values[len-1]),
-#          <self->next iff non-NULL>
-#     )
-
-def crack_bucket(b, is_mapping):
-    state = b.__getstate__()
-    assert isinstance(state, TupleType)
-    assert 1 <= len(state) <= 2
-    data = state[0]
-    if not is_mapping:
-        return data, []
-    keys = []
-    values = []
-    n = len(data)
-    assert n & 1 == 0
-    i = 0
-    for x in data:
-        if i & 1:
-            values.append(x)
-        else:
-            keys.append(x)
-        i += 1
-    return keys, values
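-
-# Sketch:  for a mapping bucket whose state holds the interleaved data
-# ('a', 1, 'b', 2), crack_bucket() returns (['a', 'b'], [1, 2]); for a
-# set bucket it returns the keys as-is and [] for the values.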
-
-def type_and_adr(obj):
-    if hasattr(obj, '_p_oid'):
-        oid = oid_repr(obj._p_oid)
-    else:
-        oid = 'None'
-    return "%s (0x%x oid=%s)" % (type(obj).__name__, positive_id(obj), oid)
-
-# Walker implements a depth-first search of a BTree (or TreeSet or Set or
-# Bucket).  Subclasses must implement the visit_btree() and visit_bucket()
-# methods, and arrange to call the walk() method.  walk() calls the
-# visit_XYZ() methods once for each node in the tree, in depth-first
-# left-to-right order.
-
-class Walker:
-    def __init__(self, obj):
-        self.obj = obj
-
-    # obj is the BTree (BTree or TreeSet).
-    # path is a list of indices, from the root.  For example, if a BTree node
-    # is child[5] of child[3] of the root BTree, [3, 5].
-    # parent is the parent BTree object, or None if this is the root BTree.
-    # is_mapping is True for a BTree and False for a TreeSet.
-    # keys is a list of the BTree's internal keys.
-    # kids is a list of the BTree's children.
-    # If the BTree is an empty root node, keys == kids == [].
-    # Else len(kids) == len(keys) + 1.
-    # lo and hi are slice bounds on the values the elements of keys *should*
-    # lie in (lo inclusive, hi exclusive).  lo is None if there is no lower
-    # bound known, and hi is None if no upper bound is known.
-
-    def visit_btree(self, obj, path, parent, is_mapping,
-                    keys, kids, lo, hi):
-        raise NotImplementedError
-
-    # obj is the bucket (Bucket or Set).
-    # path is a list of indices, from the root.  For example, if a bucket
-    # node is child[5] of child[3] of the root BTree, [3, 5].
-    # parent is the parent BTree object.
-    # is_mapping is True for a Bucket and False for a Set.
-    # keys is a list of the bucket's keys.
-    # values is a list of the bucket's values.
-    # If is_mapping is false, values == [].  Else len(keys) == len(values).
-    # lo and hi are slice bounds on the values the elements of keys *should*
-    # lie in (lo inclusive, hi exclusive).  lo is None if there is no lower
-    # bound known, and hi is None if no upper bound is known.
-
-    def visit_bucket(self, obj, path, parent, is_mapping,
-                     keys, values, lo, hi):
-        raise NotImplementedError
-
-    def walk(self):
-        obj = self.obj
-        path = []
-        stack = [(obj, path, None, None, None)]
-        while stack:
-            obj, path, parent, lo, hi = stack.pop()
-            kind, is_mapping = classify(obj)
-            if kind is TYPE_BTREE:
-                bkind, keys, kids = crack_btree(obj, is_mapping)
-                if bkind is BTREE_NORMAL:
-                    # push the kids, in reverse order (so they're popped off
-                    # the stack in forward order)
-                    n = len(kids)
-                    for i in range(len(kids)-1, -1, -1):
-                        newlo, newhi = lo, hi
-                        if i < n-1:
-                            newhi = keys[i]
-                        if i > 0:
-                            newlo = keys[i-1]
-                        stack.append((kids[i],
-                                      path + [i],
-                                      obj,
-                                      newlo,
-                                      newhi))
-
-                elif bkind is BTREE_EMPTY:
-                    pass
-                else:
-                    assert bkind is BTREE_ONE
-                    # Yuck.  There isn't a bucket object to pass on, as
-                    # the bucket state is embedded directly in the BTree
-                    # state.  Synthesize a bucket.
-                    assert kids is None   # and "keys" is really the bucket
-                                          # state
-                    bucket = _btree2bucket[type(obj)]()
-                    bucket.__setstate__(keys)
-                    stack.append((bucket,
-                                  path + [0],
-                                  obj,
-                                  lo,
-                                  hi))
-                    keys = []
-                    kids = [bucket]
-
-                self.visit_btree(obj,
-                                 path,
-                                 parent,
-                                 is_mapping,
-                                 keys,
-                                 kids,
-                                 lo,
-                                 hi)
-            else:
-                assert kind is TYPE_BUCKET
-                keys, values = crack_bucket(obj, is_mapping)
-                self.visit_bucket(obj,
-                                  path,
-                                  parent,
-                                  is_mapping,
-                                  keys,
-                                  values,
-                                  lo,
-                                  hi)
-
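-# A minimal Walker subclass, sketching the visit protocol (Checker and
-# Printer below are the real clients):
-#
-#     class KeyCounter(Walker):
-#         def __init__(self, obj):
-#             Walker.__init__(self, obj)
-#             self.count = 0
-#         def visit_btree(self, obj, path, parent, is_mapping,
-#                         keys, kids, lo, hi):
-#             pass
-#         def visit_bucket(self, obj, path, parent, is_mapping,
-#                          keys, values, lo, hi):
-#             self.count += len(keys)  # every element lives in some bucket
-#
-#     w = KeyCounter(t); w.walk()   # now w.count == len(t)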
-
-class Checker(Walker):
-    def __init__(self, obj):
-        Walker.__init__(self, obj)
-        self.errors = []
-
-    def check(self):
-        self.walk()
-        if self.errors:
-            s = "Errors found in %s:" % type_and_adr(self.obj)
-            self.errors.insert(0, s)
-            s = "\n".join(self.errors)
-            raise AssertionError(s)
-
-    def visit_btree(self, obj, path, parent, is_mapping,
-                    keys, kids, lo, hi):
-        self.check_sorted(obj, path, keys, lo, hi)
-
-    def visit_bucket(self, obj, path, parent, is_mapping,
-                     keys, values, lo, hi):
-        self.check_sorted(obj, path, keys, lo, hi)
-
-    def check_sorted(self, obj, path, keys, lo, hi):
-        i, n = 0, len(keys)
-        for x in keys:
-            if lo is not None and not lo <= x:
-                s = "key %r < lower bound %r at index %d" % (x, lo, i)
-                self.complain(s, obj, path)
-            if hi is not None and not x < hi:
-                s = "key %r >= upper bound %r at index %d" % (x, hi, i)
-                self.complain(s, obj, path)
-            if i < n-1 and not x < keys[i+1]:
-                s = "key %r at index %d >= key %r at index %d" % (
-                    x, i, keys[i+1], i+1)
-                self.complain(s, obj, path)
-            i += 1
-
-    def complain(self, msg, obj, path):
-        s = "%s, in %s, path from root %s" % (
-                msg,
-                type_and_adr(obj),
-                ".".join(map(str, path)))
-        self.errors.append(s)
-
-class Printer(Walker):
-    def __init__(self, obj):
-        Walker.__init__(self, obj)
-
-    def display(self):
-        self.walk()
-
-    def visit_btree(self, obj, path, parent, is_mapping,
-                    keys, kids, lo, hi):
-        indent = "    " * len(path)
-        print "%s%s %s with %d children" % (
-                  indent,
-                  ".".join(map(str, path)),
-                  type_and_adr(obj),
-                  len(kids))
-        indent += "    "
-        n = len(keys)
-        for i in range(n):
-            print "%skey %d: %r" % (indent, i, keys[i])
-
-    def visit_bucket(self, obj, path, parent, is_mapping,
-                     keys, values, lo, hi):
-        indent = "    " * len(path)
-        print "%s%s %s with %d keys" % (
-                  indent,
-                  ".".join(map(str, path)),
-                  type_and_adr(obj),
-                  len(keys))
-        indent += "    "
-        n = len(keys)
-        for i in range(n):
-            print "%skey %d: %r" % (indent, i, keys[i]),
-            if is_mapping:
-                print "value %r" % (values[i],)
-
-def check(btree):
-    """Check internal value-based invariants in a BTree or TreeSet.
-
-    The btree._check() method checks internal C-level pointer consistency.
-    The check() function here checks value-based invariants:  whether the
-    keys in leaf bucket and internal nodes are in strictly increasing order,
-    and whether they all lie in their expected range.  The latter is a subtle
-    invariant that can't be checked locally -- it requires propagating
-    range info down from the root of the tree, and modifying it at each
-    level for each child.
-
-    Raises AssertionError if anything is wrong, with a string detail
-    explaining the problems.  The entire tree is checked before
-    AssertionError is raised, and the string detail may be large (depending
-    on how much went wrong).
-    """
-
-    Checker(btree).check()
-
-def display(btree):
-    "Display the internal structure of a BTree, Bucket, TreeSet or Set."
-    Printer(btree).display()
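-
-# Typical interactive use (a sketch):
-#
-#     from BTrees.check import check, display
-#     check(t)      # raises AssertionError, with details, if t is broken
-#     display(t)    # prints t's internal node structure to stdout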
diff --git a/branches/bug1734/src/BTrees/convert.py b/branches/bug1734/src/BTrees/convert.py
deleted file mode 100644
index c9bee67b..00000000
--- a/branches/bug1734/src/BTrees/convert.py
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import transaction
-
-def convert(old, new, threshold=200, f=None):
-    "Utility for converting old btree to new"
-    n = 0
-    for k, v in old.items():
-        if f is not None:
-            v = f(v)
-        new[k] = v
-        n += 1
-        if n > threshold:
-            # Commit a subtransaction and trim the pickle cache
-            # periodically, so converting a huge btree doesn't
-            # exhaust memory.
-            transaction.commit(1)
-            old._p_jar.cacheMinimize()
-            n = 0
-
-    transaction.commit(1)
-    old._p_jar.cacheMinimize()
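-
-# Sketch of intended use -- copy an old btree into a freshly made new
-# one, optionally transforming each value with f (IIBTree here is just
-# an illustrative choice of target type):
-#
-#     from BTrees.IIBTree import IIBTree
-#     new = IIBTree()
-#     convert(old, new, f=lambda v: 2 * v)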
diff --git a/branches/bug1734/src/BTrees/floatvaluemacros.h b/branches/bug1734/src/BTrees/floatvaluemacros.h
deleted file mode 100644
index dd90ec66..00000000
--- a/branches/bug1734/src/BTrees/floatvaluemacros.h
+++ /dev/null
@@ -1,25 +0,0 @@
-
-#define VALUEMACROS_H "$Id$\n"
-
-#define VALUE_TYPE float
-#undef VALUE_TYPE_IS_PYOBJECT
-#define TEST_VALUE(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0))
-#define VALUE_SAME(VALUE, TARGET) ( (VALUE) == (TARGET) )
-#define DECLARE_VALUE(NAME) VALUE_TYPE NAME
-#define VALUE_PARSE "f"
-#define DECREF_VALUE(k)
-#define INCREF_VALUE(k)
-#define COPY_VALUE(V, E) (V=(E))
-#define COPY_VALUE_TO_OBJECT(O, K) O=PyFloat_FromDouble(K)
-#define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \
-  if (PyFloat_Check(ARG)) TARGET = (float)PyFloat_AsDouble(ARG); \
-  else if (PyInt_Check(ARG)) TARGET = (float)PyInt_AsLong(ARG); \
-  else { \
-      PyErr_SetString(PyExc_TypeError, "expected float or int value"); \
-      (STATUS)=0; (TARGET)=0; }
-
-#define NORMALIZE_VALUE(V, MIN) ((MIN) > 0) ? ((V)/=(MIN)) : 0
-
-#define MERGE_DEFAULT 1.0f
-#define MERGE(O1, w1, O2, w2) ((O1)*(w1)+(O2)*(w2))
-#define MERGE_WEIGHT(O, w) ((O)*(w))
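-
-/* Note (sketch):  the MERGE* macros support the weighted set
-   operations (weightedUnion/weightedIntersection) in the generated
-   modules, combining two values as v1*w1 + v2*w2. */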
diff --git a/branches/bug1734/src/BTrees/intkeymacros.h b/branches/bug1734/src/BTrees/intkeymacros.h
deleted file mode 100644
index c2340e06..00000000
--- a/branches/bug1734/src/BTrees/intkeymacros.h
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#define KEYMACROS_H "$Id$\n"
-
-#define KEY_TYPE int
-#undef KEY_TYPE_IS_PYOBJECT
-#define KEY_CHECK PyInt_Check
-#define TEST_KEY_SET_OR(V, K, T) if ( ( (V) = (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0)) ) , 0 )
-#define DECREF_KEY(KEY)
-#define INCREF_KEY(k)
-#define COPY_KEY(KEY, E) (KEY=(E))
-#define COPY_KEY_TO_OBJECT(O, K) O=PyInt_FromLong(K)
-#define COPY_KEY_FROM_ARG(TARGET, ARG, STATUS) \
-  if (PyInt_Check(ARG)) TARGET=PyInt_AS_LONG(ARG); else { \
-      PyErr_SetString(PyExc_TypeError, "expected integer key"); \
-      (STATUS)=0; (TARGET)=0; }
-#define MULTI_INT_UNION 1
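-
-/* Note (sketch):  MULTI_INT_UNION enables the multiunion() fast path
-   that the generated int-keyed modules expose for merging many sets
-   at once. */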
diff --git a/branches/bug1734/src/BTrees/intvaluemacros.h b/branches/bug1734/src/BTrees/intvaluemacros.h
deleted file mode 100644
index fe9b15d3..00000000
--- a/branches/bug1734/src/BTrees/intvaluemacros.h
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#define VALUEMACROS_H "$Id$\n"
-
-#define VALUE_TYPE int
-#undef VALUE_TYPE_IS_PYOBJECT
-#define TEST_VALUE(K, T) (((K) < (T)) ? -1 : (((K) > (T)) ? 1: 0)) 
-#define VALUE_SAME(VALUE, TARGET) ( (VALUE) == (TARGET) )
-#define DECLARE_VALUE(NAME) VALUE_TYPE NAME
-#define VALUE_PARSE "i"
-#define DECREF_VALUE(k)
-#define INCREF_VALUE(k)
-#define COPY_VALUE(V, E) (V=(E))
-#define COPY_VALUE_TO_OBJECT(O, K) O=PyInt_FromLong(K) 
-#define COPY_VALUE_FROM_ARG(TARGET, ARG, STATUS) \
-  if (PyInt_Check(ARG)) TARGET=PyInt_AsLong(ARG); else { \
-      PyErr_SetString(PyExc_TypeError, "expected integer value"); \
-      (STATUS)=0; (TARGET)=0; } 
-  
-#define NORMALIZE_VALUE(V, MIN) ((MIN) > 0) ? ((V)/=(MIN)) : 0
-
-#define MERGE_DEFAULT 1
-#define MERGE(O1, w1, O2, w2) ((O1)*(w1)+(O2)*(w2))
-#define MERGE_WEIGHT(O, w) ((O)*(w))
diff --git a/branches/bug1734/src/BTrees/objectkeymacros.h b/branches/bug1734/src/BTrees/objectkeymacros.h
deleted file mode 100644
index 093e6933..00000000
--- a/branches/bug1734/src/BTrees/objectkeymacros.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#define KEYMACROS_H "$Id$\n"
-#define KEY_TYPE PyObject *
-#define KEY_TYPE_IS_PYOBJECT
-#define TEST_KEY_SET_OR(V, KEY, TARGET) if ( ( (V) = PyObject_Compare((KEY),(TARGET)) ), PyErr_Occurred() )
-#define INCREF_KEY(k) Py_INCREF(k)
-#define DECREF_KEY(KEY) Py_DECREF(KEY)
-#define COPY_KEY(KEY, E) KEY=(E)
-#define COPY_KEY_TO_OBJECT(O, K) O=(K); Py_INCREF(O)
-#define COPY_KEY_FROM_ARG(TARGET, ARG, S) TARGET=(ARG)
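-
-/* Note (sketch):  object keys are compared via PyObject_Compare, so
-   keys must support a consistent total ordering; TEST_KEY_SET_OR also
-   tests PyErr_Occurred() because the comparison itself can raise. */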
diff --git a/branches/bug1734/src/BTrees/objectvaluemacros.h b/branches/bug1734/src/BTrees/objectvaluemacros.h
deleted file mode 100644
index 085664e8..00000000
--- a/branches/bug1734/src/BTrees/objectvaluemacros.h
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#define VALUEMACROS_H "$Id$\n"
-
-#define VALUE_TYPE PyObject *
-#define VALUE_TYPE_IS_PYOBJECT
-#define TEST_VALUE(VALUE, TARGET) PyObject_Compare((VALUE),(TARGET))
-#define DECLARE_VALUE(NAME) VALUE_TYPE NAME
-#define INCREF_VALUE(k) Py_INCREF(k)
-#define DECREF_VALUE(k) Py_DECREF(k)
-#define COPY_VALUE(k,e) k=(e)
-#define COPY_VALUE_TO_OBJECT(O, K) O=(K); Py_INCREF(O)
-#define COPY_VALUE_FROM_ARG(TARGET, ARG, S) TARGET=(ARG)
-#define NORMALIZE_VALUE(V, MIN) Py_INCREF(V)
diff --git a/branches/bug1734/src/BTrees/sorters.c b/branches/bug1734/src/BTrees/sorters.c
deleted file mode 100644
index 0a4cdb6f..00000000
--- a/branches/bug1734/src/BTrees/sorters.c
+++ /dev/null
@@ -1,527 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-/* Revision information: $Id$ */
-
-/* The only routine here intended to be used outside the file is
-   size_t sort_int4_nodups(int *p, size_t n)
-
-   Sort the array of n ints pointed at by p, in place, and also remove
-   duplicates.  Return the number of unique elements remaining, which occupy
-   a contiguous and monotonically increasing slice of the array starting at p.
-
-   Example:  If the input array is [3, 1, 2, 3, 1, 5, 2], sort_int4_nodups
-   returns 4, and the first 4 elements of the array are changed to
-   [1, 2, 3, 5].  The content of the remaining array positions is not defined.
-
-   Notes:
-
-   + This is specific to 4-byte signed ints, with endianness natural to the
-     platform.
-
-   + 4*n bytes of available heap memory are required for best speed.
-*/
-
-#include <stdlib.h>
-#include <stddef.h>
-#include <memory.h>
-#include <string.h>
-#include <assert.h>
-
-/* The type of array elements to be sorted.  Most of the routines don't
-   care about the type, and will work fine for any scalar C type (provided
-   they're recompiled with element_type appropriately redefined).  However,
-   the radix sort has to know everything about the type's internal
-   representation.
-*/
-typedef int element_type;
-
-/* The radixsort is faster than the quicksort for large arrays, but radixsort
-   has high fixed overhead, making it a poor choice for small arrays.  The
-   crossover point isn't critical, and is sensitive to things like compiler
-   and machine cache structure, so don't worry much about this.
-*/
-#define QUICKSORT_BEATS_RADIXSORT 800U
-
-/* In turn, the quicksort backs off to an insertion sort for very small
-   slices.  MAX_INSERTION is the largest slice quicksort leaves entirely to
-   insertion.  Because this version of quicksort uses a median-of-3 rule for
-   selecting a pivot, MAX_INSERTION must be at least 2 (so that quicksort
-   has at least 3 values to look at in a slice).  Again, the exact value here
-   isn't critical.
-*/
-#define MAX_INSERTION 25U
-
-#if MAX_INSERTION < 2U
-#   error "MAX_INSERTION must be >= 2"
-#endif
-
-/* LSB-first radix sort of the n elements in 'in'.
-   'work' is work storage at least as large as 'in'.  Depending on how many
-   swaps are done internally, the final result may come back in 'in' or 'work';
-   and that pointer is returned.
-
-   radixsort_int4 is specific to signed 4-byte ints, with natural machine
-   endianness.
-*/
-static element_type*
-radixsort_int4(element_type *in, element_type *work, size_t n)
-{
-	/* count[i][j] is the number of input elements that have byte value j
-	   in byte position i, where byte position 0 is the LSB.  Note that
-	   holding i fixed, the sum of count[i][j] over all j in range(256)
-	   is n.
-	*/
-	size_t count[4][256];
-	size_t i;
-	int offset, offsetinc;
-
-	/* Which byte position are we working on now?  0=LSB, 1, 2, ... */
-	int bytenum;
-
-	assert(sizeof(element_type) == 4);
-	assert(in);
-	assert(work);
-
-	/* Compute all of count in one pass. */
-	memset(count, 0, sizeof(count));
-	for (i = 0; i < n; ++i) {
-		element_type const x = in[i];
-		++count[0][(x      ) & 0xff];
-		++count[1][(x >>  8) & 0xff];
-		++count[2][(x >> 16) & 0xff];
-		++count[3][(x >> 24) & 0xff];
-	}
-
-	/* For p an element_type* cast to char*, offset is how much farther we
-	   have to go to get to the LSB of the element; this is 0 for little-
-	   endian boxes and sizeof(element_type)-1 for big-endian.
-	   offsetinc is 1 or -1, respectively, telling us which direction to go
-	   from p+offset to get to the element's more-significant bytes.
-	*/
-	{
-		int one = 1;
-		if (*(char*)&one) {
-			/* Little endian. */
-			offset = 0;
-			offsetinc = 1;
-		}
-		else {
-			/* Big endian. */
-			offset = sizeof(element_type) - 1;
-			offsetinc = -1;
-		}
-	}
-
-	/* The radix sort. */
-	for (bytenum = 0;
-	     bytenum < sizeof(element_type);
-	     ++bytenum, offset += offsetinc) {
-
-		/* Do a stable distribution sort on byte position bytenum,
-		   from in to work.  index[i] tells us the work index at which
-		   to store the next in element with byte value i.  pinbyte
-		   points to the correct byte in the input array.
-		*/
-	     	size_t index[256];
-		unsigned char* pinbyte;
-		size_t total = 0;
-		size_t *pcount = count[bytenum];
-
-		/* Compute the correct output starting index for each possible
-		   byte value.
-		*/
-		if (bytenum < sizeof(element_type) - 1) {
-			for (i = 0; i < 256; ++i) {
-				const size_t icount = pcount[i];
-				index[i] = total;
-				total += icount;
-				if (icount == n)
-					break;
-			}
-			if (i < 256) {
-				/* All bytes in the current position have value
-				   i, so there's nothing to do on this pass.
-				*/
-				continue;
-			}
-		}
-		else {
-			/* The MSB of signed ints needs to be distributed
-			   differently than the other bytes, in order
-			   0x80, 0x81, ... 0xff, 0x00, 0x01, ... 0x7f
-			*/
-			for (i = 128; i < 256; ++i) {
-				const size_t icount = pcount[i];
-				index[i] = total;
-				total += icount;
-				if (icount == n)
-					break;
-			}
-			if (i < 256)
-				continue;
-			for (i = 0; i < 128; ++i) {
-				const size_t icount = pcount[i];
-				index[i] = total;
-				total += icount;
-				if (icount == n)
-					break;
-			}
-			if (i < 128)
-				continue;
-		}
-		assert(total == n);
-
-		/* Distribute the elements according to byte value.  Note that
-		   this is where most of the time is spent.
-		   Note:  The loop is unrolled 4x by hand, for speed.  This
-		   may be a pessimization someday, but was a significant win
-		   on my MSVC 6.0 timing tests.
-		*/
-		pinbyte = (unsigned char  *)in + offset;
-		i = 0;
-		/* Reduce number of elements to copy to a multiple of 4. */
-		while ((n - i) & 0x3) {
-			unsigned char byte = *pinbyte;
-			work[index[byte]++] = in[i];
-			++i;
-			pinbyte += sizeof(element_type);
-		}
-		for (; i < n; i += 4, pinbyte += 4 * sizeof(element_type)) {
-			unsigned char byte1 = *(pinbyte                           );
-			unsigned char byte2 = *(pinbyte +     sizeof(element_type));
-			unsigned char byte3 = *(pinbyte + 2 * sizeof(element_type));
-			unsigned char byte4 = *(pinbyte + 3 * sizeof(element_type));
-
-			element_type in1 = in[i  ];
-			element_type in2 = in[i+1];
-			element_type in3 = in[i+2];
-			element_type in4 = in[i+3];
-
-			work[index[byte1]++] = in1;
-			work[index[byte2]++] = in2;
-			work[index[byte3]++] = in3;
-			work[index[byte4]++] = in4;
-		}
-		/* Swap in and work (just a pointer swap). */
-		{
-			element_type *temp = in;
-			in = work;
-			work = temp;
-		}
-	}
-
-	return in;
-}
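-
-/* Worked example of one distribution pass (sketch):  sorting on byte
-   position 0, if the byte values are [3, 1, 2, 1] then the counts are
-   {1: 2, 2: 1, 3: 1}, the starting output indexes are {1: 0, 2: 2, 3: 3},
-   and the stable distribution writes the elements into 'work' in the
-   order [1, 1, 2, 3]. */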
-
-/* Remove duplicates from sorted array in, storing exactly one of each distinct
-   element value into sorted array out.  It's OK (and expected!) for in == out,
-   but otherwise the n elements beginning at in must not overlap with the n
-   beginning at out.
-   Return the number of elements in out.
-*/
-static size_t
-uniq(element_type *out, element_type *in, size_t n)
-{
-	size_t i;
-	element_type lastelt;
-	element_type *pout;
-
-	assert(out);
-	assert(in);
-	if (n == 0)
-		return 0;
-
-	/* i <- first index in 'in' that contains a duplicate.
-	   in[0], in[1], ... in[i-1] are unique, but in[i-1] == in[i].
-	   Set i to n if everything is unique.
-	*/
-	for (i = 1; i < n; ++i) {
-		if (in[i-1] == in[i])
-			break;
-	}
-
-	/* in[:i] is unique; copy to out[:i] if needed. */
-	assert(i > 0);
-	if (in != out)
-		memcpy(out, in, i * sizeof(element_type));
-
-	pout = out + i;
-	lastelt = in[i-1];  /* safe even when i == n */
-	for (++i; i < n; ++i) {
-		element_type elt = in[i];
-		if (elt != lastelt)
-			*pout++ = lastelt = elt;
-	}
-	return pout - out;
-}
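-
-/* Sketch:  uniq(out, in, 5) with in = [1, 1, 2, 3, 3] copies the
-   already-unique prefix, filters the rest, leaves out = [1, 2, 3],
-   and returns 3. */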
-
-#if 0
-/* insertionsort is no longer referenced directly, but I'd like to keep
- *  the code here just in case.
- */
-
-/* Straight insertion sort of the n elements starting at 'in'. */
-static void
-insertionsort(element_type *in, size_t n)
-{
-	element_type *p, *q;
-	element_type minimum;  /* smallest seen so far */
-	element_type *plimit = in + n;
-
-	assert(in);
-	if (n < 2)
-		return;
-
-	minimum = *in;
-	for (p = in+1; p < plimit; ++p) {
-		/* *in <= *(in+1) <= ... <= *(p-1).  Slide *p into place. */
-		element_type thiselt = *p;
-		if (thiselt < minimum) {
-			/* This is a new minimum.  This saves p-in compares
-			   when it happens, but should happen so rarely that
-			   it's not worth checking for its own sake:  the
-			   point is that the far more popular 'else' branch can
-			   exploit that thiselt is *not* the smallest so far.
-			*/
-			memmove(in+1, in, (p - in) * sizeof(*in));
-			*in = minimum = thiselt;
-		}
-		else {
-			/* thiselt >= minimum, so the loop will find a q
-			   with *q <= thiselt.  This saves testing q >= in
-			   on each trip.  It's such a simple loop that saving
-			   a per-trip test is a major speed win.
-			*/
-			for (q = p-1; *q > thiselt; --q)
-				*(q+1) = *q;
-			*(q+1) = thiselt;
-		}
-	}
-}
-#endif
-
-/* The maximum number of elements in the pending-work stack quicksort
-   maintains.  The maximum stack depth is approximately log2(n), so
-   arrays of size up to approximately MAX_INSERTION * 2**STACKSIZE can be
-   sorted.  The memory burden for the stack is small, so better safe than
-   sorry.
-*/
-#define STACKSIZE 60
-
-/* A _stacknode remembers a contiguous slice of an array that needs to be sorted.
-   lo must be <= hi, and, unlike Python array slices, this includes both ends.
-*/
-struct _stacknode {
-	element_type *lo;
-	element_type *hi;
-};
-
-static void
-quicksort(element_type *plo, size_t n)
-{
-	element_type *phi;
-
-	/* Swap two array elements. */
-	element_type _temp;
-#define SWAP(P, Q) (_temp = *(P), *(P) = *(Q), *(Q) = _temp)
-
-	/* Stack of pending array slices to be sorted. */
-	struct _stacknode stack[STACKSIZE];
-	struct _stacknode *stackfree = stack;	/* available stack slot */
-
-	/* Push an array slice on the pending-work stack. */
-#define PUSH(PLO, PHI)					\
-	do {						\
-		assert(stackfree - stack < STACKSIZE);	\
-		assert((PLO) <= (PHI));			\
-		stackfree->lo = (PLO);			\
-		stackfree->hi = (PHI);			\
-		++stackfree;				\
-	} while(0)
-
-	assert(plo);
-	phi = plo + n - 1;
-
-	for (;;) {
-		element_type pivot;
-		element_type *pi, *pj;
-
-		assert(plo <= phi);
-		n = phi - plo + 1;
-		if (n <= MAX_INSERTION) {
-			/* Do a small insertion sort.  Contra Knuth, we do
-			   this now instead of waiting until the end, because
-			   this little slice is likely still in cache now.
-			*/
-			element_type *p, *q;
-			element_type minimum = *plo;
-
-			for (p = plo+1; p <= phi; ++p) {
-				/* *plo <= *(plo+1) <= ... <= *(p-1).
-				   Slide *p into place. */
-				element_type thiselt = *p;
-				if (thiselt < minimum) {
-					/* New minimum. */
-					memmove(plo+1,
-						plo,
-						(p - plo) * sizeof(*p));
-					*plo = minimum = thiselt;
-				}
-				else {
-					/* thiselt >= minimum, so the loop will
-					   find a q with *q <= thiselt.
-					*/
-					for (q = p-1; *q > thiselt; --q)
-						*(q+1) = *q;
-					*(q+1) = thiselt;
-				}
-			}
-
-			/* Pop another slice off the stack. */
-			if (stack == stackfree)
-				break;	/* no more slices -- we're done */
-			--stackfree;
-			plo = stackfree->lo;
-			phi = stackfree->hi;
-			continue;
-		}
-
-		/* Partition the slice.
-		   For pivot, take the median of the leftmost, rightmost, and
-		   middle elements.  First sort those three; then the median
-		   is the middle one.  For technical reasons, the middle
-		   element is swapped to plo+1 first (see Knuth Vol 3 Ed 2
-		   section 5.2.2 exercise 55 -- reverse-sorted arrays can
-		   take quadratic time otherwise!).
-		*/
-		{
-			element_type *plop1 = plo + 1;
-			element_type *pmid = plo + (n >> 1);
-
-			assert(plo < pmid && pmid < phi);
-			SWAP(plop1, pmid);
-
-			/* Sort plo, plop1, phi. */
-			/* Smaller of rightmost two -> middle. */
-			if (*plop1 > *phi)
-				SWAP(plop1, phi);
-			/* Smallest of all -> left; if plo is already the
-			   smallest, the sort is complete.
-			*/
-			if (*plo > *plop1) {
-				SWAP(plo, plop1);
-				/* Largest of all -> right. */
-				if (*plop1 > *phi)
-					SWAP(plop1, phi);
-			}
-			pivot = *plop1;
-			pi = plop1;
-		}
-		assert(*plo <= pivot);
-		assert(*pi == pivot);
-		assert(*phi >= pivot);
-		pj = phi;
-
-		/* Partition wrt pivot.  This is the time-critical part, and
-		   nearly every decision in the routine aims at making this
-		   loop as fast as possible -- even small points like
-		   arranging that all loop tests can be done correctly at the
-		   bottoms of loops instead of the tops, and that pointers can
-		   be dereferenced directly as-is (without fiddly +1 or -1).
-		   The aim is to make the C here so simple that a compiler
-		   has a good shot at doing as well as hand-crafted assembler.
-		*/
-		for (;;) {
-			/* Invariants:
-			   1. pi < pj.
-			   2. All elements at plo, plo+1 .. pi are <= pivot.
-			   3. All elements at pj, pj+1 .. phi are >= pivot.
-			   4. There is an element >= pivot to the right of pi.
-			   5. There is an element <= pivot to the left of pj.
-
-			   Note that #4 and #5 save us from needing to check
-			   that the pointers stay in bounds.
-			*/
-			assert(pi < pj);
-
-			do { ++pi; } while (*pi < pivot);
-			assert(pi <= pj);
-
-			do { --pj; } while (*pj > pivot);
-			assert(pj >= pi - 1);
-
-			if (pi < pj)
-				SWAP(pi, pj);
-			else
-				break;
-		}
-		assert(plo+1 < pi && pi <= phi);
-		assert(plo < pj && pj < phi);
-		assert(*pi >= pivot);
-		assert( (pi == pj && *pj == pivot) ||
-			(pj + 1 == pi && *pj <= pivot) );
-
-		/* Swap pivot into its final position, pj. */
-		assert(plo[1] == pivot);
-		plo[1] = *pj;
-		*pj = pivot;
-
-		/* Subfiles are from plo to pj-1 inclusive, and pj+1 to phi
-		   inclusive.  Push the larger one, and loop back to do the
-		   smaller one directly.
-		*/
-		if (pj - plo >= phi - pj) {
-			PUSH(plo, pj-1);
-			plo = pj+1;
-		}
-		else {
-			PUSH(pj+1, phi);
-			phi = pj-1;
-		}
-	}
-
-#undef PUSH
-#undef SWAP
-}
-
-/* Sort p and remove duplicates, as fast as we can. */
-static size_t
-sort_int4_nodups(int *p, size_t n)
-{
-	size_t nunique;
-	element_type *work;
-
-	assert(sizeof(int) == sizeof(element_type));
-	assert(p);
-
-	/* Use quicksort if the array is small, OR if malloc can't find
-	   enough temp memory for radixsort.
-	*/
-	work = NULL;
-	if (n > QUICKSORT_BEATS_RADIXSORT)
-		work = (element_type *)malloc(n * sizeof(element_type));
-
-	if (work) {
-		element_type *out = radixsort_int4(p, work, n);
-		nunique = uniq(p, out, n);
-		free(work);
-	}
-	else {
-		quicksort(p, n);
-		nunique = uniq(p, p, n);
-	}
-
-	return nunique;
-}
diff --git a/branches/bug1734/src/BTrees/tests/__init__.py b/branches/bug1734/src/BTrees/tests/__init__.py
deleted file mode 100644
index c98a506b..00000000
--- a/branches/bug1734/src/BTrees/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# If tests is a package, debugging is a bit easier.
diff --git a/branches/bug1734/src/BTrees/tests/testBTrees.py b/branches/bug1734/src/BTrees/tests/testBTrees.py
deleted file mode 100644
index 830780f7..00000000
--- a/branches/bug1734/src/BTrees/tests/testBTrees.py
+++ /dev/null
@@ -1,1483 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import random
-from unittest import TestCase, TestSuite, TextTestRunner, makeSuite
-
-from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet
-from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet
-from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet
-from BTrees.IFBTree import IFBTree, IFBucket, IFSet, IFTreeSet
-from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet
-
-from BTrees.check import check
-
-import transaction
-from ZODB import DB
-from ZODB.MappingStorage import MappingStorage
-
-class Base(TestCase):
-    """ Tests common to all types: sets, buckets, and BTrees """
-
-    db = None
-
-    def tearDown(self):
-        if self.db is not None:
-            self.db.close()
-        self.t = None
-        del self.t
-
-    def _getRoot(self):
-        if self.db is None:
-            # Unclear:  On the next line, the ZODB4 flavor of this routine
-            # passes a cache_size argument:
-            #     self.db = DB(MappingStorage(), cache_size=1)
-            # If that's done here, though, testLoadAndStore() and
-            # testGhostUnghost() both nail the CPU and seemingly
-            # never finish.
-            self.db = DB(MappingStorage())
-        return self.db.open().root()
-
-    def _closeRoot(self, root):
-        root._p_jar.close()
-
-    def testLoadAndStore(self):
-        for i in 0, 10, 1000:
-            t = self.t.__class__()
-            self._populate(t, i)
-            root = None
-            root = self._getRoot()
-            root[i] = t
-            transaction.commit()
-
-            root2 = self._getRoot()
-            if hasattr(t, 'items'):
-                self.assertEqual(list(root2[i].items()) , list(t.items()))
-            else:
-                self.assertEqual(list(root2[i].keys()) , list(t.keys()))
-
-            self._closeRoot(root)
-            self._closeRoot(root2)
-
-    def testGhostUnghost(self):
-        for i in 0, 10, 1000:
-            t = self.t.__class__()
-            self._populate(t, i)
-            root = self._getRoot()
-            root[i] = t
-            transaction.commit()
-
-            root2 = self._getRoot()
-            root2[i]._p_deactivate()
-            transaction.commit()
-            if hasattr(t, 'items'):
-                self.assertEqual(list(root2[i].items()) , list(t.items()))
-            else:
-                self.assertEqual(list(root2[i].keys()) , list(t.keys()))
-
-            self._closeRoot(root)
-            self._closeRoot(root2)
-
-    def testSimpleExclusiveKeyRange(self):
-        t = self.t.__class__()
-        self.assertEqual(list(t.keys()), [])
-        self.assertEqual(list(t.keys(excludemin=True)), [])
-        self.assertEqual(list(t.keys(excludemax=True)), [])
-        self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), [])
-
-        self._populate(t, 1)
-        self.assertEqual(list(t.keys()), [0])
-        self.assertEqual(list(t.keys(excludemin=True)), [])
-        self.assertEqual(list(t.keys(excludemax=True)), [])
-        self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), [])
-
-        t.clear()
-        self._populate(t, 2)
-        self.assertEqual(list(t.keys()), [0, 1])
-        self.assertEqual(list(t.keys(excludemin=True)), [1])
-        self.assertEqual(list(t.keys(excludemax=True)), [0])
-        self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), [])
-
-        t.clear()
-        self._populate(t, 3)
-        self.assertEqual(list(t.keys()), [0, 1, 2])
-        self.assertEqual(list(t.keys(excludemin=True)), [1, 2])
-        self.assertEqual(list(t.keys(excludemax=True)), [0, 1])
-        self.assertEqual(list(t.keys(excludemin=True, excludemax=True)), [1])
-
-        self.assertEqual(list(t.keys(-1, 3, excludemin=True, excludemax=True)),
-                         [0, 1, 2])
-        self.assertEqual(list(t.keys(0, 3, excludemin=True, excludemax=True)),
-                         [1, 2])
-        self.assertEqual(list(t.keys(-1, 2, excludemin=True, excludemax=True)),
-                         [0, 1])
-        self.assertEqual(list(t.keys(0, 2, excludemin=True, excludemax=True)),
-                         [1])
-
-class MappingBase(Base):
-    """ Tests common to mappings (buckets, btrees) """
-
-    def _populate(self, t, l):
-        # Make some data
-        for i in range(l): t[i]=i
-
-    def testRepr(self):
-        # Test the repr because buckets have a complex repr implementation;
-        # internally the cutoff from a stack-allocated buffer to a heap-
-        # allocated buffer is 10000 bytes.
-        for i in range(1000):
-            self.t[i] = i
-        r = repr(self.t)
-        # Make sure the repr is more than 10000 bytes long for a bucket.
-        # But since the test is also run for btrees, skip the length
-        # check if the repr starts with '<'
-        if not r.startswith('<'):
-            self.assert_(len(r) > 10000)
-
-    def testGetItemFails(self):
-        self.assertRaises(KeyError, self._getitemfail)
-
-    def _getitemfail(self):
-        return self.t[1]
-
-    def testGetReturnsDefault(self):
-        self.assertEqual(self.t.get(1) , None)
-        self.assertEqual(self.t.get(1, 'foo') , 'foo')
-
-    def testSetItemGetItemWorks(self):
-        self.t[1] = 1
-        a = self.t[1]
-        self.assertEqual(a , 1, `a`)
-
-    def testReplaceWorks(self):
-        self.t[1] = 1
-        self.assertEqual(self.t[1] , 1, self.t[1])
-        self.t[1] = 2
-        self.assertEqual(self.t[1] , 2, self.t[1])
-
-    def testLen(self):
-        added = {}
-        r = range(1000)
-        for x in r:
-            k = random.choice(r)
-            self.t[k] = x
-            added[k] = x
-        addl = added.keys()
-        self.assertEqual(len(self.t) , len(addl), len(self.t))
-
-    def testHasKeyWorks(self):
-        self.t[1] = 1
-        self.assert_(self.t.has_key(1))
-        self.assert_(1 in self.t)
-        self.assert_(0 not in self.t)
-        self.assert_(2 not in self.t)
-
-    def testValuesWorks(self):
-        for x in range(100):
-            self.t[x] = x*x
-        v = self.t.values()
-        for i in range(100):
-            self.assertEqual(v[i], i*i)
-        self.assertRaises(IndexError, lambda: v[i+1])
-        i = 0
-        for value in self.t.itervalues():
-            self.assertEqual(value, i*i)
-            i += 1
-
-    def testValuesWorks1(self):
-        for x in range(100):
-            self.t[99-x] = x
-
-        for x in range(40):
-            lst = list(self.t.values(0+x,99-x))
-            lst.sort()
-            self.assertEqual(lst,range(0+x,99-x+1))
-
-            lst = list(self.t.values(max=99-x, min=0+x))
-            lst.sort()
-            self.assertEqual(lst,range(0+x,99-x+1))
-
-    def testValuesNegativeIndex(self):
-        L = [-3, 6, -11, 4]
-        for i in L:
-            self.t[i] = i
-        L.sort()
-        vals = self.t.values()
-        for i in range(-1, -5, -1):
-            self.assertEqual(vals[i], L[i])
-        self.assertRaises(IndexError, lambda: vals[-5])
-
-    def testKeysWorks(self):
-        for x in range(100):
-            self.t[x] = x
-        v = self.t.keys()
-        i = 0
-        for x in v:
-            self.assertEqual(x,i)
-            i = i + 1
-        self.assertRaises(IndexError, lambda: v[i])
-
-        for x in range(40):
-            lst = self.t.keys(0+x,99-x)
-            self.assertEqual(list(lst), range(0+x, 99-x+1))
-
-            lst = self.t.keys(max=99-x, min=0+x)
-            self.assertEqual(list(lst), range(0+x, 99-x+1))
-
-        self.assertEqual(len(v), 100)
-
-    def testKeysNegativeIndex(self):
-        L = [-3, 6, -11, 4]
-        for i in L:
-            self.t[i] = i
-        L.sort()
-        keys = self.t.keys()
-        for i in range(-1, -5, -1):
-            self.assertEqual(keys[i], L[i])
-        self.assertRaises(IndexError, lambda: keys[-5])
-
-    def testItemsWorks(self):
-        for x in range(100):
-            self.t[x] = 2*x
-        v = self.t.items()
-        i = 0
-        for x in v:
-            self.assertEqual(x[0], i)
-            self.assertEqual(x[1], 2*i)
-            i += 1
-        self.assertRaises(IndexError, lambda: v[i+1])
-
-        i = 0
-        for x in self.t.iteritems():
-            self.assertEqual(x, (i, 2*i))
-            i += 1
-
-        items = list(self.t.items(min=12, max=20))
-        self.assertEqual(items, zip(range(12, 21), range(24, 43, 2)))
-
-        items = list(self.t.iteritems(min=12, max=20))
-        self.assertEqual(items, zip(range(12, 21), range(24, 43, 2)))
-
-    def testItemsNegativeIndex(self):
-        L = [-3, 6, -11, 4]
-        for i in L:
-            self.t[i] = i
-        L.sort()
-        items = self.t.items()
-        for i in range(-1, -5, -1):
-            self.assertEqual(items[i], (L[i], L[i]))
-        self.assertRaises(IndexError, lambda: items[-5])
-
-    def testDeleteInvalidKeyRaisesKeyError(self):
-        self.assertRaises(KeyError, self._deletefail)
-
-    def _deletefail(self):
-        del self.t[1]
-
-    def testMaxKeyMinKey(self):
-        self.t[7] = 6
-        self.t[3] = 10
-        self.t[8] = 12
-        self.t[1] = 100
-        self.t[5] = 200
-        self.t[10] = 500
-        self.t[6] = 99
-        self.t[4] = 150
-        del self.t[7]
-        t = self.t
-        self.assertEqual(t.maxKey(), 10)
-        self.assertEqual(t.maxKey(6), 6)
-        self.assertEqual(t.maxKey(9), 8)
-        self.assertEqual(t.minKey(), 1)
-        self.assertEqual(t.minKey(3), 3)
-        self.assertEqual(t.minKey(9), 10)
-
-    def testClear(self):
-        r = range(100)
-        for x in r:
-            rnd = random.choice(r)
-            self.t[rnd] = 0
-        self.t.clear()
-        diff = lsubtract(list(self.t.keys()), [])
-        self.assertEqual(diff, [])
-
-    def testUpdate(self):
-        d={}
-        l=[]
-        for i in range(10000):
-            k=random.randrange(-2000, 2001)
-            d[k]=i
-            l.append((k, i))
-
-        items=d.items()
-        items.sort()
-
-        self.t.update(d)
-        self.assertEqual(list(self.t.items()), items)
-
-        self.t.clear()
-        self.assertEqual(list(self.t.items()), [])
-
-        self.t.update(l)
-        self.assertEqual(list(self.t.items()), items)
-
-    def testEmptyRangeSearches(self):
-        t = self.t
-        t.update([(1,1), (5,5), (9,9)])
-        self.assertEqual(list(t.keys(-6,-4)), [], list(t.keys(-6,-4)))
-        self.assertEqual(list(t.keys(2,4)), [], list(t.keys(2,4)))
-        self.assertEqual(list(t.keys(6,8)), [], list(t.keys(6,8)))
-        self.assertEqual(list(t.keys(10,12)), [], list(t.keys(10,12)))
-        self.assertEqual(list(t.keys(9, 1)), [], list(t.keys(9, 1)))
-
-        # For IITreeSets, this one was returning 31 for len(keys), and
-        # list(keys) produced a list with 100 elements.
-        t.clear()
-        t.update(zip(range(300), range(300)))
-        keys = t.keys(200, 50)
-        self.assertEqual(len(keys), 0)
-        self.assertEqual(list(keys), [])
-        self.assertEqual(list(t.iterkeys(200, 50)), [])
-
-        keys = t.keys(max=50, min=200)
-        self.assertEqual(len(keys), 0)
-        self.assertEqual(list(keys), [])
-        self.assertEqual(list(t.iterkeys(max=50, min=200)), [])
-
-    def testSlicing(self):
-        # Test that slicing of .keys()/.values()/.items() works exactly the
-        # same way as slicing a Python list with the same contents.
-        # This tests fixes to several bugs in this area, starting with
-        # http://collector.zope.org/Zope/419,
-        # "BTreeItems slice contains 1 too many elements".
-
-        t = self.t
-        for n in range(10):
-            t.clear()
-            self.assertEqual(len(t), 0)
-
-            keys = []
-            values = []
-            items = []
-            for key in range(n):
-                value = -2 * key
-                t[key] = value
-                keys.append(key)
-                values.append(value)
-                items.append((key, value))
-            self.assertEqual(len(t), n)
-
-            kslice = t.keys()
-            vslice = t.values()
-            islice = t.items()
-            self.assertEqual(len(kslice), n)
-            self.assertEqual(len(vslice), n)
-            self.assertEqual(len(islice), n)
-
-            # Test whole-structure slices.
-            x = kslice[:]
-            self.assertEqual(list(x), keys[:])
-
-            x = vslice[:]
-            self.assertEqual(list(x), values[:])
-
-            x = islice[:]
-            self.assertEqual(list(x), items[:])
-
-            for lo in range(-2*n, 2*n+1):
-                # Test one-sided slices.
-                x = kslice[:lo]
-                self.assertEqual(list(x), keys[:lo])
-                x = kslice[lo:]
-                self.assertEqual(list(x), keys[lo:])
-
-                x = vslice[:lo]
-                self.assertEqual(list(x), values[:lo])
-                x = vslice[lo:]
-                self.assertEqual(list(x), values[lo:])
-
-                x = islice[:lo]
-                self.assertEqual(list(x), items[:lo])
-                x = islice[lo:]
-                self.assertEqual(list(x), items[lo:])
-
-                for hi in range(-2*n, 2*n+1):
-                    # Test two-sided slices.
-                    x = kslice[lo:hi]
-                    self.assertEqual(list(x), keys[lo:hi])
-
-                    x = vslice[lo:hi]
-                    self.assertEqual(list(x), values[lo:hi])
-
-                    x = islice[lo:hi]
-                    self.assertEqual(list(x), items[lo:hi])
-
-        # The specific test case from Zope collector 419.
-        t.clear()
-        for i in xrange(100):
-            t[i] = 1
-        tslice = t.items()[20:80]
-        self.assertEqual(len(tslice), 60)
-        self.assertEqual(list(tslice), zip(range(20, 80), [1]*60))
-
-    def testIterators(self):
-        t = self.t
-
-        for keys in [], [-2], [1, 4], range(-170, 2000, 6):
-            t.clear()
-            for k in keys:
-                t[k] = -3 * k
-
-            self.assertEqual(list(t), keys)
-
-            x = []
-            for k in t:
-                x.append(k)
-            self.assertEqual(x, keys)
-
-            it = iter(t)
-            self.assert_(it is iter(it))
-            x = []
-            try:
-                while 1:
-                    x.append(it.next())
-            except StopIteration:
-                pass
-            self.assertEqual(x, keys)
-
-            self.assertEqual(list(t.iterkeys()), keys)
-            self.assertEqual(list(t.itervalues()), list(t.values()))
-            self.assertEqual(list(t.iteritems()), list(t.items()))
-
-    def testRangedIterators(self):
-        t = self.t
-
-        for keys in [], [-2], [1, 4], range(-170, 2000, 13):
-            t.clear()
-            values = []
-            for k in keys:
-                value = -3 * k
-                t[k] = value
-                values.append(value)
-            items = zip(keys, values)
-
-            self.assertEqual(list(t.iterkeys()), keys)
-            self.assertEqual(list(t.itervalues()), values)
-            self.assertEqual(list(t.iteritems()), items)
-
-            if not keys:
-                continue
-
-            min_mid_max = (keys[0], keys[len(keys) >> 1], keys[-1])
-            for key1 in min_mid_max:
-                for lo in range(key1 - 1, key1 + 2):
-                    # Test one-sided range iterators.
-                    goodkeys = [k for k in keys if lo <= k]
-                    got = t.iterkeys(lo)
-                    self.assertEqual(goodkeys, list(got))
-
-                    goodvalues = [t[k] for k in goodkeys]
-                    got = t.itervalues(lo)
-                    self.assertEqual(goodvalues, list(got))
-
-                    gooditems = zip(goodkeys, goodvalues)
-                    got = t.iteritems(lo)
-                    self.assertEqual(gooditems, list(got))
-
-                    for key2 in min_mid_max:
-                        for hi in range(key2 - 1, key2 + 2):
-                            goodkeys = [k for k in keys if lo <= k <= hi]
-                            got = t.iterkeys(min=lo, max=hi)
-                            self.assertEqual(goodkeys, list(got))
-
-                            goodvalues = [t[k] for k in goodkeys]
-                            got = t.itervalues(lo, max=hi)
-                            self.assertEqual(goodvalues, list(got))
-
-                            gooditems = zip(goodkeys, goodvalues)
-                            got = t.iteritems(max=hi, min=lo)
-                            self.assertEqual(gooditems, list(got))
-
-    def testBadUpdateTupleSize(self):
-        # This one silently ignored the excess in Zope3.
-        try:
-            self.t.update([(1, 2, 3)])
-        except TypeError:
-            pass
-        else:
-            self.fail("update() with 3-tuple didn't complain")
-
-        # This one dumped core in Zope3.
-        try:
-            self.t.update([(1,)])
-        except TypeError:
-            pass
-        else:
-            self.fail("update() with 1-tuple didn't complain")
-
-        # This one should simply succeed.
-        self.t.update([(1, 2)])
-        self.assertEqual(list(self.t.items()), [(1, 2)])
-
-    def testSimpleExclusiveRanges(self):
-        def identity(x):
-            return x
-        def dup(x):
-            return [(y, y) for y in x]
-
-        for methodname, f in (("keys", identity),
-                              ("values", identity),
-                              ("items", dup),
-                              ("iterkeys", identity),
-                              ("itervalues", identity),
-                              ("iteritems", dup)):
-
-            t = self.t.__class__()
-            meth = getattr(t, methodname, None)
-            if meth is None:
-                continue
-
-            self.assertEqual(list(meth()), [])
-            self.assertEqual(list(meth(excludemin=True)), [])
-            self.assertEqual(list(meth(excludemax=True)), [])
-            self.assertEqual(list(meth(excludemin=True, excludemax=True)), [])
-
-            self._populate(t, 1)
-            self.assertEqual(list(meth()), f([0]))
-            self.assertEqual(list(meth(excludemin=True)), [])
-            self.assertEqual(list(meth(excludemax=True)), [])
-            self.assertEqual(list(meth(excludemin=True, excludemax=True)), [])
-
-            t.clear()
-            self._populate(t, 2)
-            self.assertEqual(list(meth()), f([0, 1]))
-            self.assertEqual(list(meth(excludemin=True)), f([1]))
-            self.assertEqual(list(meth(excludemax=True)), f([0]))
-            self.assertEqual(list(meth(excludemin=True, excludemax=True)), [])
-
-            t.clear()
-            self._populate(t, 3)
-            self.assertEqual(list(meth()), f([0, 1, 2]))
-            self.assertEqual(list(meth(excludemin=True)), f([1, 2]))
-            self.assertEqual(list(meth(excludemax=True)), f([0, 1]))
-            self.assertEqual(list(meth(excludemin=True, excludemax=True)),
-                            f([1]))
-            self.assertEqual(list(meth(-1, 3, excludemin=True,
-                                       excludemax=True)),
-                             f([0, 1, 2]))
-            self.assertEqual(list(meth(0, 3, excludemin=True,
-                                       excludemax=True)),
-                             f([1, 2]))
-            self.assertEqual(list(meth(-1, 2, excludemin=True,
-                                       excludemax=True)),
-                             f([0, 1]))
-            self.assertEqual(list(meth(0, 2, excludemin=True,
-                                       excludemax=True)),
-                             f([1]))
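-            # (In short: excludemin/excludemax turn the min/max bounds from
-            # inclusive into exclusive endpoints; with no explicit bounds
-            # they exclude the smallest/largest key present.)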
-
-class NormalSetTests(Base):
-    """ Test common to all set types """
-
-    def _populate(self, t, l):
-        # Make some data
-        t.update(range(l))
-
-    def testInsertReturnsValue(self):
-        t = self.t
-        self.assertEqual(t.insert(5) , 1)
-
-    def testDuplicateInsert(self):
-        t = self.t
-        t.insert(5)
-        self.assertEqual(t.insert(5) , 0)
-
-    def testInsert(self):
-        t = self.t
-        t.insert(1)
-        self.assert_(t.has_key(1))
-        self.assert_(1 in t)
-        self.assert_(2 not in t)
-
-    def testBigInsert(self):
-        t = self.t
-        r = xrange(10000)
-        for x in r:
-            t.insert(x)
-        for x in r:
-            self.assert_(t.has_key(x))
-            self.assert_(x in t)
-
-    def testRemoveSucceeds(self):
-        t = self.t
-        r = xrange(10000)
-        for x in r: t.insert(x)
-        for x in r: t.remove(x)
-
-    def testRemoveFails(self):
-        self.assertRaises(KeyError, self._removenonexistent)
-
-    def _removenonexistent(self):
-        self.t.remove(1)
-
-    def testHasKeyFails(self):
-        t = self.t
-        self.assert_(not t.has_key(1))
-        self.assert_(1 not in t)
-
-    def testKeys(self):
-        t = self.t
-        r = xrange(1000)
-        for x in r:
-            t.insert(x)
-        diff = lsubtract(t.keys(), r)
-        self.assertEqual(diff, [])
-
-
-    def testClear(self):
-        t = self.t
-        r = xrange(1000)
-        for x in r: t.insert(x)
-        t.clear()
-        diff = lsubtract(t.keys(), [])
-        self.assertEqual(diff , [], diff)
-
-    def testMaxKeyMinKey(self):
-        t = self.t
-        t.insert(1)
-        t.insert(2)
-        t.insert(3)
-        t.insert(8)
-        t.insert(5)
-        t.insert(10)
-        t.insert(6)
-        t.insert(4)
-        self.assertEqual(t.maxKey() , 10)
-        self.assertEqual(t.maxKey(6) , 6)
-        self.assertEqual(t.maxKey(9) , 8)
-        self.assertEqual(t.minKey() , 1)
-        self.assertEqual(t.minKey(3) , 3)
-        self.assertEqual(t.minKey(9) , 10)
-        self.assert_(t.minKey() in t)
-        self.assert_(t.minKey()-1 not in t)
-        self.assert_(t.maxKey() in t)
-        self.assert_(t.maxKey()+1 not in t)
-
-    def testUpdate(self):
-        d={}
-        l=[]
-        for i in range(10000):
-            k=random.randrange(-2000, 2001)
-            d[k]=i
-            l.append(k)
-
-        items = d.keys()
-        items.sort()
-
-        self.t.update(l)
-        self.assertEqual(list(self.t.keys()), items)
-
-    def testEmptyRangeSearches(self):
-        t = self.t
-        t.update([1, 5, 9])
-        self.assertEqual(list(t.keys(-6,-4)), [], list(t.keys(-6,-4)))
-        self.assertEqual(list(t.keys(2,4)), [], list(t.keys(2,4)))
-        self.assertEqual(list(t.keys(6,8)), [], list(t.keys(6,8)))
-        self.assertEqual(list(t.keys(10,12)), [], list(t.keys(10,12)))
-        self.assertEqual(list(t.keys(9,1)), [], list(t.keys(9,1)))
-
-        # For IITreeSets, this one was returning 31 for len(keys), and
-        # list(keys) produced a list with 100 elements.
-        t.clear()
-        t.update(range(300))
-        keys = t.keys(200, 50)
-        self.assertEqual(len(keys), 0)
-        self.assertEqual(list(keys), [])
-
-        keys = t.keys(max=50, min=200)
-        self.assertEqual(len(keys), 0)
-        self.assertEqual(list(keys), [])
-
-    def testSlicing(self):
-        # Test that slicing of .keys() works exactly the same way as slicing
-        # a Python list with the same contents.
-
-        t = self.t
-        for n in range(10):
-            t.clear()
-            self.assertEqual(len(t), 0)
-
-            keys = range(10*n, 11*n)
-            t.update(keys)
-            self.assertEqual(len(t), n)
-
-            kslice = t.keys()
-            self.assertEqual(len(kslice), n)
-
-            # Test whole-structure slices.
-            x = kslice[:]
-            self.assertEqual(list(x), keys[:])
-
-            for lo in range(-2*n, 2*n+1):
-                # Test one-sided slices.
-                x = kslice[:lo]
-                self.assertEqual(list(x), keys[:lo])
-                x = kslice[lo:]
-                self.assertEqual(list(x), keys[lo:])
-
-                for hi in range(-2*n, 2*n+1):
-                    # Test two-sided slices.
-                    x = kslice[lo:hi]
-                    self.assertEqual(list(x), keys[lo:hi])
-
-    def testIterator(self):
-        t = self.t
-
-        for keys in [], [-2], [1, 4], range(-170, 2000, 6):
-            t.clear()
-            t.update(keys)
-
-            self.assertEqual(list(t), keys)
-
-            x = []
-            for k in t:
-                x.append(k)
-            self.assertEqual(x, keys)
-
-            it = iter(t)
-            self.assert_(it is iter(it))
-            x = []
-            try:
-                while 1:
-                    x.append(it.next())
-            except StopIteration:
-                pass
-            self.assertEqual(x, keys)
-
-class ExtendedSetTests(NormalSetTests):
-    def testLen(self):
-        t = self.t
-        r = xrange(10000)
-        for x in r: t.insert(x)
-        self.assertEqual(len(t) , 10000, len(t))
-
-    def testGetItem(self):
-        t = self.t
-        r = xrange(10000)
-        for x in r: t.insert(x)
-        for x in r:
-            self.assertEqual(t[x] , x)
-
-class BTreeTests(MappingBase):
-    """ Tests common to all BTrees """
-
-    def tearDown(self):
-        self.t._check()
-        check(self.t)
-        MappingBase.tearDown(self)
-
-    def testDeleteNoChildrenWorks(self):
-        self.t[5] = 6
-        self.t[2] = 10
-        self.t[6] = 12
-        self.t[1] = 100
-        self.t[3] = 200
-        self.t[10] = 500
-        self.t[4] = 99
-        del self.t[4]
-        diff = lsubtract(self.t.keys(), [1,2,3,5,6,10])
-        self.assertEqual(diff , [], diff)
-
-    def testDeleteOneChildWorks(self):
-        self.t[5] = 6
-        self.t[2] = 10
-        self.t[6] = 12
-        self.t[1] = 100
-        self.t[3] = 200
-        self.t[10] = 500
-        self.t[4] = 99
-        del self.t[3]
-        diff = lsubtract(self.t.keys(), [1,2,4,5,6,10])
-        self.assertEqual(diff , [], diff)
-
-    def testDeleteTwoChildrenNoInorderSuccessorWorks(self):
-        self.t[5] = 6
-        self.t[2] = 10
-        self.t[6] = 12
-        self.t[1] = 100
-        self.t[3] = 200
-        self.t[10] = 500
-        self.t[4] = 99
-        del self.t[2]
-        diff = lsubtract(self.t.keys(), [1,3,4,5,6,10])
-        self.assertEqual(diff , [], diff)
-
-    def testDeleteTwoChildrenInorderSuccessorWorks(self):
-        # 7, 3, 8, 1, 5, 10, 6, 4 -- del 3
-        self.t[7] = 6
-        self.t[3] = 10
-        self.t[8] = 12
-        self.t[1] = 100
-        self.t[5] = 200
-        self.t[10] = 500
-        self.t[6] = 99
-        self.t[4] = 150
-        del self.t[3]
-        diff = lsubtract(self.t.keys(), [1,4,5,6,7,8,10])
-        self.assertEqual(diff , [], diff)
-
-    def testDeleteRootWorks(self):
-        # 7, 3, 8, 1, 5, 10, 6, 4 -- del 7
-        self.t[7] = 6
-        self.t[3] = 10
-        self.t[8] = 12
-        self.t[1] = 100
-        self.t[5] = 200
-        self.t[10] = 500
-        self.t[6] = 99
-        self.t[4] = 150
-        del self.t[7]
-        diff = lsubtract(self.t.keys(), [1,3,4,5,6,8,10])
-        self.assertEqual(diff , [], diff)
-
-    def testRandomNonOverlappingInserts(self):
-        added = {}
-        r = range(100)
-        for x in r:
-            k = random.choice(r)
-            if not added.has_key(k):
-                self.t[k] = x
-                added[k] = 1
-        addl = added.keys()
-        addl.sort()
-        diff = lsubtract(list(self.t.keys()), addl)
-        self.assertEqual(diff , [], (diff, addl, list(self.t.keys())))
-
-    def testRandomOverlappingInserts(self):
-        added = {}
-        r = range(100)
-        for x in r:
-            k = random.choice(r)
-            self.t[k] = x
-            added[k] = 1
-        addl = added.keys()
-        addl.sort()
-        diff = lsubtract(self.t.keys(), addl)
-        self.assertEqual(diff , [], diff)
-
-    def testRandomDeletes(self):
-        r = range(1000)
-        added = []
-        for x in r:
-            k = random.choice(r)
-            self.t[k] = x
-            added.append(k)
-        deleted = []
-        for x in r:
-            k = random.choice(r)
-            if self.t.has_key(k):
-                self.assert_(k in self.t)
-                del self.t[k]
-                deleted.append(k)
-                if self.t.has_key(k):
-                    self.fail( "had problems deleting %s" % k )
-        badones = []
-        for x in deleted:
-            if self.t.has_key(x):
-                badones.append(x)
-        self.assertEqual(badones , [], (badones, added, deleted))
-
-    def testTargetedDeletes(self):
-        r = range(1000)
-        for x in r:
-            k = random.choice(r)
-            self.t[k] = x
-        for x in r:
-            try:
-                del self.t[x]
-            except KeyError:
-                pass
-        self.assertEqual(realseq(self.t.keys()) , [], realseq(self.t.keys()))
-
-    def testPathologicalRightBranching(self):
-        r = range(1000)
-        for x in r:
-            self.t[x] = 1
-        self.assertEqual(realseq(self.t.keys()) , r, realseq(self.t.keys()))
-        for x in r:
-            del self.t[x]
-        self.assertEqual(realseq(self.t.keys()) , [], realseq(self.t.keys()))
-
-    def testPathologicalLeftBranching(self):
-        r = range(1000)
-        revr = r[:]
-        revr.reverse()
-        for x in revr:
-            self.t[x] = 1
-        self.assertEqual(realseq(self.t.keys()) , r, realseq(self.t.keys()))
-
-        for x in revr:
-            del self.t[x]
-        self.assertEqual(realseq(self.t.keys()) , [], realseq(self.t.keys()))
-
-    def testSuccessorChildParentRewriteExerciseCase(self):
-        add_order = [
-            85, 73, 165, 273, 215, 142, 233, 67, 86, 166, 235, 225, 255,
-            73, 175, 171, 285, 162, 108, 28, 283, 258, 232, 199, 260,
-            298, 275, 44, 261, 291, 4, 181, 285, 289, 216, 212, 129,
-            243, 97, 48, 48, 159, 22, 285, 92, 110, 27, 55, 202, 294,
-            113, 251, 193, 290, 55, 58, 239, 71, 4, 75, 129, 91, 111,
-            271, 101, 289, 194, 218, 77, 142, 94, 100, 115, 101, 226,
-            17, 94, 56, 18, 163, 93, 199, 286, 213, 126, 240, 245, 190,
-            195, 204, 100, 199, 161, 292, 202, 48, 165, 6, 173, 40, 218,
-            271, 228, 7, 166, 173, 138, 93, 22, 140, 41, 234, 17, 249,
-            215, 12, 292, 246, 272, 260, 140, 58, 2, 91, 246, 189, 116,
-            72, 259, 34, 120, 263, 168, 298, 118, 18, 28, 299, 192, 252,
-            112, 60, 277, 273, 286, 15, 263, 141, 241, 172, 255, 52, 89,
-            127, 119, 255, 184, 213, 44, 116, 231, 173, 298, 178, 196,
-            89, 184, 289, 98, 216, 115, 35, 132, 278, 238, 20, 241, 128,
-            179, 159, 107, 206, 194, 31, 260, 122, 56, 144, 118, 283,
-            183, 215, 214, 87, 33, 205, 183, 212, 221, 216, 296, 40,
-            108, 45, 188, 139, 38, 256, 276, 114, 270, 112, 214, 191,
-            147, 111, 299, 107, 101, 43, 84, 127, 67, 205, 251, 38, 91,
-            297, 26, 165, 187, 19, 6, 73, 4, 176, 195, 90, 71, 30, 82,
-            139, 210, 8, 41, 253, 127, 190, 102, 280, 26, 233, 32, 257,
-            194, 263, 203, 190, 111, 218, 199, 29, 81, 207, 18, 180,
-            157, 172, 192, 135, 163, 275, 74, 296, 298, 265, 105, 191,
-            282, 277, 83, 188, 144, 259, 6, 173, 81, 107, 292, 231,
-            129, 65, 161, 113, 103, 136, 255, 285, 289, 1
-            ]
-        delete_order = [
-            276, 273, 12, 275, 2, 286, 127, 83, 92, 33, 101, 195,
-            299, 191, 22, 232, 291, 226, 110, 94, 257, 233, 215, 184,
-            35, 178, 18, 74, 296, 210, 298, 81, 265, 175, 116, 261,
-            212, 277, 260, 234, 6, 129, 31, 4, 235, 249, 34, 289, 105,
-            259, 91, 93, 119, 7, 183, 240, 41, 253, 290, 136, 75, 292,
-            67, 112, 111, 256, 163, 38, 126, 139, 98, 56, 282, 60, 26,
-            55, 245, 225, 32, 52, 40, 271, 29, 252, 239, 89, 87, 205,
-            213, 180, 97, 108, 120, 218, 44, 187, 196, 251, 202, 203,
-            172, 28, 188, 77, 90, 199, 297, 282, 141, 100, 161, 216,
-            73, 19, 17, 189, 30, 258
-            ]
-        for x in add_order:
-            self.t[x] = 1
-        for x in delete_order:
-            try:
-                del self.t[x]
-            except KeyError:
-                if self.t.has_key(x):
-                    self.fail("failed to delete %s" % x)
-
-    def testRangeSearchAfterSequentialInsert(self):
-        r = range(100)
-        for x in r:
-            self.t[x] = 0
-        diff = lsubtract(list(self.t.keys(0, 100)), r)
-        self.assertEqual(diff , [], diff)
-
-    def testRangeSearchAfterRandomInsert(self):
-        r = range(100)
-        a = {}
-        for x in r:
-            rnd = random.choice(r)
-            self.t[rnd] = 0
-            a[rnd] = 0
-        diff = lsubtract(list(self.t.keys(0, 100)), a.keys())
-        self.assertEqual(diff , [], diff)
-
-    def testPathologicalRangeSearch(self):
-        t = self.t
-        # Build a 2-level tree with at least two buckets.
-        for i in range(200):
-            t[i] = i
-        items, dummy = t.__getstate__()
-        self.assert_(len(items) > 2)   # at least two buckets and a key
-        # All values in the first bucket are < firstkey.  All in the
-        # second bucket are >= firstkey, and firstkey is the first key in
-        # the second bucket.
-        firstkey = items[1]
-        therange = t.keys(-1, firstkey)
-        self.assertEqual(len(therange), firstkey + 1)
-        self.assertEqual(list(therange), range(firstkey + 1))
-        # Now for the tricky part.  If we delete firstkey, the second bucket
-        # loses its smallest key, but firstkey remains in the BTree node.
-        # If we then do a high-end range search on firstkey, the BTree node
-        # directs us to look in the second bucket, but there's no longer any
-        # key <= firstkey in that bucket.  The correct answer points to the
-        # end of the *first* bucket.  The algorithm has to be smart enough
-        # to "go backwards" in the BTree then; if it doesn't, it will
-        # erroneously claim that the range is empty.
-        del t[firstkey]
-        therange = t.keys(min=-1, max=firstkey)
-        self.assertEqual(len(therange), firstkey)
-        self.assertEqual(list(therange), range(firstkey))
-
-    def testInsertMethod(self):
-        t = self.t
-        t[0] = 1
-        self.assertEqual(t.insert(0, 1) , 0)
-        self.assertEqual(t.insert(1, 1) , 1)
-        self.assertEqual(lsubtract(list(t.keys()), [0,1]) , [])
-
-    def testDamagedIterator(self):
-        # A cute one from Steve Alexander.  This caused the BTreeItems
-        # object to go insane, accessing memory beyond the allocated part
-        # of the bucket.  If it fails, the symptom is either a C-level
-        # assertion error (if the BTree code was compiled without NDEBUG),
-        # or most likely a segfault (if the BTree code was compiled with
-        # NDEBUG).
-
-        t = self.t.__class__()
-        self._populate(t, 10)
-        # In order for this to fail, it's important that k be a "lazy"
-        # iterator, referring to the BTree by indirect position (index)
-        # instead of a fully materialized list.  Then the position can
-        # end up pointing into trash memory, if the bucket pointed to
-        # shrinks.
-        k = t.keys()
-        for dummy in range(20):
-            try:
-                del t[k[0]]
-            except RuntimeError, detail:
-                self.assertEqual(str(detail), "the bucket being iterated "
-                                              "changed size")
-                break
-
-# tests of various type errors
-
-class TypeTest(TestCase):
-
-    def testBadTypeRaises(self):
-        self.assertRaises(TypeError, self._stringraises)
-        self.assertRaises(TypeError, self._floatraises)
-        self.assertRaises(TypeError, self._noneraises)
-
-class TestIOBTrees(TypeTest):
-    def setUp(self):
-        self.t = IOBTree()
-
-    def _stringraises(self):
-        self.t['c'] = 1
-
-    def _floatraises(self):
-        self.t[2.5] = 1
-
-    def _noneraises(self):
-        self.t[None] = 1
-
-class TestOIBTrees(TypeTest):
-    def setUp(self):
-        self.t = OIBTree()
-
-    def _stringraises(self):
-        self.t[1] = 'c'
-
-    def _floatraises(self):
-        self.t[1] = 1.4
-
-    def _noneraises(self):
-        self.t[1] = None
-
-    def testEmptyFirstBucketReportedByGuido(self):
-        b = self.t
-        for i in xrange(29972): # reduce to 29971 and it works
-            b[i] = i
-        for i in xrange(30): # reduce to 29 and it works
-            del b[i]
-            b[i+40000] = i
-
-        self.assertEqual(b.keys()[0], 30)
-
-class TestIIBTrees(TestCase):
-    def setUp(self):
-        self.t = IIBTree()
-
-    def testNonIntegerKeyRaises(self):
-        self.assertRaises(TypeError, self._stringraiseskey)
-        self.assertRaises(TypeError, self._floatraiseskey)
-        self.assertRaises(TypeError, self._noneraiseskey)
-
-    def testNonIntegerValueRaises(self):
-        self.assertRaises(TypeError, self._stringraisesvalue)
-        self.assertRaises(TypeError, self._floatraisesvalue)
-        self.assertRaises(TypeError, self._noneraisesvalue)
-
-    def _stringraiseskey(self):
-        self.t['c'] = 1
-
-    def _floatraiseskey(self):
-        self.t[2.5] = 1
-
-    def _noneraiseskey(self):
-        self.t[None] = 1
-
-    def _stringraisesvalue(self):
-        self.t[1] = 'c'
-
-    def _floatraisesvalue(self):
-        self.t[1] = 1.4
-
-    def _noneraisesvalue(self):
-        self.t[1] = None
-
-class TestIFBTrees(TestCase):
-    def setUp(self):
-        self.t = IFBTree()
-
-    def testNonIntegerKeyRaises(self):
-        self.assertRaises(TypeError, self._stringraiseskey)
-        self.assertRaises(TypeError, self._floatraiseskey)
-        self.assertRaises(TypeError, self._noneraiseskey)
-
-    def testNonNumericValueRaises(self):
-        self.assertRaises(TypeError, self._stringraisesvalue)
-        self.assertRaises(TypeError, self._noneraisesvalue)
-        self.t[1] = 1
-        self.t[1] = 1.0
-
-    def _stringraiseskey(self):
-        self.t['c'] = 1
-
-    def _floatraiseskey(self):
-        self.t[2.5] = 1
-
-    def _noneraiseskey(self):
-        self.t[None] = 1
-
-    def _stringraisesvalue(self):
-        self.t[1] = 'c'
-
-    def _floatraisesvalue(self):
-        self.t[1] = 1.4
-
-    def _noneraisesvalue(self):
-        self.t[1] = None
-
-class TestIOSets(TestCase):
-    def setUp(self):
-        self.t = IOSet()
-
-    def testNonIntegerInsertRaises(self):
-        self.assertRaises(TypeError,self._insertstringraises)
-        self.assertRaises(TypeError,self._insertfloatraises)
-        self.assertRaises(TypeError,self._insertnoneraises)
-
-    def _insertstringraises(self):
-        self.t.insert('a')
-
-    def _insertfloatraises(self):
-        self.t.insert(1.4)
-
-    def _insertnoneraises(self):
-        self.t.insert(None)
-
-class DegenerateBTree(TestCase):
-    # Build a degenerate tree (set).  Boxes are BTree nodes.  There are
-    # 5 leaf buckets, each containing a single int.  Keys in the BTree
-    # nodes don't appear in the buckets.  Seven BTree nodes are purely
-    # indirection nodes (no keys).  Buckets aren't all at the same depth:
-    #
-    #     +------------------------+
-    #     |          4             |
-    #     +------------------------+
-    #         |              |
-    #         |              v
-    #         |             +-+
-    #         |             | |
-    #         |             +-+
-    #         |              |
-    #         v              v
-    #     +-------+   +-------------+
-    #     |   2   |   |   6     10  |
-    #     +-------+   +-------------+
-    #      |     |     |     |     |
-    #      v     v     v     v     v
-    #     +-+   +-+   +-+   +-+   +-+
-    #     | |   | |   | |   | |   | |
-    #     +-+   +-+   +-+   +-+   +-+
-    #      |     |     |     |     |
-    #      v     v     v     v     v
-    #      1     3    +-+    7     11
-    #                 | |
-    #                 +-+
-    #                  |
-    #                  v
-    #                  5
-    #
-    # This is nasty for many algorithms.  Consider a high-end range search
-    # for 4.  The BTree nodes direct it to the 5 bucket, but the correct
-    # answer is the 3 bucket, which requires taking a different branch as
-    # early as the very top node.  Consider a low-end range search for
-    # 9.  The BTree nodes direct it to the 7 bucket, but the correct answer
-    # is the 11 bucket.  This is also a nasty-case tree for deletions.
-
-    def _build_degenerate_tree(self):
-        # Build the buckets and chain them together.
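-        # A note on the state format used here (inferred from these calls,
-        # not a documented API): a set bucket's state is
-        # ((key0, key1, ...), next_bucket), and a tree node's state is
-        # ((child0, key1, child1, ...), firstbucket) -- e.g.
-        # ((tree1, 2, tree3), bucket1) below is a node with two children
-        # separated by key 2.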
-        bucket11 = IISet([11])
-
-        bucket7 = IISet()
-        bucket7.__setstate__(((7,), bucket11))
-
-        bucket5 = IISet()
-        bucket5.__setstate__(((5,), bucket7))
-
-        bucket3 = IISet()
-        bucket3.__setstate__(((3,), bucket5))
-
-        bucket1 = IISet()
-        bucket1.__setstate__(((1,), bucket3))
-
-        # Build the deepest layers of indirection nodes.
-        ts = IITreeSet
-        tree1 = ts()
-        tree1.__setstate__(((bucket1,), bucket1))
-
-        tree3 = ts()
-        tree3.__setstate__(((bucket3,), bucket3))
-
-        tree5lower = ts()
-        tree5lower.__setstate__(((bucket5,), bucket5))
-        tree5 = ts()
-        tree5.__setstate__(((tree5lower,), bucket5))
-
-        tree7 = ts()
-        tree7.__setstate__(((bucket7,), bucket7))
-
-        tree11 = ts()
-        tree11.__setstate__(((bucket11,), bucket11))
-
-        # Paste together the middle layers.
-        tree13 = ts()
-        tree13.__setstate__(((tree1, 2, tree3), bucket1))
-
-        tree5711lower = ts()
-        tree5711lower.__setstate__(((tree5, 6, tree7, 10, tree11), bucket5))
-        tree5711 = ts()
-        tree5711.__setstate__(((tree5711lower,), bucket5))
-
-        # One more.
-        t = ts()
-        t.__setstate__(((tree13, 4, tree5711), bucket1))
-        t._check()
-        check(t)
-        return t, [1, 3, 5, 7, 11]
-
-    def testBasicOps(self):
-        t, keys = self._build_degenerate_tree()
-        self.assertEqual(len(t), len(keys))
-        self.assertEqual(list(t.keys()), keys)
-        # has_key actually returns the depth of a bucket.
-        self.assertEqual(t.has_key(1), 4)
-        self.assertEqual(t.has_key(3), 4)
-        self.assertEqual(t.has_key(5), 6)
-        self.assertEqual(t.has_key(7), 5)
-        self.assertEqual(t.has_key(11), 5)
-        for i in 0, 2, 4, 6, 8, 9, 10, 12:
-            self.assert_(i not in t)
-
-    def _checkRanges(self, tree, keys):
-        self.assertEqual(len(tree), len(keys))
-        sorted_keys = keys[:]
-        sorted_keys.sort()
-        self.assertEqual(list(tree.keys()), sorted_keys)
-        for k in keys:
-            self.assert_(k in tree)
-        if keys:
-            lokey = sorted_keys[0]
-            hikey = sorted_keys[-1]
-            self.assertEqual(lokey, tree.minKey())
-            self.assertEqual(hikey, tree.maxKey())
-        else:
-            lokey = hikey = 42
-
-        # Try all range searches.
-        for lo in range(lokey - 1, hikey + 2):
-            for hi in range(lo - 1, hikey + 2):
-                for skipmin in False, True:
-                    for skipmax in False, True:
-                        wantlo, wanthi = lo, hi
-                        if skipmin:
-                            wantlo += 1
-                        if skipmax:
-                            wanthi -= 1
-                        want = [k for k in keys if wantlo <= k <= wanthi]
-                        got = list(tree.keys(lo, hi, skipmin, skipmax))
-                        self.assertEqual(want, got)
-
-    def testRanges(self):
-        t, keys = self._build_degenerate_tree()
-        self._checkRanges(t, keys)
-
-    def testDeletes(self):
-        # Delete keys in all possible orders, checking each tree along
-        # the way.
-
-        # This is a tough test.  Previous failure modes included:
-        # 1. A variety of assertion failures in _checkRanges.
-        # 2. Assorted "Invalid firstbucket pointer" failures at
-        #    seemingly random times, coming out of the BTree destructor.
-        # 3. Under Python 2.3 CVS, some baffling
-        #      RuntimeWarning: tp_compare didn't return -1 or -2 for exception
-        #    warnings, possibly due to memory corruption after a BTree
-        #    goes insane.
-
-        t, keys = self._build_degenerate_tree()
-        for oneperm in permutations(keys):
-            t, keys = self._build_degenerate_tree()
-            for key in oneperm:
-                t.remove(key)
-                keys.remove(key)
-                t._check()
-                check(t)
-                self._checkRanges(t, keys)
-            # We removed all the keys, so the tree should be empty now.
-            self.assertEqual(t.__getstate__(), None)
-
-            # A damaged tree may trigger an "invalid firstbucket pointer"
-            # failure at the time its destructor is invoked.  Try to force
-            # that to happen now, so it doesn't look like a baffling failure
-            # at some unrelated line.
-            del t   # trigger destructor
-
-class IIBucketTest(MappingBase):
-    def setUp(self):
-        self.t = IIBucket()
-class IFBucketTest(MappingBase):
-    def setUp(self):
-        self.t = IFBucket()
-class IOBucketTest(MappingBase):
-    def setUp(self):
-        self.t = IOBucket()
-class OIBucketTest(MappingBase):
-    def setUp(self):
-        self.t = OIBucket()
-class OOBucketTest(MappingBase):
-    def setUp(self):
-        self.t = OOBucket()
-
-class IITreeSetTest(NormalSetTests):
-    def setUp(self):
-        self.t = IITreeSet()
-class IFTreeSetTest(NormalSetTests):
-    def setUp(self):
-        self.t = IFTreeSet()
-class IOTreeSetTest(NormalSetTests):
-    def setUp(self):
-        self.t = IOTreeSet()
-class OITreeSetTest(NormalSetTests):
-    def setUp(self):
-        self.t = OITreeSet()
-class OOTreeSetTest(NormalSetTests):
-    def setUp(self):
-        self.t = OOTreeSet()
-
-class IISetTest(ExtendedSetTests):
-    def setUp(self):
-        self.t = IISet()
-class IFSetTest(ExtendedSetTests):
-    def setUp(self):
-        self.t = IFSet()
-class IOSetTest(ExtendedSetTests):
-    def setUp(self):
-        self.t = IOSet()
-class OISetTest(ExtendedSetTests):
-    def setUp(self):
-        self.t = OISet()
-class OOSetTest(ExtendedSetTests):
-    def setUp(self):
-        self.t = OOSet()
-
-class IIBTreeTest(BTreeTests):
-    def setUp(self):
-        self.t = IIBTree()
-class IFBTreeTest(BTreeTests):
-    def setUp(self):
-        self.t = IFBTree()
-class IOBTreeTest(BTreeTests):
-    def setUp(self):
-        self.t = IOBTree()
-class OIBTreeTest(BTreeTests):
-    def setUp(self):
-        self.t = OIBTree()
-class OOBTreeTest(BTreeTests):
-    def setUp(self):
-        self.t = OOBTree()
-
-# cmp error propagation tests
-
-class DoesntLikeBeingCompared:
-    def __cmp__(self,other):
-        raise ValueError('incomparable')
-
-class TestCmpError(TestCase):
-    def testFoo(self):
-        t = OOBTree()
-        t['hello world'] = None
-        try:
-            t[DoesntLikeBeingCompared()] = None
-        except ValueError, e:
-            self.assertEqual(str(e), 'incomparable')
-        else:
-            self.fail('incomparable objects should not be allowed into '
-                      'the tree')
-
-def test_suite():
-    s = TestSuite()
-
-    for klass in (
-        IIBucketTest, IIBTreeTest, IISetTest, IITreeSetTest,
-        IFBucketTest, IFBTreeTest, IFSetTest, IFTreeSetTest,
-        IOBucketTest, IOBTreeTest, IOSetTest, IOTreeSetTest,
-        OOBucketTest, OOBTreeTest, OOSetTest, OOTreeSetTest,
-        OIBucketTest, OIBTreeTest, OISetTest, OITreeSetTest,
-
-        # Note:  there is no TestOOBTrees.  The next five are
-        # checking for assorted TypeErrors, and when both keys
-        # and values are objects (OO), there's nothing to test.
-        TestIIBTrees, TestIFBTrees,  TestIOBTrees,  TestOIBTrees,
-        TestIOSets,
-        DegenerateBTree,
-        TestCmpError):
-        s.addTest(makeSuite(klass))
-
-    return s
-
-## utility functions
-
-def lsubtract(l1, l2):
-    l1 = list(l1)
-    l2 = list(l2)
-    l = filter(lambda x, l1=l1: x not in l1, l2)
-    l = l + filter(lambda x, l2=l2: x not in l2, l1)
-    return l
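-
-# A quick note on lsubtract (an explanatory aside, not original test data):
-# it computes the symmetric difference of its two sequences, e.g.
-#     lsubtract([0, 1, 2], [1, 2, 3]) == [3, 0]
-# so an empty result means both sequences hold exactly the same elements.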
-
-def realseq(itemsob):
-    return [x for x in itemsob]
-
-def permutations(x):
-    # Return a list of all permutations of list x.
-    n = len(x)
-    if n <= 1:
-        return [x]
-    result = []
-    x0 = x[0]
-    for i in range(n):
-        # Build the (n-1)! permutations with x[i] in the first position.
-        xcopy = x[:]
-        first, xcopy[i] = xcopy[i], x0
-        result.extend([[first] + p for p in permutations(xcopy[1:])])
-    return result
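-
-# For example (a sketch of the helper's behavior):
-#     permutations([1, 2, 3]) yields all 3! orderings:
-#     [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]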
-
-
-def main():
-    TextTestRunner().run(test_suite())
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/BTrees/tests/testBTreesUnicode.py b/branches/bug1734/src/BTrees/tests/testBTreesUnicode.py
deleted file mode 100644
index 1d27dbee..00000000
--- a/branches/bug1734/src/BTrees/tests/testBTreesUnicode.py
+++ /dev/null
@@ -1,76 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import unittest
-from BTrees.OOBTree import OOBTree
-
-# When an OOBTree contains unicode strings as keys, any non-unicode
-# strings used to access them must either be plain ASCII or be decoded
-# to unicode using the corresponding encoding.
-
-encoding = 'ISO-8859-1'
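-
-# For example (illustrative only): tree[unicode('k\xf6nnten', encoding)]
-# finds the key below, while indexing with the raw byte string
-# 'k\xf6nnten' would not, since non-ASCII byte strings can't be
-# implicitly decoded to unicode for comparison.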
-
-class TestBTreesUnicode(unittest.TestCase):
-    """ test unicode"""
-
-    def setUp(self):
-        """setup an OOBTree with some unicode strings"""
-
-        self.s = unicode('dreit\xe4gigen', 'latin1')
-
-        self.data = [('alien', 1),
-                     ('k\xf6nnten', 2),
-                     ('fox', 3),
-                     ('future', 4),
-                     ('quick', 5),
-                     ('zerst\xf6rt', 6),
-                     (unicode('dreit\xe4gigen','latin1'), 7),
-                    ]
-
-        self.tree = OOBTree()
-        for k, v in self.data:
-            if isinstance(k, str):
-                k = unicode(k, 'latin1')
-            self.tree[k] = v
-
-    def testAllKeys(self):
-        # check every item of the tree
-        for k, v in self.data:
-            if isinstance(k, str):
-                k = unicode(k, encoding)
-            self.assert_(self.tree.has_key(k))
-            self.assertEqual(self.tree[k], v)
-
-    def testUnicodeKeys(self):
-        # try to access unicode keys in tree
-        k, v = self.data[-1]
-        self.assertEqual(k, self.s)
-        self.assertEqual(self.tree[k], v)
-        self.assertEqual(self.tree[self.s], v)
-
-    def testAsciiKeys(self):
-        # try to access some "plain ASCII" keys in the tree
-        for k, v in self.data[0], self.data[2]:
-            self.assert_(isinstance(k, str))
-            self.assertEqual(self.tree[k], v)
-
-def test_suite():
-    return unittest.makeSuite(TestBTreesUnicode)
-
-def main():
-    unittest.TextTestRunner().run(test_suite())
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/BTrees/tests/testConflict.py b/branches/bug1734/src/BTrees/tests/testConflict.py
deleted file mode 100644
index 6a598600..00000000
--- a/branches/bug1734/src/BTrees/tests/testConflict.py
+++ /dev/null
@@ -1,857 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import os
-from unittest import TestCase, TestSuite, makeSuite
-
-from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet
-from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet
-from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet
-from BTrees.IFBTree import IFBTree, IFBucket, IFSet, IFTreeSet
-from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet
-
-import transaction
-from ZODB.POSException import ConflictError
-
-class Base:
-    """ Tests common to all types: sets, buckets, and BTrees """
-
-    storage = None
-
-    def tearDown(self):
-        transaction.abort()
-        del self.t
-        if self.storage is not None:
-            self.storage.close()
-            self.storage.cleanup()
-
-    def openDB(self):
-        from ZODB.FileStorage import FileStorage
-        from ZODB.DB import DB
-        n = 'fs_tmp__%s' % os.getpid()
-        self.storage = FileStorage(n)
-        self.db = DB(self.storage)
-
-class MappingBase(Base):
-    """ Tests common to mappings (buckets, btrees) """
-
-    def _deletefail(self):
-        del self.t[1]
-
-    def _setupConflict(self):
-
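-        # base is the shared ancestor state; b1 and b2 stand in for two
-        # transactions that diverge from it, and bm is built by hand to
-        # hold the merge that conflict resolution is expected to produce.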
-        l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679,
-            3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191,
-            -4067]
-
-
-        e1=[(-1704, 0), (5420, 1), (-239, 2), (4024, 3), (-6984, 4)]
-        e2=[(7745, 0), (4868, 1), (-2548, 2), (-2711, 3), (-3154, 4)]
-
-
-        base=self.t
-        base.update([(i, i*i) for i in l[:20]])
-        b1=base.__class__(base)
-        b2=base.__class__(base)
-        bm=base.__class__(base)
-
-        items=base.items()
-
-        return  base, b1, b2, bm, e1, e2, items
-
-    def testSimpleConflict(self):
-        # Unlike all the other tests, invoke conflict resolution
-        # by committing a transaction and catching a conflict
-        # in the storage.
-        self.openDB()
-
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open().root()
-        copy = r2["t"]
-        list(copy)    # unghostify
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        self.t.update({1:2, 2:3})
-        transaction.commit()
-
-        copy.update({3:4})
-        transaction.commit()
-
-
-    def testMergeDelete(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        del b1[items[0][0]]
-        del b2[items[5][0]]
-        del b1[items[-1][0]]
-        del b2[items[-2][0]]
-        del bm[items[0][0]]
-        del bm[items[5][0]]
-        del bm[items[-1][0]]
-        del bm[items[-2][0]]
-        test_merge(base, b1, b2, bm, 'merge delete')
-
-    def testMergeDeleteAndUpdate(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        del b1[items[0][0]]
-        b2[items[5][0]]=1
-        del b1[items[-1][0]]
-        b2[items[-2][0]]=2
-        del bm[items[0][0]]
-        bm[items[5][0]]=1
-        del bm[items[-1][0]]
-        bm[items[-2][0]]=2
-        test_merge(base, b1, b2, bm, 'merge update and delete')
-
-    def testMergeUpdate(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1[items[0][0]]=1
-        b2[items[5][0]]=2
-        b1[items[-1][0]]=3
-        b2[items[-2][0]]=4
-        bm[items[0][0]]=1
-        bm[items[5][0]]=2
-        bm[items[-1][0]]=3
-        bm[items[-2][0]]=4
-        test_merge(base, b1, b2, bm, 'merge update')
-
-    def testFailMergeDelete(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        del b1[items[0][0]]
-        del b2[items[0][0]]
-        test_merge(base, b1, b2, bm, 'merge conflicting delete',
-                   should_fail=1)
-
-    def testFailMergeUpdate(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1[items[0][0]]=1
-        b2[items[0][0]]=2
-        test_merge(base, b1, b2, bm, 'merge conflicting update',
-                   should_fail=1)
-
-    def testFailMergeDeleteAndUpdate(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        del b1[items[0][0]]
-        b2[items[0][0]]=-9
-        test_merge(base, b1, b2, bm, 'merge conflicting update and delete',
-                   should_fail=1)
-
-    def testMergeInserts(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1[-99999]=-99999
-        b1[e1[0][0]]=e1[0][1]
-        b2[99999]=99999
-        b2[e1[2][0]]=e1[2][1]
-
-        bm[-99999]=-99999
-        bm[e1[0][0]]=e1[0][1]
-        bm[99999]=99999
-        bm[e1[2][0]]=e1[2][1]
-        test_merge(base, b1, b2, bm, 'merge insert')
-
-    def testMergeInsertsFromEmpty(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        base.clear()
-        b1.clear()
-        b2.clear()
-        bm.clear()
-
-        b1.update(e1)
-        bm.update(e1)
-        b2.update(e2)
-        bm.update(e2)
-
-        test_merge(base, b1, b2, bm, 'merge insert from empty')
-
-    def testFailMergeEmptyAndFill(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1.clear()
-        bm.clear()
-        b2.update(e2)
-        bm.update(e2)
-
-        test_merge(base, b1, b2, bm, 'merge empty and fill', should_fail=1)
-
-    def testMergeEmpty(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1.clear()
-        bm.clear()
-
-        test_merge(base, b1, b2, bm, 'empty one and not other', should_fail=1)
-
-    def testFailMergeInsert(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1[-99999]=-99999
-        b1[e1[0][0]]=e1[0][1]
-        b2[99999]=99999
-        b2[e1[0][0]]=e1[0][1]
-        test_merge(base, b1, b2, bm, 'merge conflicting inserts',
-                   should_fail=1)
-
-class SetTests(Base):
-    "Set (as opposed to TreeSet) specific tests."
-
-    def _setupConflict(self):
-        l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679,
-            3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191,
-            -4067]
-
-        e1=[-1704, 5420, -239, 4024, -6984]
-        e2=[7745, 4868, -2548, -2711, -3154]
-
-
-        base=self.t
-        base.update(l)
-        b1=base.__class__(base)
-        b2=base.__class__(base)
-        bm=base.__class__(base)
-
-        items=base.keys()
-
-        return  base, b1, b2, bm, e1, e2, items
-
-    def testMergeDelete(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1.remove(items[0])
-        b2.remove(items[5])
-        b1.remove(items[-1])
-        b2.remove(items[-2])
-        bm.remove(items[0])
-        bm.remove(items[5])
-        bm.remove(items[-1])
-        bm.remove(items[-2])
-        test_merge(base, b1, b2, bm, 'merge delete')
-
-    def testFailMergeDelete(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1.remove(items[0])
-        b2.remove(items[0])
-        test_merge(base, b1, b2, bm, 'merge conflicting delete',
-                   should_fail=1)
-
-    def testMergeInserts(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1.insert(-99999)
-        b1.insert(e1[0])
-        b2.insert(99999)
-        b2.insert(e1[2])
-
-        bm.insert(-99999)
-        bm.insert(e1[0])
-        bm.insert(99999)
-        bm.insert(e1[2])
-        test_merge(base, b1, b2, bm, 'merge insert')
-
-    def testMergeInsertsFromEmpty(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        base.clear()
-        b1.clear()
-        b2.clear()
-        bm.clear()
-
-        b1.update(e1)
-        bm.update(e1)
-        b2.update(e2)
-        bm.update(e2)
-
-        test_merge(base, b1, b2, bm, 'merge insert from empty')
-
-    def testFailMergeEmptyAndFill(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1.clear()
-        bm.clear()
-        b2.update(e2)
-        bm.update(e2)
-
-        test_merge(base, b1, b2, bm, 'merge empty and fill', should_fail=1)
-
-    def testMergeEmpty(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-
-        b1.clear()
-        bm.clear()
-
-        test_merge(base, b1, b2, bm, 'empty one and not other', should_fail=1)
-
-    def testFailMergeInsert(self):
-        base, b1, b2, bm, e1, e2, items = self._setupConflict()
-        b1.insert(-99999)
-        b1.insert(e1[0])
-        b2.insert(99999)
-        b2.insert(e1[0])
-        test_merge(base, b1, b2, bm, 'merge conflicting inserts',
-                   should_fail=1)
-
-
-def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0):
-    s1 = o1.__getstate__()
-    s2 = o2.__getstate__()
-    s3 = o3.__getstate__()
-    expected = expect.__getstate__()
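-    # An emptied container's __getstate__() is None; normalize that to the
-    # nested-tuple form a merge is expected to produce for an empty result
-    # (inferred from this suite's usage, not a documented invariant).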
-    if expected is None:
-        expected = ((((),),),)
-
-    if should_fail:
-        try:
-            merged = o1._p_resolveConflict(s1, s2, s3)
-        except ConflictError, err:
-            pass
-        else:
-            assert 0, message
-    else:
-        merged = o1._p_resolveConflict(s1, s2, s3)
-        assert merged == expected, message
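-
-# An aside on the API exercised by test_merge: persistent objects may
-# implement _p_resolveConflict(oldState, savedState, newState), which ZODB
-# calls during commit instead of raising ConflictError outright; it must
-# return a merged state or raise ConflictError itself.  Here s1/s2/s3 play
-# those three roles directly, bypassing the storage machinery.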
-
-class BucketTests(MappingBase):
-    """ Tests common to all buckets """
-
-class BTreeTests(MappingBase):
-    """ Tests common to all BTrees """
-
-## BTree tests
-
-class TestIOBTrees(BTreeTests, TestCase):
-    def setUp(self):
-        self.t = IOBTree()
-
-class TestOOBTrees(BTreeTests, TestCase):
-    def setUp(self):
-        self.t = OOBTree()
-
-class TestOIBTrees(BTreeTests, TestCase):
-    def setUp(self):
-        self.t = OIBTree()
-
-class TestIIBTrees(BTreeTests, TestCase):
-    def setUp(self):
-        self.t = IIBTree()
-
-class TestIFBTrees(BTreeTests, TestCase):
-    def setUp(self):
-        self.t = IFBTree()
-
-## Set tests
-
-class TestIOSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IOSet()
-
-class TestOOSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = OOSet()
-
-class TestIISets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IISet()
-
-class TestIFSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IFSet()
-
-class TestOISets(SetTests, TestCase):
-    def setUp(self):
-        self.t = OISet()
-
-class TestIOTreeSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IOTreeSet()
-
-class TestOOTreeSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = OOTreeSet()
-
-class TestIITreeSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IITreeSet()
-
-class TestIFTreeSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = IFTreeSet()
-
-class TestOITreeSets(SetTests, TestCase):
-    def setUp(self):
-        self.t = OITreeSet()
-
-## Bucket tests
-
-class TestIOBuckets(BucketTests, TestCase):
-    def setUp(self):
-        self.t = IOBucket()
-
-class TestOOBuckets(BucketTests, TestCase):
-    def setUp(self):
-        self.t = OOBucket()
-
-class TestIIBuckets(BucketTests, TestCase):
-    def setUp(self):
-        self.t = IIBucket()
-
-class TestIFBuckets(BucketTests, TestCase):
-    def setUp(self):
-        self.t = IFBucket()
-
-class TestOIBuckets(BucketTests, TestCase):
-    def setUp(self):
-        self.t = OIBucket()
-
-class NastyConfict(Base, TestCase):
-    def setUp(self):
-        self.t = OOBTree()
-
-    # This tests a problem that cropped up while trying to write
-    # testBucketSplitConflict (below):  conflict resolution wasn't
-    # working at all in non-trivial cases.  Symptoms varied from
-    # strange complaints about pickling (even though the test doesn't
-    # do any pickling *directly*), through SystemErrors from Python and
-    # AssertionErrors inside the BTree code.
-    def testResolutionBlowsUp(self):
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Invoke conflict resolution by committing a transaction.
-        self.openDB()
-
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open().root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        self.t.update({1:2, 2:3})
-        transaction.commit()
-
-        copy.update({3:4})
-        transaction.commit()  # if this doesn't blow up
-        list(copy.values())         # and this doesn't either, then fine
-
-    def testBucketSplitConflict(self):
-        # Tests that a bucket split is viewed as a conflict.
-        # It's (almost necessarily) a white-box test, and sensitive to
-        # implementation details.
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Invoke conflict resolution by committing a transaction.
-        self.openDB()
-
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        # In one transaction, add 16 new keys to bucket1, to force a bucket
-        # split.
-        b = self.t
-        numtoadd = 16
-        candidate = 60
-        while numtoadd:
-            if not b.has_key(candidate):
-                b[candidate] = candidate
-                numtoadd -= 1
-            candidate += 1
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 61 .. 74
-        # bucket 2 has 16 values: [75, 76 .. 81] + [84, 88 ..116]
-        # bucket 3 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((b0, 60, b1, 75, b2, 120, b3), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state) , 2)
-        self.assertEqual(len(state[0]), 7)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 75)
-        self.assertEqual(state[0][5], 120)
-
-        transaction.commit()
-
-        # In the other transaction, add 3 values near the tail end of bucket1.
-        # This doesn't cause a split.
-        b = copy
-        for i in range(112, 116):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 18 values: 60, 64 .. 112, 113, 114, 115, 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        self.assertRaises(ConflictError, transaction.commit)
-        transaction.abort()   # horrible things happen w/o this
-
-    def testEmptyBucketConflict(self):
-        # Tests that an emptied bucket *created by* conflict resolution is
-        # viewed as a conflict:  conflict resolution doesn't have enough
-        # info to unlink the empty bucket from the BTree correctly.
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Invoke conflict resolution by committing a transaction.
-        self.openDB()
-
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        # In one transaction, delete half of bucket 1.
-        b = self.t
-        for k in 60, 64, 68, 72, 76, 80, 84, 88:
-            del b[k]
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 7 values: 92, 96, 100, 104, 108, 112, 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        transaction.commit()
-
-        # In the other transaction, delete the other half of bucket 1.
-        b = copy
-        for k in 92, 96, 100, 104, 108, 112, 116:
-            del b[k]
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 8 values: 60, 64, 68, 72, 76, 80, 84, 88
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Conflict resolution empties bucket1 entirely.  This used to
-        # create an "insane" BTree (a legit BTree cannot contain an empty
-        # bucket -- the damaged tree held NULL pointers the BTree code
-        # doesn't expect, and segfaults resulted).
-        self.assertRaises(ConflictError, transaction.commit)
-        transaction.abort()   # horrible things happen w/o this
-
-
-    def testEmptyBucketNoConflict(self):
-        # Tests that a plain empty bucket (on input) is not viewed as a
-        # conflict.
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Invoke conflict resolution by committing a transaction.
-        self.openDB()
-
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open().root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        # In one transaction, just add a key.
-        b = self.t
-        b[1] = 1
-        # bucket 0 has 16 values: [0, 1] + [4, 8 .. 56]
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        transaction.commit()
-
-        # In the other transaction, delete bucket 2.
-        b = copy
-        for k in range(120, 200, 4):
-            del b[k]
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1), firstbucket)
-        # The next block is still verifying preconditions.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 3)
-        self.assertEqual(state[0][1], 60)
-
-        # This shouldn't create a ConflictError.
-        transaction.commit()
-        # And the resulting BTree shouldn't have internal damage.
-        b._check()
-
-    # The snaky control flow in _bucket__p_resolveConflict ended up trying
-    # to decref a NULL pointer if conflict resolution was fed 3 empty
-    # buckets.  http://collector.zope.org/Zope/553
-    def testThreeEmptyBucketsNoSegfault(self):
-        self.openDB()
-
-        r1 = self.db.open().root()
-        self.assertEqual(len(self.t), 0)
-        r1["t"] = b = self.t  # an empty tree
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        # In one transaction, add and delete a key.
-        b[2] = 2
-        del b[2]
-        transaction.commit()
-
-        # In the other transaction, also add and delete a key.
-        b = copy
-        b[1] = 1
-        del b[1]
-        # If the commit() segfaults, the C code is still wrong for this case.
-        self.assertRaises(ConflictError, transaction.commit)
-        transaction.abort()
-
-    def testCantResolveBTreeConflict(self):
-        # Test that a conflict involving two different changes to
-        # an internal BTree node is unresolvable.  An internal node
-        # only changes when there are enough additions or deletions
-        # to a child bucket that the bucket is split or removed.
-        # It's (almost necessarily) a white-box test, and sensitive to
-        # implementation details.
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Set up database connections to provoke conflict.
-        self.openDB()
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        # Now one transaction should add enough keys to cause a split,
-        # and another should remove all the keys in one bucket.
-
-        for k in range(200, 300, 4):
-            self.t[k] = k
-        transaction.commit()
-
-        for k in range(0, 60, 4):
-            del copy[k]
-
-        try:
-            transaction.commit()
-        except ConflictError, detail:
-            self.assert_(str(detail).startswith('database conflict error'))
-            transaction.abort()
-        else:
-            self.fail("expected ConflictError")
-
-    def testConflictWithOneEmptyBucket(self):
-        # If one transaction empties a bucket while another adds an item
-        # to the same bucket, all the changes "look resolvable":  bucket
-        # conflict resolution returns a bucket containing (only) the item
-        # added by the latter transaction, but the former transaction's
-        # removal of the bucket is uncontested:  the bucket is removed from
-        # the BTree even though resolution thinks it's non-empty!  This
-        # was first reported by Dieter Maurer to zodb-dev on 22 Mar 2005.
-        b = self.t
-        for i in range(0, 200, 4):
-            b[i] = i
-        # bucket 0 has 15 values: 0, 4 .. 56
-        # bucket 1 has 15 values: 60, 64 .. 116
-        # bucket 2 has 20 values: 120, 124 .. 196
-        state = b.__getstate__()
-        # Looks like:  ((bucket0, 60, bucket1, 120, bucket2), firstbucket)
-        # If these fail, the *preconditions* for running the test aren't
-        # satisfied -- the test itself hasn't been run yet.
-        self.assertEqual(len(state), 2)
-        self.assertEqual(len(state[0]), 5)
-        self.assertEqual(state[0][1], 60)
-        self.assertEqual(state[0][3], 120)
-
-        # Set up database connections to provoke conflict.
-        self.openDB()
-        r1 = self.db.open().root()
-        r1["t"] = self.t
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(self.t._p_serial, copy._p_serial)
-
-        # Now one transaction empties the first bucket, and another adds a
-        # key to the first bucket.
-
-        for k in range(0, 60, 4):
-            del self.t[k]
-        transaction.commit()
-
-        copy[1] = 1
-
-        try:
-            transaction.commit()
-        except ConflictError, detail:
-            self.assert_(str(detail).startswith('database conflict error'))
-            transaction.abort()
-        else:
-            self.fail("expected ConflictError")
-
-        # Same thing, except commit the transactions in the opposite order.
-        b = OOBTree()
-        for i in range(0, 200, 4):
-            b[i] = i
-
-        r1 = self.db.open().root()
-        r1["t"] = b
-        transaction.commit()
-
-        r2 = self.db.open(synch=False).root()
-        copy = r2["t"]
-        # Make sure all of copy is loaded.
-        list(copy.values())
-
-        self.assertEqual(b._p_serial, copy._p_serial)
-
-        # Now one transaction empties the first bucket, and another adds a
-        # key to the first bucket.
-        b[1] = 1
-        transaction.commit()
-
-        for k in range(0, 60, 4):
-            del copy[k]
-        try:
-            transaction.commit()
-        except ConflictError, detail:
-            self.assert_(str(detail).startswith('database conflict error'))
-            transaction.abort()
-        else:
-            self.fail("expected ConflictError")
-
-
-def test_suite():
-    suite = TestSuite()
-    for k in (
-        TestIIBTrees, TestIISets, TestIITreeSets, TestIIBuckets,
-        TestIFBTrees, TestIFSets, TestIFTreeSets, TestIFBuckets,
-        TestIOBTrees, TestIOSets, TestIOTreeSets, TestIOBuckets,
-        TestOOBTrees, TestOOSets, TestOOTreeSets, TestOOBuckets,
-        TestOIBTrees, TestOISets, TestOITreeSets, TestOIBuckets,
-        NastyConfict):
-        suite.addTest(makeSuite(k))
-    return suite
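All of the conflict tests above share one two-connection pattern. A minimal
standalone sketch of that pattern (assuming only stock ZODB with
MappingStorage; the variable names are illustrative, not part of the test
file):

    import transaction
    from ZODB.DB import DB
    from ZODB.MappingStorage import MappingStorage
    from BTrees.OOBTree import OOBTree

    db = DB(MappingStorage())

    # Connection 1 stores a tree.
    root1 = db.open().root()
    root1["t"] = tree = OOBTree()
    transaction.commit()

    # Connection 2 loads its own, soon-to-be-stale copy of the same tree.
    root2 = db.open(synch=False).root()
    copy = root2["t"]
    list(copy.values())   # force the whole copy into memory

    tree[1] = 1
    transaction.commit()  # connection 1's change is now committed

    copy[2] = 2
    transaction.commit()  # a stale write: BTree conflict resolution either
                          # merges it or raises ConflictError

Whether that second commit resolves or raises is exactly what the tests
above pin down, case by case.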
diff --git a/branches/bug1734/src/BTrees/tests/testSetOps.py b/branches/bug1734/src/BTrees/tests/testSetOps.py
deleted file mode 100644
index b2f6edf9..00000000
--- a/branches/bug1734/src/BTrees/tests/testSetOps.py
+++ /dev/null
@@ -1,485 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-from unittest import TestCase, TestSuite, TextTestRunner, makeSuite
-
-from BTrees.OOBTree import OOBTree, OOBucket, OOSet, OOTreeSet
-from BTrees.IOBTree import IOBTree, IOBucket, IOSet, IOTreeSet
-from BTrees.IIBTree import IIBTree, IIBucket, IISet, IITreeSet
-from BTrees.OIBTree import OIBTree, OIBucket, OISet, OITreeSet
-
-# Subclasses have to set up:
-#     builders - functions to build inputs, taking an optional keys arg
-#     intersection, union, difference - set to the type-correct versions
-class SetResult(TestCase):
-    def setUp(self):
-        self.Akeys = [1,    3,    5, 6   ]
-        self.Bkeys = [   2, 3, 4,    6, 7]
-        self.As = [makeset(self.Akeys) for makeset in self.builders]
-        self.Bs = [makeset(self.Bkeys) for makeset in self.builders]
-        self.emptys = [makeset() for makeset in self.builders]
-
-    # Slow but obviously correct Python implementations of basic ops.
-    def _union(self, x, y):
-        result = list(x)
-        for e in y:
-            if e not in result:
-                result.append(e)
-        result.sort()
-        return result
-
-    def _intersection(self, x, y):
-        result = []
-        for e in x:
-            if e in y:
-                result.append(e)
-        return result
-
-    def _difference(self, x, y):
-        result = list(x)
-        for e in y:
-            if e in result:
-                result.remove(e)
-        # Difference preserves LHS values.
-        if hasattr(x, "values"):
-            result = [(k, x[k]) for k in result]
-        return result
-
-    def testNone(self):
-        for op in self.union, self.intersection, self.difference:
-            C = op(None, None)
-            self.assert_(C is None)
-
-        for op in self.union, self.intersection, self.difference:
-            for A in self.As:
-                C = op(A, None)
-                self.assert_(C is A)
-
-                C = op(None, A)
-                if op is self.difference:
-                    self.assert_(C is None)
-                else:
-                    self.assert_(C is A)
-
-    def testEmptyUnion(self):
-        for A in self.As:
-            for E in self.emptys:
-                C = self.union(A, E)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), self.Akeys)
-
-                C = self.union(E, A)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), self.Akeys)
-
-    def testEmptyIntersection(self):
-        for A in self.As:
-            for E in self.emptys:
-                C = self.intersection(A, E)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), [])
-
-                C = self.intersection(E, A)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), [])
-
-    def testEmptyDifference(self):
-        for A in self.As:
-            for E in self.emptys:
-                C = self.difference(A, E)
-                # Difference preserves LHS values.
-                self.assertEqual(hasattr(C, "values"), hasattr(A, "values"))
-                if hasattr(A, "values"):
-                    self.assertEqual(list(C.items()), list(A.items()))
-                else:
-                    self.assertEqual(list(C), self.Akeys)
-
-                C = self.difference(E, A)
-                self.assertEqual(hasattr(C, "values"), hasattr(E, "values"))
-                self.assertEqual(list(C), [])
-
-    def testUnion(self):
-        inputs = self.As + self.Bs
-        for A in inputs:
-            for B in inputs:
-                C = self.union(A, B)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), self._union(A, B))
-
-    def testIntersection(self):
-        inputs = self.As + self.Bs
-        for A in inputs:
-            for B in inputs:
-                C = self.intersection(A, B)
-                self.assert_(not hasattr(C, "values"))
-                self.assertEqual(list(C), self._intersection(A, B))
-
-    def testDifference(self):
-        inputs = self.As + self.Bs
-        for A in inputs:
-            for B in inputs:
-                C = self.difference(A, B)
-                # Difference preserves LHS values.
-                self.assertEqual(hasattr(C, "values"), hasattr(A, "values"))
-                want = self._difference(A, B)
-                if hasattr(A, "values"):
-                    self.assertEqual(list(C.items()), want)
-                else:
-                    self.assertEqual(list(C), want)
-
-    def testLargerInputs(self):
-        from random import randint
-        MAXSIZE = 200
-        MAXVAL = 400
-        for i in range(3):
-            n = randint(0, MAXSIZE)
-            Akeys = [randint(1, MAXVAL) for j in range(n)]
-            As = [makeset(Akeys) for makeset in self.builders]
-            Akeys = IISet(Akeys)
-
-            n = randint(0, MAXSIZE)
-            Bkeys = [randint(1, MAXVAL) for j in range(n)]
-            Bs = [makeset(Bkeys) for makeset in self.builders]
-            Bkeys = IISet(Bkeys)
-
-            for op, simulator in ((self.union, self._union),
-                                  (self.intersection, self._intersection),
-                                  (self.difference, self._difference)):
-                for A in As:
-                    for B in Bs:
-                        got = op(A, B)
-                        want = simulator(Akeys, Bkeys)
-                        self.assertEqual(list(got), want,
-                                         (A, B, Akeys, Bkeys, list(got), want))
-
-# Given a mapping builder (IIBTree, OOBucket, etc), return a function
-# that builds an object of that type given only a list of keys.
-def makeBuilder(mapbuilder):
-    def result(keys=[], mapbuilder=mapbuilder):
-        return mapbuilder(zip(keys, keys))
-    return result
-
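For instance, makeBuilder(IIBTree)([1, 2, 3]) builds an IIBTree whose values
equal its keys, so a single list of keys can drive the mapping flavors
through the same tests as the sets (an illustrative sketch):

    from BTrees.IIBTree import IIBTree

    build = makeBuilder(IIBTree)
    t = build([1, 2, 3])
    assert list(t.items()) == [(1, 1), (2, 2), (3, 3)]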
-class PureII(SetResult):
-    from BTrees.IIBTree import union, intersection, difference
-    builders = IISet, IITreeSet, makeBuilder(IIBTree), makeBuilder(IIBucket)
-
-class PureIO(SetResult):
-    from BTrees.IOBTree import union, intersection, difference
-    builders = IOSet, IOTreeSet, makeBuilder(IOBTree), makeBuilder(IOBucket)
-
-class PureOO(SetResult):
-    from BTrees.OOBTree import union, intersection, difference
-    builders = OOSet, OOTreeSet, makeBuilder(OOBTree), makeBuilder(OOBucket)
-
-class PureOI(SetResult):
-    from BTrees.OIBTree import union, intersection, difference
-    builders = OISet, OITreeSet, makeBuilder(OIBTree), makeBuilder(OIBucket)
-
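The four Pure* classes below pin these module functions to one contract:
union and intersection always return a keys-only set, while difference
preserves the left-hand side's values. A quick illustration of that
contract (assuming only the BTrees package):

    from BTrees.OOBTree import OOBTree, OOSet, union, difference

    a = OOBTree([(1, 'one'), (2, 'two')])
    b = OOSet([2, 3])
    assert list(union(a, b)) == [1, 2, 3]                  # keys only
    assert list(difference(a, b).items()) == [(1, 'one')]  # LHS values kept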
-# Subclasses must set up (as class variables):
-#     multiunion, union
-#     mkset, mktreeset
-#     mkbucket, mkbtree
-class MultiUnion(TestCase):
-
-    def testEmpty(self):
-        self.assertEqual(len(self.multiunion([])), 0)
-
-    def testOne(self):
-        for sequence in [3], range(20), range(-10, 0, 2) + range(1, 10, 2):
-            seq1 = sequence[:]
-            seq2 = sequence[:]
-            seq2.reverse()
-            seqsorted = sequence[:]
-            seqsorted.sort()
-            for seq in seq1, seq2, seqsorted:
-                for builder in self.mkset, self.mktreeset:
-                    input = builder(seq)
-                    output = self.multiunion([input])
-                    self.assertEqual(len(seq), len(output))
-                    self.assertEqual(seqsorted, list(output))
-
-    def testValuesIgnored(self):
-        for builder in self.mkbucket, self.mkbtree:
-            input = builder([(1, 2), (3, 4), (5, 6)])
-            output = self.multiunion([input])
-            self.assertEqual([1, 3, 5], list(output))
-
-    def testBigInput(self):
-        N = 100000
-        input = self.mkset(range(N))
-        output = self.multiunion([input] * 10)
-        self.assertEqual(len(output), N)
-        self.assertEqual(output.minKey(), 0)
-        self.assertEqual(output.maxKey(), N-1)
-        self.assertEqual(list(output), range(N))
-
-    def testLotsOfLittleOnes(self):
-        from random import shuffle
-        N = 5000
-        inputs = []
-        mkset, mktreeset = self.mkset, self.mktreeset
-        for i in range(N):
-            base = i * 4 - N
-            inputs.append(mkset([base, base+1]))
-            inputs.append(mktreeset([base+2, base+3]))
-        shuffle(inputs)
-        output = self.multiunion(inputs)
-        self.assertEqual(len(output), N*4)
-        self.assertEqual(list(output), range(-N, 3*N))
-
-    def testFunkyKeyIteration(self):
-        # The internal set iteration protocol allows "iterating over" a
-        # single key as if it were a set.
-        N = 100
-        union, mkset = self.union, self.mkset
-        slow = mkset()
-        for i in range(N):
-            slow = union(slow, mkset([i]))
-        fast = self.multiunion(range(N))  # acts like N distinct singleton sets
-        self.assertEqual(len(slow), N)
-        self.assertEqual(len(fast), N)
-        self.assertEqual(list(slow), list(fast))
-        self.assertEqual(list(fast), range(N))
-
-class TestIIMultiUnion(MultiUnion):
-    from BTrees.IIBTree import multiunion, union
-    from BTrees.IIBTree import IISet as mkset, IITreeSet as mktreeset
-    from BTrees.IIBTree import IIBucket as mkbucket, IIBTree as mkbtree
-
-class TestIOMultiUnion(MultiUnion):
-    from BTrees.IOBTree import multiunion, union
-    from BTrees.IOBTree import IOSet as mkset, IOTreeSet as mktreeset
-    from BTrees.IOBTree import IOBucket as mkbucket, IOBTree as mkbtree
-
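Spelled out, multiunion folds a whole sequence of such inputs into a single
set, and -- per testFunkyKeyIteration above -- a bare key in the input
sequence acts like a singleton set (an illustrative sketch):

    from BTrees.IIBTree import IISet, IITreeSet, multiunion

    out = multiunion([IISet([1, 2]), IITreeSet([2, 3]), 4])
    assert list(out) == [1, 2, 3, 4]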
-# Check that various special module functions are and aren't imported from
-# the expected BTree modules.
-class TestImports(TestCase):
-    def testWeightedUnion(self):
-        from BTrees.IIBTree import weightedUnion
-        from BTrees.OIBTree import weightedUnion
-
-        try:
-            from BTrees.IOBTree import weightedUnion
-        except ImportError:
-            pass
-        else:
-            self.fail("IOBTree shouldn't have weightedUnion")
-
-        try:
-            from BTrees.OOBTree import weightedUnion
-        except ImportError:
-            pass
-        else:
-            self.fail("OOBTree shouldn't have weightedUnion")
-
-    def testWeightedIntersection(self):
-        from BTrees.IIBTree import weightedIntersection
-        from BTrees.OIBTree import weightedIntersection
-
-        try:
-            from BTrees.IOBTree import weightedIntersection
-        except ImportError:
-            pass
-        else:
-            self.fail("IOBTree shouldn't have weightedIntersection")
-
-        try:
-            from BTrees.OOBTree import weightedIntersection
-        except ImportError:
-            pass
-        else:
-            self.fail("OOBTree shouldn't have weightedIntersection")
-
-    def testMultiunion(self):
-        from BTrees.IIBTree import multiunion
-        from BTrees.IOBTree import multiunion
-
-        try:
-            from BTrees.OIBTree import multiunion
-        except ImportError:
-            pass
-        else:
-            self.fail("OIBTree shouldn't have multiunion")
-
-        try:
-            from BTrees.OOBTree import multiunion
-        except ImportError:
-            pass
-        else:
-            self.fail("OOBTree shouldn't have multiunion")
-
-# Subclasses must set up (as class variables):
-#     weightedUnion, weightedIntersection
-#     builders -- sequence of constructors, taking items
-#     union, intersection -- the module routines of those names
-#     mkbucket -- the module bucket builder
-class Weighted(TestCase):
-
-    def setUp(self):
-        self.Aitems = [(1, 10), (3, 30),  (5, 50), (6, 60)]
-        self.Bitems = [(2, 21), (3, 31), (4, 41),  (6, 61), (7, 71)]
-
-        self.As = [make(self.Aitems) for make in self.builders]
-        self.Bs = [make(self.Bitems) for make in self.builders]
-        self.emptys = [make([]) for make in self.builders]
-
-        weights = []
-        for w1 in -3, -1, 0, 1, 7:
-            for w2 in -3, -1, 0, 1, 7:
-                weights.append((w1, w2))
-        self.weights = weights
-
-    def testBothNone(self):
-        for op in self.weightedUnion, self.weightedIntersection:
-            w, C = op(None, None)
-            self.assert_(C is None)
-            self.assertEqual(w, 0)
-
-            w, C = op(None, None, 42, 666)
-            self.assert_(C is None)
-            self.assertEqual(w, 0)
-
-    def testLeftNone(self):
-        for op in self.weightedUnion, self.weightedIntersection:
-            for A in self.As + self.emptys:
-                w, C = op(None, A)
-                self.assert_(C is A)
-                self.assertEqual(w, 1)
-
-                w, C = op(None, A, 42, 666)
-                self.assert_(C is A)
-                self.assertEqual(w, 666)
-
-    def testRightNone(self):
-        for op in self.weightedUnion, self.weightedIntersection:
-            for A in self.As + self.emptys:
-                w, C = op(A, None)
-                self.assert_(C is A)
-                self.assertEqual(w, 1)
-
-                w, C = op(A, None, 42, 666)
-                self.assert_(C is A)
-                self.assertEqual(w, 42)
-
-    # If obj is a set, return a bucket with values all 1; else return obj.
-    def _normalize(self, obj):
-        if isaset(obj):
-            obj = self.mkbucket(zip(obj, [1] * len(obj)))
-        return obj
-
-    # Python simulation of weightedUnion.
-    def _wunion(self, A, B, w1=1, w2=1):
-        if isaset(A) and isaset(B):
-            return 1, self.union(A, B).keys()
-        A = self._normalize(A)
-        B = self._normalize(B)
-        result = []
-        for key in self.union(A, B):
-            v1 = A.get(key, 0)
-            v2 = B.get(key, 0)
-            result.append((key, v1*w1 + v2*w2))
-        return 1, result
-
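Following that simulation, weighting a bucket against a set treats the set
as a bucket whose values are all 1; e.g. for w1=2, w2=3, key 1 gets
2*10 + 3*1 = 23 and key 2 gets 3. A worked sketch of the expected semantics
(not an excerpt from the module):

    from BTrees.IIBTree import IIBucket, IISet, weightedUnion

    w, out = weightedUnion(IIBucket([(1, 10)]), IISet([1, 2]), 2, 3)
    assert w == 1
    assert list(out.items()) == [(1, 23), (2, 3)]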
-    def testUnion(self):
-        inputs = self.As + self.Bs + self.emptys
-        for A in inputs:
-            for B in inputs:
-                want_w, want_s = self._wunion(A, B)
-                got_w, got_s = self.weightedUnion(A, B)
-                self.assertEqual(got_w, want_w)
-                if isaset(got_s):
-                    self.assertEqual(got_s.keys(), want_s)
-                else:
-                    self.assertEqual(got_s.items(), want_s)
-
-                for w1, w2 in self.weights:
-                    want_w, want_s = self._wunion(A, B, w1, w2)
-                    got_w, got_s = self.weightedUnion(A, B, w1, w2)
-                    self.assertEqual(got_w, want_w)
-                    if isaset(got_s):
-                        self.assertEqual(got_s.keys(), want_s)
-                    else:
-                        self.assertEqual(got_s.items(), want_s)
-
-    # Python simulation of weightedIntersection.
-    def _wintersection(self, A, B, w1=1, w2=1):
-        if isaset(A) and isaset(B):
-            return w1 + w2, self.intersection(A, B).keys()
-        A = self._normalize(A)
-        B = self._normalize(B)
-        result = []
-        for key in self.intersection(A, B):
-            result.append((key, A[key]*w1 + B[key]*w2))
-        return 1, result
-
-    def testIntersection(self):
-        inputs = self.As + self.Bs + self.emptys
-        for A in inputs:
-            for B in inputs:
-                want_w, want_s = self._wintersection(A, B)
-                got_w, got_s = self.weightedIntersection(A, B)
-                self.assertEqual(got_w, want_w)
-                if isaset(got_s):
-                    self.assertEqual(got_s.keys(), want_s)
-                else:
-                    self.assertEqual(got_s.items(), want_s)
-
-                for w1, w2 in self.weights:
-                    want_w, want_s = self._wintersection(A, B, w1, w2)
-                    got_w, got_s = self.weightedIntersection(A, B, w1, w2)
-                    self.assertEqual(got_w, want_w)
-                    if isaset(got_s):
-                        self.assertEqual(got_s.keys(), want_s)
-                    else:
-                        self.assertEqual(got_s.items(), want_s)
-
-# Given a set builder (like OITreeSet or OISet), return a function that
-# takes a list of (key, value) pairs and builds a set out of the keys.
-def itemsToSet(setbuilder):
-    def result(items, setbuilder=setbuilder):
-        return setbuilder([key for key, value in items])
-    return result
-
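So, fed the same (key, value) fixtures the Weighted tests give their buckets
and trees, itemsToSet(OISet) simply drops the values (illustrative usage):

    from BTrees.OIBTree import OISet

    build = itemsToSet(OISet)
    assert list(build([(1, 'a'), (2, 'b')])) == [1, 2]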
-class TestWeightedII(Weighted):
-    from BTrees.IIBTree import weightedUnion, weightedIntersection
-    from BTrees.IIBTree import union, intersection
-    from BTrees.IIBTree import IIBucket as mkbucket
-    builders = IIBucket, IIBTree, itemsToSet(IISet), itemsToSet(IITreeSet)
-
-class TestWeightedOI(Weighted):
-    from BTrees.OIBTree import weightedUnion, weightedIntersection
-    from BTrees.OIBTree import union, intersection
-    from BTrees.OIBTree import OIBucket as mkbucket
-    builders = OIBucket, OIBTree, itemsToSet(OISet), itemsToSet(OITreeSet)
-
-
-# 'thing' is a bucket, btree, set or treeset.  Return true iff it's one of the
-# latter two.
-def isaset(thing):
-    return not hasattr(thing, 'values')
-
-
-def test_suite():
-    s = TestSuite()
-    for klass in (TestIIMultiUnion, TestIOMultiUnion,
-                  TestImports,
-                  PureII, PureIO, PureOI, PureOO,
-                  TestWeightedII, TestWeightedOI):
-        s.addTest(makeSuite(klass))
-    return s
-
-def main():
-    TextTestRunner().run(test_suite())
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/BTrees/tests/test_btreesubclass.py b/branches/bug1734/src/BTrees/tests/test_btreesubclass.py
deleted file mode 100644
index 4ec19509..00000000
--- a/branches/bug1734/src/BTrees/tests/test_btreesubclass.py
+++ /dev/null
@@ -1,44 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-from BTrees.OOBTree import OOBTree, OOBucket
-
-class B(OOBucket):
-    pass
-
-class T(OOBTree):
-    _bucket_type = B
-
-import unittest
-
-class SubclassTest(unittest.TestCase):
-
-    def testSubclass(self):
-        # test that a subclass that defines _bucket_type gets buckets
-        # of that type
-        t = T()
-
-        # There's no good way to get a bucket at the moment.
-        # __getstate__() is as good as it gets, but the default
-        # getstate explicitly includes the pickle of the bucket
-        # for small trees, so we have to be clever :-(
-
-        # make sure there is more than one bucket in the tree
-        for i in range(1000):
-            t[i] = i
-
-        state = t.__getstate__()
-        self.assert_(state[0][0].__class__ is B)
-
-def test_suite():
-    return unittest.makeSuite(SubclassTest)
diff --git a/branches/bug1734/src/BTrees/tests/test_check.py b/branches/bug1734/src/BTrees/tests/test_check.py
deleted file mode 100644
index 5f253301..00000000
--- a/branches/bug1734/src/BTrees/tests/test_check.py
+++ /dev/null
@@ -1,96 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the BTree check.check() function."""
-
-import unittest
-
-from BTrees.OOBTree import OOBTree
-from BTrees.check import check
-
-class CheckTest(unittest.TestCase):
-
-    def setUp(self):
-        self.t = t = OOBTree()
-        for i in range(31):
-            t[i] = 2*i
-        self.state = t.__getstate__()
-
-    def testNormal(self):
-        s = self.state
-        # Looks like (state, first_bucket)
-        # where state looks like (bucket0, 15, bucket1).
-        self.assertEqual(len(s), 2)
-        self.assertEqual(len(s[0]), 3)
-        self.assertEqual(s[0][1], 15)
-        self.t._check() # shouldn't blow up
-        check(self.t)   # shouldn't blow up
-
-    def testKeyTooLarge(self):
-        # Damage an invariant by dropping the BTree key to 14.
-        s = self.state
-        news = (s[0][0], 14, s[0][2]), s[1]
-        self.t.__setstate__(news)
-        self.t._check() # not caught
-        try:
-            # Expecting "... key %r >= upper bound %r at index %d"
-            check(self.t)
-        except AssertionError, detail:
-            self.failUnless(str(detail).find(">= upper bound") > 0)
-        else:
-            self.fail("expected self.t_check() to catch the problem")
-
-    def testKeyTooSmall(self):
-        # Damage an invariant by bumping the BTree key to 16.
-        s = self.state
-        news = (s[0][0], 16, s[0][2]), s[1]
-        self.t.__setstate__(news)
-        self.t._check() # not caught
-        try:
-            # Expecting "... key %r < lower bound %r at index %d"
-            check(self.t)
-        except AssertionError, detail:
-            self.failUnless(str(detail).find("< lower bound") > 0)
-        else:
-            self.fail("expected self.t_check() to catch the problem")
-
-    def testKeysSwapped(self):
-        # Damage an invariant by swapping two key/value pairs.
-        s = self.state
-        # Looks like (state, first_bucket)
-        # where state looks like (bucket0, 15, bucket1).
-        (b0, num, b1), firstbucket = s
-        self.assertEqual(b0[4], 8)
-        self.assertEqual(b0[5], 10)
-        b0state = b0.__getstate__()
-        self.assertEqual(len(b0state), 2)
-        # b0state looks like
-        # ((k0, v0, k1, v1, ...), nextbucket)
-        pairs, nextbucket = b0state
-        self.assertEqual(pairs[8], 4)
-        self.assertEqual(pairs[9], 8)
-        self.assertEqual(pairs[10], 5)
-        self.assertEqual(pairs[11], 10)
-        newpairs = pairs[:8] + (5, 10, 4, 8) + pairs[12:]
-        b0.__setstate__((newpairs, nextbucket))
-        self.t._check() # not caught
-        try:
-            check(self.t)
-        except AssertionError, detail:
-            self.failUnless(str(detail).find(
-                "key 5 at index 4 >= key 4 at index 5") > 0)
-        else:
-            self.fail("expected self.t_check() to catch the problem")
-
-def test_suite():
-    return unittest.makeSuite(CheckTest)
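The (state, firstbucket) layout those assertions poke at can be reproduced
directly: for the 31-key tree built in setUp, __getstate__() returns a
2-tuple whose first item interleaves child buckets with separator keys (a
sketch matching testNormal above):

    from BTrees.OOBTree import OOBTree

    t = OOBTree()
    for i in range(31):
        t[i] = 2 * i
    state, firstbucket = t.__getstate__()
    bucket0, sep, bucket1 = state
    assert sep == 15   # keys below 15 live in bucket0, the rest in bucket1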
diff --git a/branches/bug1734/src/BTrees/tests/test_compare.py b/branches/bug1734/src/BTrees/tests/test_compare.py
deleted file mode 100644
index ca1630d9..00000000
--- a/branches/bug1734/src/BTrees/tests/test_compare.py
+++ /dev/null
@@ -1,74 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test errors during comparison of BTree keys."""
-
-import unittest
-
-from BTrees.OOBTree import OOBucket as Bucket, OOSet as Set
-
-import transaction
-from ZODB.MappingStorage import MappingStorage
-from ZODB.DB import DB
-
-class CompareTest(unittest.TestCase):
-
-    s = "A string with hi-bit-set characters: \700\701"
-    u = u"A unicode string"
-
-    def setUp(self):
-        # These defaults only make sense if the default encoding
-        # prevents s from being promoted to Unicode.
-        self.assertRaises(UnicodeError, unicode, self.s)
-
-        # The bucket and set need to be added to the database so the
-        # tests below run against persistent objects.
-        self.db = DB(MappingStorage())
-        root = self.db.open().root()
-        self.bucket = root["bucket"] = Bucket()
-        self.set = root["set"] = Set()
-        transaction.commit()
-
-    def tearDown(self):
-        self.assert_(self.bucket._p_changed != 2)
-        self.assert_(self.set._p_changed != 2)
-        transaction.abort()
-
-    def assertUE(self, callable, *args):
-        self.assertRaises(UnicodeError, callable, *args)
-
-    def testBucketGet(self):
-        self.bucket[self.s] = 1
-        self.assertUE(self.bucket.get, self.u)
-
-    def testSetGet(self):
-        self.set.insert(self.s)
-        self.assertUE(self.set.remove, self.u)
-
-    def testBucketSet(self):
-        self.bucket[self.s] = 1
-        self.assertUE(self.bucket.__setitem__, self.u, 1)
-
-    def testSetSet(self):
-        self.set.insert(self.s)
-        self.assertUE(self.set.insert, self.u)
-
-    def testBucketMinKey(self):
-        self.bucket[self.s] = 1
-        self.assertUE(self.bucket.minKey, self.u)
-
-    def testSetMinKey(self):
-        self.set.insert(self.s)
-        self.assertUE(self.set.minKey, self.u)
-
-def test_suite():
-    return unittest.makeSuite(CompareTest)
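The failure mode exercised here is easy to reproduce in isolation: under
Python 2's default ASCII codec, probing a hi-bit str key with a unicode key
forces a str-to-unicode promotion inside the BTree comparison, which raises
UnicodeError instead of returning an ordering (a sketch; Python 2 semantics
assumed):

    from BTrees.OOBTree import OOBucket

    b = OOBucket()
    b["hi-bit key: \700\701"] = 1
    b.get(u"any unicode probe")   # raises UnicodeError during comparison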
diff --git a/branches/bug1734/src/Persistence/DEPENDENCIES.cfg b/branches/bug1734/src/Persistence/DEPENDENCIES.cfg
deleted file mode 100644
index b4b35ad3..00000000
--- a/branches/bug1734/src/Persistence/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-ZODB
-persistent
diff --git a/branches/bug1734/src/Persistence/_Persistence.c b/branches/bug1734/src/Persistence/_Persistence.c
deleted file mode 100644
index c08287d6..00000000
--- a/branches/bug1734/src/Persistence/_Persistence.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-
- Copyright (c) 2003 Zope Corporation and Contributors.
- All Rights Reserved.
-
- This software is subject to the provisions of the Zope Public License,
- Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
- THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
- WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
- FOR A PARTICULAR PURPOSE.
-
-*/
-static char _Persistence_module_documentation[] = 
-"Persistent ExtensionClass\n"
-"\n"
-"$Id$\n"
-;
-
-#include "ExtensionClass.h"
-#include "cPersistence.h"
-
-
-/* convert_name() returns a new reference to a string name
-   or sets an exception and returns NULL.
-*/
-
-static PyObject *
-convert_name(PyObject *name)
-{
-#ifdef Py_USING_UNICODE
-    /* The Unicode to string conversion is done here because the
-       existing tp_setattro slots expect a string object as name
-       and we wouldn't want to break those. */
-    if (PyUnicode_Check(name)) {
-	name = PyUnicode_AsEncodedString(name, NULL, NULL);
-    }
-    else
-#endif
-    if (!PyString_Check(name)) {
-	PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
-	return NULL;
-    } else
-	Py_INCREF(name);
-    return name;
-}
-
-/* Returns true if the object requires unghostification.
-
-   There are several special attributes that we allow access to without
-   requiring that the object be unghostified:
-   __class__
-   __del__
-   __dict__
-   __of__
-   __setstate__
-*/
-
-static int
-unghost_getattr(const char *s)
-{
-    if (*s++ != '_')
-	return 1;
-    if (*s == 'p') {
-	s++;
-	if (*s == '_')
-	    return 0; /* _p_ */
-	else
-	    return 1;
-    }
-    else if (*s == '_') {
-	s++;
-	switch (*s) {
-	case 'c':
-	    return strcmp(s, "class__");
-	case 'd':
-	    s++;
-	    if (!strcmp(s, "el__"))
-		return 0; /* __del__ */
-	    if (!strcmp(s, "ict__"))
-		return 0; /* __dict__ */
-	    return 1;
-	case 'o':
-	    return strcmp(s, "of__");
-	case 's':
-	    return strcmp(s, "setstate__");
-	default:
-	    return 1;
-	}
-    }
-    return 1;
-}
-
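Read as a predicate, the routine above says: a name that doesn't start with
'_' always forces unghostification, '_p_*' names never do, and among the
'__*__' names exactly __class__, __del__, __dict__, __of__ and __setstate__
are exempt. A Python paraphrase of the same decision (for readability only;
the module itself is C):

    NO_UNGHOST = frozenset(['__class__', '__del__', '__dict__',
                            '__of__', '__setstate__'])

    def unghost_getattr(name):
        # True means the object must be unghostified before the attribute
        # can be looked up.
        if name.startswith('_p_'):
            return False
        return name not in NO_UNGHOST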
-static PyObject *
-P_getattr(cPersistentObject *self, PyObject *name)
-{
-  PyObject *v=NULL;
-  char *s;
-
-  name = convert_name(name);
-  if (!name)
-    return NULL;
-
-  s = PyString_AS_STRING(name);
-
-  if (*s != '_' || unghost_getattr(s)) 
-    {
-      if (PER_USE(self))
-        {
-          v = Py_FindAttr((PyObject*)self, name);
-          PER_ALLOW_DEACTIVATION(self);
-          PER_ACCESSED(self);
-        }
-    }
-  else
-    v = Py_FindAttr((PyObject*)self, name);
-
-  Py_DECREF(name);
-
-  return v;
-}
-
-
-static PyTypeObject Ptype = {
-	PyObject_HEAD_INIT(NULL)
-	/* ob_size           */ 0,
-	/* tp_name           */ "Persistence.Persistent",
-        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-        /* tp_getattro       */ (getattrofunc)P_getattr,
-        0, 0,
-        /* tp_flags          */ Py_TPFLAGS_DEFAULT
-				| Py_TPFLAGS_BASETYPE ,
-	/* tp_doc            */ "Persistent ExtensionClass",
-};
-
-static struct PyMethodDef _Persistence_methods[] = {
-	{NULL,	 (PyCFunction)NULL, 0, NULL}		/* sentinel */
-};
-
-#ifndef PyMODINIT_FUNC	/* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-PyMODINIT_FUNC
-init_Persistence(void)
-{
-  PyObject *m;
-        
-  if (! ExtensionClassImported)
-    return;
-
-  cPersistenceCAPI = PyCObject_Import("persistent.cPersistence", "CAPI");
-  if (cPersistenceCAPI == NULL)
-    return;
-
-  Ptype.tp_bases = Py_BuildValue("OO", cPersistenceCAPI->pertype, ECBaseType);
-  if (Ptype.tp_bases == NULL)
-    return;
-  Ptype.tp_base = cPersistenceCAPI->pertype;
-  
-  Ptype.ob_type = ECExtensionClassType;
-  if (PyType_Ready(&Ptype) < 0)
-    return;
-        
-  /* Create the module and add the functions */
-  m = Py_InitModule3("_Persistence", _Persistence_methods,
-                     _Persistence_module_documentation);
-  
-  if (m == NULL)
-    return;
-  
-  /* Add types: */
-  if (PyModule_AddObject(m, "Persistent", (PyObject *)&Ptype) < 0)
-    return;
-}
-
diff --git a/branches/bug1734/src/Persistence/__init__.py b/branches/bug1734/src/Persistence/__init__.py
deleted file mode 100644
index 3f01fb25..00000000
--- a/branches/bug1734/src/Persistence/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Persistence and ExtensionClass combined
-
-$Id$
-"""
-
-from persistent import PickleCache
-
-try:
-    from _Persistence import Persistent
-except:
-    from warnings import warn
-    warn("""Couldn't import the ExtensionClass-based base class
-
-    There are two possibilities:
-
-    1. You don't care about ExtensionClass. You are importing
-       Persistence because that's what you imported in the past.
-       In this case, you should really use the persistent package
-       instead:
-
-          >>> from persistent import Persistent
-          >>> from persistent.list import PersistentList
-          >>> from persistent.mapping import PersistentMapping
-
-    2. You want your classes to be ExtensionClasses. In this case,
-       you need to install the ExtensionClass package
-       separately. ExtensionClass is no longer included with ZODB3.
-
-    """)
-
-    from persistent import Persistent
-
-Overridable = Persistent
-
-from Persistence.mapping import PersistentMapping
-
-# This is a travesty. Whimper. The Data.fs.in files used in Zope 2 have
-# ancient pickles referring to BoboPOS. Waaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa!
-import sys
-sys.modules['BoboPOS'] = sys.modules['Persistence']
-sys.modules['BoboPOS.PersistentMapping'] = sys.modules['Persistence.mapping']
-del sys
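The sys.modules aliasing above is a general trick for keeping old pickles
loadable after a module moves: pickle resolves 'BoboPOS.PersistentMapping'
through sys.modules, so no BoboPOS package ever needs to exist on disk. The
same pattern in the abstract (module names here are hypothetical):

    import sys
    import new_home                 # the class's real, current module

    # Pickles written against the old location keep loading, because
    # pickle's class lookup goes through sys.modules.
    sys.modules['old_home'] = new_home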
diff --git a/branches/bug1734/src/Persistence/mapping.py b/branches/bug1734/src/Persistence/mapping.py
deleted file mode 100644
index 8c177631..00000000
--- a/branches/bug1734/src/Persistence/mapping.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Python implementation of persistent base types
-
-$Id$"""
-
-import Persistence
-import persistent
-from persistent.mapping import PersistentMapping
-
-if Persistence.Persistent is not persistent.Persistent:
-    class PersistentMapping(Persistence.Persistent, PersistentMapping):
-        """Legacy persistent mapping class
-
-        This class mixes in ExtensionClass Base if it is present.
-
-        Unless you actually want ExtensionClass semantics, use
-        persistent.mapping.PersistentMapping instead.
-        """
-
-        def __setstate__(self, state):
-            if 'data' not in state:
-                state['data'] = state['_container']
-                del state['_container']
-            self.__dict__.update(state)
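That __setstate__ is the usual rename-on-load migration: old pickles stored
the mapping under '_container', newer code expects 'data', so the state dict
is patched before it reaches the instance. The same pattern in isolation (a
hypothetical class, for illustration):

    class Migrating(object):
        def __setstate__(self, state):
            # Accept both old ('_container') and new ('data') pickles.
            if 'data' not in state:
                state['data'] = state.pop('_container')
            self.__dict__.update(state)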
diff --git a/branches/bug1734/src/Persistence/tests/__init__.py b/branches/bug1734/src/Persistence/tests/__init__.py
deleted file mode 100644
index 5bb534f7..00000000
--- a/branches/bug1734/src/Persistence/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# package
diff --git a/branches/bug1734/src/Persistence/tests/testPersistent.py b/branches/bug1734/src/Persistence/tests/testPersistent.py
deleted file mode 100644
index 06cfef2b..00000000
--- a/branches/bug1734/src/Persistence/tests/testPersistent.py
+++ /dev/null
@@ -1,224 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import pickle
-import time
-import unittest
-
-from Persistence import Persistent
-from persistent.cPickleCache import PickleCache
-from persistent.TimeStamp import TimeStamp
-from ZODB.utils import p64
-
-class Jar(object):
-    """Testing stub for _p_jar attribute."""
-
-    def __init__(self):
-        self.cache = PickleCache(self)
-        self.oid = 1
-        self.registered = {}
-
-    def add(self, obj):
-        obj._p_oid = p64(self.oid)
-        self.oid += 1
-        obj._p_jar = self
-        self.cache[obj._p_oid] = obj
-
-    def close(self):
-        pass
-
-    # the following methods must be implemented to be a jar
-
-    def setklassstate(self):
-        # I don't know what this method does, but the pickle cache
-        # constructor calls it.
-        pass
-
-    def register(self, obj):
-        self.registered[obj] = 1
-
-    def setstate(self, obj):
-        # Trivial setstate() implementation that just re-initializes
-        # the object.  This isn't what setstate() is supposed to do,
-        # but it suffices for the tests.
-        obj.__class__.__init__(obj)
-
-class P(Persistent):
-    pass
-
-class H1(Persistent):
-
-    def __init__(self):
-        self.n = 0
-
-    def __getattr__(self, attr):
-        self.n += 1
-        return self.n
-
-class H2(Persistent):
-
-    def __init__(self):
-        self.n = 0
-
-    def __getattribute__(self, attr):
-        supergetattr = super(H2, self).__getattribute__
-        try:
-            return supergetattr(attr)
-        except AttributeError:
-            n = supergetattr("n")
-            self.n = n + 1
-            return n + 1
-
-class PersistenceTest(unittest.TestCase):
-
-    def setUp(self):
-        self.jar = Jar()
-
-    def tearDown(self):
-        self.jar.close()
-
-    def testOidAndJarAttrs(self):
-        obj = P()
-        self.assertEqual(obj._p_oid, None)
-        obj._p_oid = 12
-        self.assertEqual(obj._p_oid, 12)
-        del obj._p_oid
-
-        self.jar.add(obj)
-
-        # Can't change oid of cache object.
-        def deloid():
-            del obj._p_oid
-        self.assertRaises(ValueError, deloid)
-        def setoid():
-            obj._p_oid = 12
-        self.assertRaises(ValueError, setoid)
-
-        def deloid():
-            del obj._p_jar
-        self.assertRaises(ValueError, deloid)
-        def setoid():
-            obj._p_jar = 12
-        self.assertRaises(ValueError, setoid)
-
-    def testChanged(self):
-        obj = P()
-        self.jar.add(obj)
-
-        # The value returned for _p_changed can be one of:
-        # 0 -- it is not changed
-        # 1 -- it is changed
-        # None -- it is a ghost
-
-        obj.x = 1
-        self.assertEqual(obj._p_changed, 1)
-        self.assert_(obj in self.jar.registered)
-
-        obj._p_changed = 0
-        self.assertEqual(obj._p_changed, 0)
-        self.jar.registered.clear()
-
-        obj._p_changed = 1
-        self.assertEqual(obj._p_changed, 1)
-        self.assert_(obj in self.jar.registered)
-
-        # setting obj._p_changed to None ghostifies if the
-        # object is in the up-to-date state, but not otherwise.
-        obj._p_changed = None
-        self.assertEqual(obj._p_changed, 1)
-        obj._p_changed = 0
-        # Now it's a ghost.
-        obj._p_changed = None
-        self.assertEqual(obj._p_changed, None)
-
-        obj = P()
-        self.jar.add(obj)
-        obj._p_changed = 1
-        # You can transition directly from modified to ghost if
-        # you delete the _p_changed attribute.
-        del obj._p_changed
-        self.assertEqual(obj._p_changed, None)
-
-    def testSerial(self):
-        noserial = "\000" * 8
-        obj = P()
-        self.assertEqual(obj._p_serial, noserial)
-
-        def set(val):
-            obj._p_serial = val
-        self.assertRaises(ValueError, set, 1)
-        self.assertRaises(ValueError, set, "0123")
-        self.assertRaises(ValueError, set, "012345678")
-        self.assertRaises(ValueError, set, u"01234567")
-
-        obj._p_serial = "01234567"
-        del obj._p_serial
-        self.assertEqual(obj._p_serial, noserial)
-
-    def testMTime(self):
-        obj = P()
-        self.assertEqual(obj._p_mtime, None)
-
-        t = int(time.time())
-        ts = TimeStamp(*time.gmtime(t)[:6])
-        obj._p_serial = repr(ts)
-        self.assertEqual(obj._p_mtime, t)
-        self.assert_(isinstance(obj._p_mtime, float))
-
-    def testPicklable(self):
-        obj = P()
-        obj.attr = "test"
-        s = pickle.dumps(obj)
-        obj2 = pickle.loads(s)
-        self.assertEqual(obj.attr, obj2.attr)
-
-    def testGetattr(self):
-        obj = H1()
-        self.assertEqual(obj.larry, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-        self.jar.add(obj)
-        obj._p_deactivate()
-
-        # The simple Jar used for testing re-initializes the object.
-        self.assertEqual(obj.larry, 1)
-        # The getattr hook modified the object, so it should now be
-        # in the changed state.
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-    def testGetattribute(self):
-        obj = H2()
-        self.assertEqual(obj.larry, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-        self.jar.add(obj)
-        obj._p_deactivate()
-
-        # The simple Jar used for testing re-initializes the object.
-        self.assertEqual(obj.larry, 1)
-        # The getattr hook modified the object, so it should now be
-        # in the changed state.
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-    # TODO:  Need to decide how __setattr__ and __delattr__ should work,
-    # then write tests.
-
-
-def test_suite():
-    return unittest.makeSuite(PersistenceTest)
diff --git a/branches/bug1734/src/Persistence/tests/test_ExtensionClass.py b/branches/bug1734/src/Persistence/tests/test_ExtensionClass.py
deleted file mode 100644
index cf4ce6c7..00000000
--- a/branches/bug1734/src/Persistence/tests/test_ExtensionClass.py
+++ /dev/null
@@ -1,507 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test ExtensionClass support in Persistence.Persistent
-
-$Id$
-"""
-
-from doctest import DocTestSuite
-import pickle
-
-from Persistence import Persistent
-
-try:
-    # The _Persistence module is only compiled in a Zope checkout,
-    # where ExtensionClass is available.
-    import Persistence._Persistence
-except ImportError:
-    pass
-else:
-
-    def test_basic():
-        """
-
-        >>> from ExtensionClass import Base
-
-        - Support for a class initialiser:
-
-          >>> class C(Persistent):
-          ...   def __class_init__(self):
-          ...      print 'class init called'
-          ...      print self.__name__
-          ...   def bar(self):
-          ...      return 'bar called'
-          class init called
-          C
-          >>> c = C()
-          >>> int(c.__class__ is C)
-          1
-          >>> int(c.__class__ is type(c))
-          1
-
-        - Provide an inheritedAttribute method for looking up attributes in
-          base classes:
-
-          >>> class C2(C):
-          ...   def bar(*a):
-          ...      return C2.inheritedAttribute('bar')(*a), 42
-          class init called
-          C2
-          >>> o = C2()
-          >>> o.bar()
-          ('bar called', 42)
-
-          This is for compatibility with old code. New code should use super
-          instead.
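-
-          (Illustrative note, not part of the original test: with
-          new-style classes the same override would be written with
-          super, e.g. ``return super(C2, self).bar(), 42``.)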
-
-        The base class, Base, exists mainly to support the __of__ protocol.
-        The __of__ protocol is similar to __get__ except that __of__ is called
-        when an implementor is retrieved from an instance as well as from a
-        class:
-
-        >>> class O(Base):
-        ...   def __of__(*a):
-        ...      return a
-
-        >>> o1 = O()
-        >>> o2 = O()
-        >>> C.o1 = o1
-        >>> c.o2 = o2
-        >>> c.o1 == (o1, c)
-        1
-        >>> C.o1 == o1
-        1
-        >>> int(c.o2 == (o2, c))
-        1
-
-        We accomplish this by making a class that implements __of__ a
-        descriptor and treating all descriptor ExtensionClasses this way. That
-        is, if an extension class is a descriptor, its __get__ method will be
-        called even when it is retrieved from an instance.
-
-        >>> class O(Base):
-        ...   def __get__(*a):
-        ...      return a
-        ...
-        >>> o1 = O()
-        >>> o2 = O()
-        >>> C.o1 = o1
-        >>> c.o2 = o2
-        >>> int(c.o1 == (o1, c, type(c)))
-        1
-        >>> int(C.o1 == (o1, None, type(c)))
-        1
-        >>> int(c.o2 == (o2, c, type(c)))
-        1
-        """
-
-    def test_mixing():
-        """Test working with a classic class
-
-        >>> class Classic:
-        ...   def x(self):
-        ...     return 42
-
-        >>> class O(Persistent):
-        ...   def __of__(*a):
-        ...      return a
-
-        >>> class O2(Classic, O):
-        ...   def __of__(*a):
-        ...      return (O2.inheritedAttribute('__of__')(*a),
-        ...              O2.inheritedAttribute('x')(a[0]))
-
-        >>> class C(Persistent):
-        ...   def __class_init__(self):
-        ...      print 'class init called'
-        ...      print self.__name__
-        ...   def bar(self):
-        ...      return 'bar called'
-        class init called
-        C
-
-        >>> c = C()
-        >>> o2 = O2()
-        >>> c.o2 = o2
-        >>> int(c.o2 == ((o2, c), 42))
-        1
-
-        Test working with a new style
-
-        >>> class Modern(object):
-        ...   def x(self):
-        ...     return 42
-
-        >>> class O2(Modern, O):
-        ...   def __of__(*a):
-        ...      return (O2.inheritedAttribute('__of__')(*a),
-        ...              O2.inheritedAttribute('x')(a[0]))
-
-        >>> o2 = O2()
-        >>> c.o2 = o2
-        >>> int(c.o2 == ((o2, c), 42))
-        1
-
-        """
-
-    def proper_error_on_delattr():
-        """
-        Florent Guillaume wrote:
-
-        ...
-
-        Excellent.
-        Will it also fix this particularity of ExtensionClass:
-
-
-        >>> class A(Persistent):
-        ...   def foo(self):
-        ...     self.gee
-        ...   def bar(self):
-        ...     del self.gee
-
-        >>> a=A()
-        >>> a.foo()
-        Traceback (most recent call last):
-        ...
-        AttributeError: gee
-
-        >>> a.bar()
-        Traceback (most recent call last):
-        ...
-        AttributeError: 'A' object has no attribute 'gee'
-
-        I.e., the fact that KeyError is raised whereas a normal class would
-        raise AttributeError.
-        """
-
-    def test__basicnew__():
-        """
-        >>> x = Simple.__basicnew__()
-        >>> x.__dict__
-        {}
-        """
-
-    def test_setattr_on_extension_type():
-        """
-        >>> for name in 'x', '_x', 'x_', '__x_y__', '___x__', '__x___', '_x_':
-        ...     setattr(Persistent, name, 1)
-        ...     print getattr(Persistent, name)
-        ...     delattr(Persistent, name)
-        ...     print getattr(Persistent, name, 0)
-        1
-        0
-        1
-        0
-        1
-        0
-        1
-        0
-        1
-        0
-        1
-        0
-        1
-        0
-
-        >>> Persistent.__foo__ = 1
-        Traceback (most recent call last):
-        ...
-        TypeError: can't set attributes of built-in/extension type """ \
-            """'Persistence.Persistent' if the attribute name begins """ \
-            """and ends with __ and contains only 4 _ characters
-
-        >>> Persistent.__foo__
-        Traceback (most recent call last):
-        ...
-        AttributeError: type object 'Persistence.Persistent' """ \
-            """has no attribute '__foo__'
-
-        >>> del Persistent.__foo__
-        Traceback (most recent call last):
-        ...
-        TypeError: can't set attributes of built-in/extension type """ \
-            """'Persistence.Persistent' if the attribute name begins """ \
-            """and ends with __ and contains only 4 _ characters
-
-        """
-
-def test_class_creation_under_stress():
-    """
-    >>> for i in range(100):
-    ...   class B(Persistent):
-    ...     print i,
-    ...     if i and i%20 == 0:
-    ...         print
-    0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
-    21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
-    41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
-    61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
-    81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99
-
-    >>> import gc
-    >>> x = gc.collect()
-
-    """
-
-def print_dict(d):
-    d = d.items()
-    d.sort()
-    print '{%s}' % (', '.join(
-        [('%r: %r' % (k, v)) for (k, v) in d]
-        ))
-
-def cmpattrs(self, other, *attrs):
-    for attr in attrs:
-        if attr[:3] in ('_v_', '_p_'):
-            continue
-        c = cmp(getattr(self, attr, None), getattr(other, attr, None))
-        if c:
-            return c
-    return 0
-
-class Simple(Persistent):
-    def __init__(self, name, **kw):
-        self.__name__ = name
-        self.__dict__.update(kw)
-        self._v_favorite_color = 'blue'
-        self._p_foo = 'bar'
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other, '__class__', *(self.__dict__.keys()))
-
-def test_basic_pickling():
-    """
-    >>> x = Simple('x', aaa=1, bbb='foo')
-
-    >>> print_dict(x.__getstate__())
-    {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'}
-
-    >>> f, (c,), state = x.__reduce__()
-    >>> f.__name__
-    '__newobj__'
-    >>> f.__module__
-    'copy_reg'
-    >>> c.__name__
-    'Simple'
-
-    >>> print_dict(state)
-    {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.__setstate__({'z': 1})
-    >>> x.__dict__
-    {'z': 1}
-
-    """
-
-class Custom(Simple):
-
-    def __new__(cls, x, y):
-        r = Persistent.__new__(cls)
-        r.x, r.y = x, y
-        return r
-
-    def __init__(self, x, y):
-        self.a = 42
-
-    def __getnewargs__(self):
-        return self.x, self.y
-
-    def __getstate__(self):
-        return self.a
-
-    def __setstate__(self, a):
-        self.a = a
-
-
-def test_pickling_w_overrides():
-    """
-    >>> x = Custom('x', 'y')
-    >>> x.a = 99
-
-    >>> (f, (c, ax, ay), a) = x.__reduce__()
-    >>> f.__name__
-    '__newobj__'
-    >>> f.__module__
-    'copy_reg'
-    >>> c.__name__
-    'Custom'
-    >>> ax, ay, a
-    ('x', 'y', 99)
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-class Slotted(Persistent):
-    __slots__ = 's1', 's2', '_p_splat', '_v_eek'
-    def __init__(self, s1, s2):
-        self.s1, self.s2 = s1, s2
-        self._v_eek = 1
-        self._p_splat = 2
-
-class SubSlotted(Slotted):
-    __slots__ = 's3', 's4'
-    def __init__(self, s1, s2, s3):
-        Slotted.__init__(self, s1, s2)
-        self.s3 = s3
-
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other, '__class__', 's1', 's2', 's3', 's4')
-
-
-def test_pickling_w_slots_only():
-    """
-    >>> x = SubSlotted('x', 'y', 'z')
-
-    >>> d, s = x.__getstate__()
-    >>> d
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> d
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-class SubSubSlotted(SubSlotted):
-
-    def __init__(self, s1, s2, s3, **kw):
-        SubSlotted.__init__(self, s1, s2, s3)
-        self.__dict__.update(kw)
-        self._v_favorite_color = 'blue'
-        self._p_foo = 'bar'
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other,
-                        '__class__', 's1', 's2', 's3', 's4',
-                        *(self.__dict__.keys()))
-
-def test_pickling_w_slots():
-    """
-    >>> x = SubSubSlotted('x', 'y', 'z', aaa=1, bbb='foo')
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {'aaa': 1, 'bbb': 'foo'}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {'aaa': 1, 'bbb': 'foo'}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-def test_pickling_w_slots_w_empty_dict():
-    """
-    >>> x = SubSubSlotted('x', 'y', 'z')
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-def test_suite():
-    return DocTestSuite()
diff --git a/branches/bug1734/src/Persistence/tests/test_mapping.py b/branches/bug1734/src/Persistence/tests/test_mapping.py
deleted file mode 100644
index 796048e1..00000000
--- a/branches/bug1734/src/Persistence/tests/test_mapping.py
+++ /dev/null
@@ -1,90 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""
-$Id$
-"""
-import unittest
-from doctest import DocTestSuite
-from Persistence import PersistentMapping
-
-def test_basic_functionality():
-    """
-    >>> m = PersistentMapping({'x': 1}, a=2, b=3)
-    >>> m['name'] = 'bob'
-    >>> m['fred']
-    Traceback (most recent call last):
-    ...
-    KeyError: 'fred'
-    >>> m.get('fred')
-    >>> m.get('fred', 42)
-    42
-    >>> m.get('name', 42)
-    'bob'
-    >>> m.get('name')
-    'bob'
-    >>> m['name']
-    'bob'
-
-    >>> keys = m.keys()
-    >>> keys.sort()
-    >>> keys
-    ['a', 'b', 'name', 'x']
-
-    >>> values = m.values()
-    >>> values.sort()
-    >>> values
-    [1, 2, 3, 'bob']
-
-    >>> items = m.items()
-    >>> items.sort()
-    >>> items
-    [('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)]
-
-    >>> keys = list(m.iterkeys())
-    >>> keys.sort()
-    >>> keys
-    ['a', 'b', 'name', 'x']
-
-    >>> values = list(m.itervalues())
-    >>> values.sort()
-    >>> values
-    [1, 2, 3, 'bob']
-
-    >>> items = list(m.iteritems())
-    >>> items.sort()
-    >>> items
-    [('a', 2), ('b', 3), ('name', 'bob'), ('x', 1)]
-
-    >>> 'name' in m
-    True
-
-    """
-
-def test_old_pickles():
-    """
-    >>> m = PersistentMapping()
-    >>> m.__setstate__({'_container': {'x': 1, 'y': 2}})
-    >>> items = m.items()
-    >>> items.sort()
-    >>> items
-    [('x', 1), ('y', 2)]
-
-    """
-
-def test_suite():
-    return unittest.TestSuite((
-        DocTestSuite(),
-        ))
-
-if __name__ == '__main__': unittest.main()
diff --git a/branches/bug1734/src/ThreadedAsync/LoopCallback.py b/branches/bug1734/src/ThreadedAsync/LoopCallback.py
deleted file mode 100644
index 60c30ae6..00000000
--- a/branches/bug1734/src/ThreadedAsync/LoopCallback.py
+++ /dev/null
@@ -1,192 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Manage the asyncore mainloop in a multi-threaded app
-
-In a multi-threaded application, only a single thread runs the
-asyncore mainloop.  This thread (the "mainloop thread") may not have
-started the mainloop by the time another thread needs to perform an
-async action that requires it.  As a result, other threads need to
-coordinate with the mainloop thread to find out whether the mainloop
-is running.
-
-This module implements a callback mechanism that allows other threads
-to be notified when the mainloop starts.  A thread calls
-register_loop_callback() to register interest.  When the mainloop
-thread calls loop(), each registered callback will be called with the
-socket map as its first argument.
-"""
-
-import asyncore
-import select
-import thread
-import time
-from errno import EINTR
-
-_loop_lock = thread.allocate_lock()
-_looping = None
-_loop_callbacks = []
-
-def remove_loop_callback(callback):
-    """Remove a callback function registered earlier.
-
-    This is useful if loop() was never called.
-    """
-    for i in range(len(_loop_callbacks)):
-        if _loop_callbacks[i][0] == callback:
-            del _loop_callbacks[i]
-            return
-
-def register_loop_callback(callback, args=(), kw=None):
-    """Register callback function to be called when mainloop starts
-
-    The callable object callback will be invoked when the mainloop
-    starts.  If the mainloop is currently running, the callback will
-    be invoked immediately.
-
-    The callback will be called with a single argument, the mainloop
-    socket map, unless the optional args or kw arguments are used.
-    args defines a tuple of extra arguments to pass after the socket
-    map.  kw defines a dictionary of keyword arguments.
-    """
-    _loop_lock.acquire()
-    try:
-        if _looping is not None:
-            callback(_looping, *args, **(kw or {}))
-        else:
-            _loop_callbacks.append((callback, args, kw))
-    finally:
-        _loop_lock.release()
-
-def _start_loop(map):
-    _loop_lock.acquire()
-    try:
-        global _looping
-        _looping = map
-        while _loop_callbacks:
-            cb, args, kw = _loop_callbacks.pop()
-            cb(map, *args, **(kw or {}))
-    finally:
-        _loop_lock.release()
-
-def _stop_loop():
-    _loop_lock.acquire()
-    try:
-        global _looping
-        _looping = None
-    finally:
-        _loop_lock.release()
-
-def poll(timeout=0.0, map=None):
-    """A copy of asyncore.poll() with a bug fixed (see comment).
-
-    (asyncore.poll2() and .poll3() don't have this bug.)
-    """
-    if map is None:
-        map = asyncore.socket_map
-    if map:
-        r = []; w = []; e = []
-        for fd, obj in map.items():
-            if obj.readable():
-                r.append(fd)
-            if obj.writable():
-                w.append(fd)
-        if [] == r == w == e:
-            time.sleep(timeout)
-        else:
-            try:
-                r, w, e = select.select(r, w, e, timeout)
-            except select.error, err:
-                if err[0] != EINTR:
-                    raise
-                else:
-                    # This part is missing in asyncore before Python 2.3
-                    return
-
-        for fd in r:
-            obj = map.get(fd)
-            if obj is not None:
-                try:
-                    obj.handle_read_event()
-                except asyncore.ExitNow:
-                    raise asyncore.ExitNow
-                except:
-                    obj.handle_error()
-
-        for fd in w:
-            obj = map.get(fd)
-            if obj is not None:
-                try:
-                    obj.handle_write_event()
-                except asyncore.ExitNow:
-                    raise asyncore.ExitNow
-                except:
-                    obj.handle_error()
-
-def loop(timeout=30.0, use_poll=0, map=None):
-    """Invoke asyncore mainloop
-
-    This function works like the regular asyncore.loop() function
-    except that it also triggers ThreadedAsync callback functions
-    before starting the loop.
-    """
-    global exit_status
-    exit_status = None
-
-    if use_poll:
-        if hasattr(select, 'poll'):
-            poll_fun = asyncore.poll3
-        else:
-            poll_fun = asyncore.poll2
-    else:
-        poll_fun = poll
-
-    if map is None:
-        map = asyncore.socket_map
-
-    _start_loop(map)
-    while map and exit_status is None:
-        poll_fun(timeout, map)
-    _stop_loop()
-
-
-# This module used to do something evil -- it rebound asyncore.loop to the
-# above loop() function.  What was evil about this is that if you added some
-# debugging to asyncore.loop, you'd spend 6 hours debugging why your debugging
-# code wasn't called!
-#
-# Code should explicitly call ThreadedAsync.loop() instead of
-# asyncore.loop().  Most of ZODB has been fixed, but ripping this out may
-# break 3rd party code.  So we'll issue a warning and let it continue -- for
-# now.
-
-##def deprecated_loop(*args, **kws):
-##    import warnings
-##    warnings.warn("""\
-##ThreadedAsync.loop() called through sneaky asyncore.loop() rebinding.
-##You should change your code to call ThreadedAsync.loop() explicitly.""",
-##                  DeprecationWarning)
-##    loop(*args, **kws)
-
-##asyncore.loop = deprecated_loop
-
-asyncore.loop = loop
diff --git a/branches/bug1734/src/ThreadedAsync/__init__.py b/branches/bug1734/src/ThreadedAsync/__init__.py
deleted file mode 100644
index f7378609..00000000
--- a/branches/bug1734/src/ThreadedAsync/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Manage the asyncore mainloop in a multi-threaded app.
-
-$Id$
-"""
-
-from LoopCallback import register_loop_callback, loop, remove_loop_callback
diff --git a/branches/bug1734/src/ZConfig/NEWS.txt b/branches/bug1734/src/ZConfig/NEWS.txt
deleted file mode 100644
index d9ac98e3..00000000
--- a/branches/bug1734/src/ZConfig/NEWS.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-Changes since ZConfig 2.1:
-
-- More documentation has been written.
-
-- Added a timedelta datatype function; the input is the same as for
-  the time-interval datatype, but the resulting value is a
-  datetime.timedelta object.
-
-- Make sure keys specified as attributes of the <default> element are
-  converted by the appropriate key type, and are re-checked for
-  derived sections.
-
-- Refactored the ZConfig.components.logger schema components so that a
-  schema can import just one of the "eventlog" or "logger" sections if
-  desired.  This can be helpful to avoid naming conflicts.
-
-- Added a reopen() method to the logger factories.
-
-- Always use an absolute pathname when opening a FileHandler.
-
-- A fix to the logger 'format' key to allow the %(process)d expansion variable
-  that the logging package supports.
-
-Changes since ZConfig 2.0:
-
-- Removed compatibility with Python 2.1 and 2.2.
-
-- Schema components must really be in Python packages; the directory
-  search has been modified to perform an import to locate the package
-  rather than incorrectly implementing the search algorithm.
-
-- The default objects used for section values now provide a method
-  getSectionAttributes(); this returns a list of all the attributes of
-  the section object which store configuration-defined data (including
-  information derived from the schema).
-
-- Default information can now be included in a schema for <key
-  name="+"> and <multikey name="+"> by using <default key="...">.
-
-- More documentation has been added to discuss schema extension.
-
-- Support for a Unicode-free Python has been fixed.
-
-- Derived section types now inherit the datatype of the base type if
-  no datatype is identified explicitly.
-
-- Derived section types can now override the keytype instead of always
-  inheriting from their base type.
-
-- <import package='...'/> makes use of the current prefix if the
-  package name begins with a dot.
-
-- Added two standard datatypes:  dotted-name and dotted-suffix.
-
-- Added two standard schema components: ZConfig.components.basic and
-  ZConfig.components.logger.
-
-Changes since ZConfig 1.0:
-
-- Configurations can import additional schema components using a new
-  "%import" directive; this can be used to integrate 3rd-party
-  components into an application.
-
-- Schemas may be extended using a new "extends" attribute on the
-  <schema> element.
-
-- Better error messages when elements in a schema definition are
-  improperly nested.
-
-- The "zconfig" script can now simply verify that a schema definition
-  is valid, if that's all that's needed.
diff --git a/branches/bug1734/src/ZConfig/PACKAGE.cfg b/branches/bug1734/src/ZConfig/PACKAGE.cfg
deleted file mode 100644
index 4145e089..00000000
--- a/branches/bug1734/src/ZConfig/PACKAGE.cfg
+++ /dev/null
@@ -1,26 +0,0 @@
-# Load the license from an external source, so we don't have to keep a
-# copy of it sitting around:
-<load>
-  LICENSE.txt  http://cvs.zope.org/Zope3/ZopePublicLicense.txt?rev=HEAD
-</load>
-
-# Add a few things to the distribution root.
-<distribution>
-  doc
-  LICENSE.txt
-  NEWS.txt
-  README.txt
-</distribution>
-
-# Specify what is included in the component.
-<collection>
-  # Python modules from the package:
-  *.py
-
-  # Child packages:
-  components
-  tests
-
-  # Other files and directories needed when distutils runs:
-  scripts
-</collection>
diff --git a/branches/bug1734/src/ZConfig/PUBLICATION.cfg b/branches/bug1734/src/ZConfig/PUBLICATION.cfg
deleted file mode 100644
index 8463e20c..00000000
--- a/branches/bug1734/src/ZConfig/PUBLICATION.cfg
+++ /dev/null
@@ -1,31 +0,0 @@
-Metadata-Version: 1.1
-Name: ZConfig
-Summary: Structured Configuration Library
-Home-page: http://www.zope.org/Members/fdrake/zconfig/
-Author: Fred L. Drake, Jr.
-Author-email: fred@zope.com
-License: ZPL 2
-Description: ZConfig is a configuration library intended for general use.  It
-        supports a hierarchical schema-driven configuration model that allows
-        a schema to specify data conversion routines written in Python.
-        ZConfig's model is very different from the model supported by the
-        ConfigParser module found in Python's standard library, and is more
-        suitable to configuration-intensive applications.
-        
-        ZConfig schemas are written in an XML-based language and are able to
-        "import" schema components provided by Python packages.  Since
-        components are able to bind to conversion functions provided by Python
-        code in the package (or elsewhere), configuration objects can be
-        arbitrarily complex, with values that have been verified against
-        arbitrary constraints.  This makes it easy for applications to
-        separate configuration support from configuration loading even with
-        configuration data being defined and consumed by a wide range of
-        separate packages.
-Platform: POSIX
-Platform: Windows
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: System Administrators
-Classifier: License :: OSI Approved :: Zope Public License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/branches/bug1734/src/ZConfig/README.txt b/branches/bug1734/src/ZConfig/README.txt
deleted file mode 100644
index 185f236f..00000000
--- a/branches/bug1734/src/ZConfig/README.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-This is ZConfig.
-
-ZConfig is a configuration library intended for general use.  It
-supports a hierarchical schema-driven configuration model that allows
-a schema to specify data conversion routines written in Python.
-ZConfig's model is very different from the model supported by the
-ConfigParser module found in Python's standard library, and is more
-suitable to configuration-intensive applications.
-
-ZConfig schemas are written in an XML-based language and are able to
-"import" schema components provided by Python packages.  Since
-components are able to bind to conversion functions provided by Python
-code in the package (or elsewhere), configuration objects can be
-arbitrarily complex, with values that have been verified against
-arbitrary constraints.  This makes it easy for applications to
-separate configuration support from configuration loading even with
-configuration data being defined and consumed by a wide range of
-separate packages.
-
-ZConfig is licensed under the Zope Public License, version 2.1.  See
-the file LICENSE.txt in the distribution for the full license text.
-
-Reference documentation is available in the doc/ directory.
-
-Information on the latest released version of the ZConfig package is
-available at
-
-  http://www.zope.org/Members/fdrake/zconfig/
-
-You may either create an RPM and install this, or install directly from
-the source distribution.
-
-
-Creating RPMs:
-
-  python setup.py bdist_rpm
-
-  If you need to force the Python interpreter to, for example, python2:
-
-    python2 setup.py bdist_rpm --python=python2
-
-
-Installation from the source distribution:
-
-    python setup.py install
-
-  To install to a user's home-dir:
-    python setup.py install --home=<dir>
-
-  To install to another prefix (e.g. /usr/local):
-    python setup.py install --prefix=/usr/local
-
-  If you need to force the python interpreter to e.g. python2:
-    python2 setup.py install
-
-  For more information please refer to
-    http://www.python.org/doc/current/inst/inst.html
diff --git a/branches/bug1734/src/ZConfig/SETUP.cfg b/branches/bug1734/src/ZConfig/SETUP.cfg
deleted file mode 100644
index 601c2f40..00000000
--- a/branches/bug1734/src/ZConfig/SETUP.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# Metadata used by zpkg.
-
-#documentation  doc/zconfig.pdf
-#documentation  doc/schema.dtd
-
-script scripts/zconfig*
diff --git a/branches/bug1734/src/ZConfig/__init__.py b/branches/bug1734/src/ZConfig/__init__.py
deleted file mode 100644
index 0704d19e..00000000
--- a/branches/bug1734/src/ZConfig/__init__.py
+++ /dev/null
@@ -1,128 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Configuration data structures and loader for the ZRS.
-
-$Id: __init__.py,v 1.18 2004/04/15 20:33:32 fdrake Exp $
-"""
-version_info = (2, 2)
-__version__ = ".".join([str(n) for n in version_info])
-
-from ZConfig.loader import loadConfig, loadConfigFile
-from ZConfig.loader import loadSchema, loadSchemaFile
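-
-# Typical top-level use (an illustrative sketch; the file names are
-# made up):
-#
-#     schema = loadSchema("schema.xml")
-#     config, handler = loadConfig(schema, "app.conf")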
-
-
-class ConfigurationError(Exception):
-    """Base class for ZConfig exceptions."""
-
-    def __init__(self, msg, url=None):
-        self.message = msg
-        self.url = url
-        Exception.__init__(self, msg)
-
-    def __str__(self):
-        return self.message
-
-
-class _ParseError(ConfigurationError):
-    def __init__(self, msg, url, lineno, colno=None):
-        self.lineno = lineno
-        self.colno = colno
-        ConfigurationError.__init__(self, msg, url)
-
-    def __str__(self):
-        s = self.message
-        if self.url:
-            s += "\n("
-        elif (self.lineno, self.colno) != (None, None):
-            s += " ("
-        if self.lineno:
-            s += "line %d" % self.lineno
-            if self.colno is not None:
-                s += ", column %d" % self.colno
-            if self.url:
-                s += " in %s)" % self.url
-            else:
-                s += ")"
-        elif self.url:
-            s += self.url + ")"
-        return s
-
-
-class SchemaError(_ParseError):
-    """Raised when there's an error in the schema itself."""
-
-    def __init__(self, msg, url=None, lineno=None, colno=None):
-        _ParseError.__init__(self, msg, url, lineno, colno)
-
-
-class SchemaResourceError(SchemaError):
-    """Raised when there's an error locating a resource required by the schema.
-    """
-
-    def __init__(self, msg, url=None, lineno=None, colno=None,
-                 path=None, package=None, filename=None):
-        self.filename = filename
-        self.package = package
-        if path is not None:
-            path = path[:]
-        self.path = path
-        SchemaError.__init__(self, msg, url, lineno, colno)
-
-    def __str__(self):
-        s = SchemaError.__str__(self)
-        if self.package is not None:
-            s += "\n  Package name: " + repr(self.package)
-        if self.filename is not None:
-            s += "\n  File name: " + repr(self.filename)
-        if self.path is not None:
-            s += "\n  Package path: " + repr(self.path)
-        return s
-
-
-class ConfigurationSyntaxError(_ParseError):
-    """Raised when there's a syntax error in a configuration file."""
-
-
-class DataConversionError(ConfigurationError, ValueError):
-    """Raised when a data type conversion function raises ValueError."""
-
-    def __init__(self, exception, value, position):
-        ConfigurationError.__init__(self, str(exception))
-        self.exception = exception
-        self.value = value
-        self.lineno, self.colno, self.url = position
-
-    def __str__(self):
-        s = "%s (line %s" % (self.message, self.lineno)
-        if self.colno is not None:
-            s += ", %s" % self.colno
-        if self.url:
-            s += ", in %s)" % self.url
-        else:
-            s += ")"
-        return s
-
-
-class SubstitutionSyntaxError(ConfigurationError):
-    """Raised when interpolation source text contains syntactical errors."""
-
-
-class SubstitutionReplacementError(ConfigurationSyntaxError, LookupError):
-    """Raised when no replacement is available for a reference."""
-
-    def __init__(self, source, name, url=None, lineno=None):
-        self.source = source
-        self.name = name
-        ConfigurationSyntaxError.__init__(
-            self, "no replacement for " + `name`, url, lineno)
diff --git a/branches/bug1734/src/ZConfig/cfgparser.py b/branches/bug1734/src/ZConfig/cfgparser.py
deleted file mode 100644
index ef511275..00000000
--- a/branches/bug1734/src/ZConfig/cfgparser.py
+++ /dev/null
@@ -1,191 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Configuration parser."""
-
-import ZConfig
-import ZConfig.url
-
-from ZConfig.substitution import isname, substitute
-
-
-class ZConfigParser:
-    __metaclass__ = type
-    __slots__ = ('resource', 'context', 'lineno',
-                 'stack', 'defs', 'file', 'url')
-
-    def __init__(self, resource, context, defines=None):
-        self.resource = resource
-        self.context = context
-        self.file = resource.file
-        self.url = resource.url
-        self.lineno = 0
-        self.stack = []   # [(type, name, prevmatcher), ...]
-        if defines is None:
-            defines = {}
-        self.defs = defines
-
-    def nextline(self):
-        line = self.file.readline()
-        if line:
-            self.lineno += 1
-            return False, line.strip()
-        else:
-            return True, None
-
-    def parse(self, section):
-        done, line = self.nextline()
-        while not done:
-            if line[:1] in ("", "#"):
-                # blank line or comment
-                pass
-
-            elif line[:2] == "</":
-                # section end
-                if line[-1] != ">":
-                    self.error("malformed section end")
-                section = self.end_section(section, line[2:-1])
-
-            elif line[0] == "<":
-                # section start
-                if line[-1] != ">":
-                    self.error("malformed section start")
-                section = self.start_section(section, line[1:-1])
-
-            elif line[0] == "%":
-                self.handle_directive(section, line[1:])
-
-            else:
-                self.handle_key_value(section, line)
-
-            done, line = self.nextline()
-
-        if self.stack:
-            self.error("unclosed sections not allowed")
-
-    def start_section(self, section, rest):
-        isempty = rest[-1:] == "/"
-        if isempty:
-            text = rest[:-1].rstrip()
-        else:
-            text = rest.rstrip()
-        # parse section start stuff here
-        m = _section_start_rx.match(text)
-        if not m:
-            self.error("malformed section header")
-        type, name = m.group('type', 'name')
-        type = type.lower()
-        if name:
-            name = name.lower()
-        try:
-            newsect = self.context.startSection(section, type, name)
-        except ZConfig.ConfigurationError, e:
-            self.error(e[0])
-
-        if isempty:
-            self.context.endSection(section, type, name, newsect)
-            return section
-        else:
-            self.stack.append((type, name, section))
-            return newsect
-
-    def end_section(self, section, rest):
-        if not self.stack:
-            self.error("unexpected section end")
-        type = rest.rstrip().lower()
-        opentype, name, prevsection = self.stack.pop()
-        if type != opentype:
-            self.error("unbalanced section end")
-        try:
-            self.context.endSection(
-                prevsection, type, name, section)
-        except ZConfig.ConfigurationError, e:
-            self.error(e[0])
-        return prevsection
-
-    def handle_key_value(self, section, rest):
-        m = _keyvalue_rx.match(rest)
-        if not m:
-            self.error("malformed configuration data")
-        key, value = m.group('key', 'value')
-        if not value:
-            value = ''
-        else:
-            value = self.replace(value)
-        try:
-            section.addValue(key, value, (self.lineno, None, self.url))
-        except ZConfig.ConfigurationError, e:
-            self.error(e[0])
-
-    def handle_directive(self, section, rest):
-        m = _keyvalue_rx.match(rest)
-        if not m:
-            self.error("missing or unrecognized directive")
-        name, arg = m.group('key', 'value')
-        if name not in ("define", "import", "include"):
-            self.error("unknown directive: " + `name`)
-        if not arg:
-            self.error("missing argument to %%%s directive" % name)
-        if name == "include":
-            self.handle_include(section, arg)
-        elif name == "define":
-            self.handle_define(section, arg)
-        elif name == "import":
-            self.handle_import(section, arg)
-        else:
-            assert 0, "unexpected directive for " + `"%" + rest`
-
-    def handle_import(self, section, rest):
-        pkgname = self.replace(rest.strip())
-        self.context.importSchemaComponent(pkgname)
-
-    def handle_include(self, section, rest):
-        rest = self.replace(rest.strip())
-        newurl = ZConfig.url.urljoin(self.url, rest)
-        self.context.includeConfiguration(section, newurl, self.defs)
-
-    def handle_define(self, section, rest):
-        parts = rest.split(None, 1)
-        defname = parts[0].lower()
-        defvalue = ''
-        if len(parts) == 2:
-            defvalue = parts[1]
-        if self.defs.has_key(defname):
-            self.error("cannot redefine " + `defname`)
-        if not isname(defname):
-            self.error("not a substitution legal name: " + `defname`)
-        self.defs[defname] = self.replace(defvalue)
-
-    def replace(self, text):
-        try:
-            return substitute(text, self.defs)
-        except ZConfig.SubstitutionReplacementError, e:
-            e.lineno = self.lineno
-            e.url = self.url
-            raise
-
-    def error(self, message):
-        raise ZConfig.ConfigurationSyntaxError(message, self.url, self.lineno)
-
-
-import re
-# _name_re does not allow "(" or ")" for historical reasons.  Though
-# the restriction could be lifted, there seems no need to do so.
-_name_re = r"[^\s()]+"
-_keyvalue_rx = re.compile(r"(?P<key>%s)\s*(?P<value>[^\s].*)?$"
-                          % _name_re)
-_section_start_rx = re.compile(r"(?P<type>%s)"
-                               r"(?:\s+(?P<name>%s))?"
-                               r"$"
-                               % (_name_re, _name_re))
-del re
diff --git a/branches/bug1734/src/ZConfig/cmdline.py b/branches/bug1734/src/ZConfig/cmdline.py
deleted file mode 100644
index 11389921..00000000
--- a/branches/bug1734/src/ZConfig/cmdline.py
+++ /dev/null
@@ -1,179 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Support for command-line provision of settings.
-
-This module provides an extension of the ConfigLoader class which adds
-a way to add configuration settings from an alternate source.  Each
-setting is described by a string of the form::
-
-    some/path/to/key=value
-"""
-
-import ZConfig
-import ZConfig.loader
-import ZConfig.matcher
-
-
-class ExtendedConfigLoader(ZConfig.loader.ConfigLoader):
-    def __init__(self, schema):
-        ZConfig.loader.ConfigLoader.__init__(self, schema)
-        self.clopts = []   # [(optpath, value, source-position), ...]
-
-    def addOption(self, spec, pos=None):
-        if pos is None:
-            pos = "<command-line option>", -1, -1
-        if "=" not in spec:
-            e = ZConfig.ConfigurationSyntaxError(
-                "invalid configuration specifier", *pos)
-            e.specifier = spec
-            raise e
-        # For now, just add it to the list; not clear that checking
-        # against the schema at this point buys anything.
-        opt, val = spec.split("=", 1)
-        optpath = opt.split("/")
-        if "" in optpath:
-            # // is not allowed in option path
-            e = ZConfig.ConfigurationSyntaxError(
-                "'//' is not allowed in an option path", *pos)
-            e.specifier = spec
-            raise e
-        self.clopts.append((optpath, val, pos))
-
-    def createSchemaMatcher(self):
-        if self.clopts:
-            sm = ExtendedSchemaMatcher(self.schema)
-            sm.set_optionbag(self.cook())
-        else:
-            sm = ZConfig.loader.ConfigLoader.createSchemaMatcher(self)
-        return sm
-
-    def cook(self):
-        if self.clopts:
-            return OptionBag(self.schema, self.schema, self.clopts)
-        else:
-            return None
-
-
-class OptionBag:
-    def __init__(self, schema, sectiontype, options):
-        self.sectiontype = sectiontype
-        self.schema = schema
-        self.keypairs = {}
-        self.sectitems = []
-        self._basic_key = schema.registry.get("basic-key")
-        for item in options:
-            optpath, val, pos = item
-            name = sectiontype.keytype(optpath[0])
-            if len(optpath) == 1:
-                self.add_value(name, val, pos)
-            else:
-                self.sectitems.append(item)
-
-    def basic_key(self, s, pos):
-        try:
-            return self._basic_key(s)
-        except ValueError:
-            raise ZConfig.ConfigurationSyntaxError(
-                "could not convert basic-key value", *pos)
-
-    def add_value(self, name, val, pos):
-        if self.keypairs.has_key(name):
-            L = self.keypairs[name]
-        else:
-            L = []
-            self.keypairs[name] = L
-        L.append((val, pos))
-
-    def has_key(self, name):
-        return self.keypairs.has_key(name)
-
-    def get_key(self, name):
-        """Return a list of (value, pos) items for the key 'name'.
-
-        The returned list may be empty.
-        """
-        L = self.keypairs.get(name)
-        if L:
-            del self.keypairs[name]
-            return L
-        else:
-            return []
-
-    def keys(self):
-        return self.keypairs.keys()
-
-    def get_section_info(self, type, name):
-        L = []  # what pertains to the child section
-        R = []  # what we keep
-        for item in self.sectitems:
-            optpath, val, pos = item
-            s = optpath[0]
-            bk = self.basic_key(s, pos)
-            if name and s.lower() == name:
-                L.append((optpath[1:], val, pos))
-            elif bk == type:
-                L.append((optpath[1:], val, pos))
-            else:
-                R.append(item)
-        if L:
-            self.sectitems[:] = R
-            return OptionBag(self.schema, self.schema.gettype(type), L)
-        else:
-            return None
-
-    def finish(self):
-        if self.sectitems or self.keypairs:
-            raise ZConfig.ConfigurationError(
-                "not all command line options were consumed")
-
-
-class MatcherMixin:
-    def set_optionbag(self, bag):
-        self.optionbag = bag
-
-    def addValue(self, key, value, position):
-        try:
-            realkey = self.type.keytype(key)
-        except ValueError, e:
-            raise ZConfig.DataConversionError(e, key, position)
-        if self.optionbag.has_key(realkey):
-            return
-        ZConfig.matcher.BaseMatcher.addValue(self, key, value, position)
-
-    def createChildMatcher(self, type, name):
-        sm = ZConfig.matcher.BaseMatcher.createChildMatcher(self, type, name)
-        bag = self.optionbag.get_section_info(type.name, name)
-        if bag is not None:
-            sm = ExtendedSectionMatcher(
-                sm.info, sm.type, sm.name, sm.handlers)
-            sm.set_optionbag(bag)
-        return sm
-
-    def finish_optionbag(self):
-        for key in self.optionbag.keys():
-            for val, pos in self.optionbag.get_key(key):
-                ZConfig.matcher.BaseMatcher.addValue(self, key, val, pos)
-        self.optionbag.finish()
-
-
-class ExtendedSectionMatcher(MatcherMixin, ZConfig.matcher.SectionMatcher):
-    def finish(self):
-        self.finish_optionbag()
-        return ZConfig.matcher.SectionMatcher.finish(self)
-
-class ExtendedSchemaMatcher(MatcherMixin, ZConfig.matcher.SchemaMatcher):
-    def finish(self):
-        self.finish_optionbag()
-        return ZConfig.matcher.SchemaMatcher.finish(self)
diff --git a/branches/bug1734/src/ZConfig/components/__init__.py b/branches/bug1734/src/ZConfig/components/__init__.py
deleted file mode 100644
index f8981395..00000000
--- a/branches/bug1734/src/ZConfig/components/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This is a Python package.
diff --git a/branches/bug1734/src/ZConfig/components/basic/__init__.py b/branches/bug1734/src/ZConfig/components/basic/__init__.py
deleted file mode 100644
index f8981395..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This is a Python package.
diff --git a/branches/bug1734/src/ZConfig/components/basic/component.xml b/branches/bug1734/src/ZConfig/components/basic/component.xml
deleted file mode 100644
index 5a2195dc..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/component.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<component>
-  <description>
-    Convenient loader which causes all the "basic" components to be
-    loaded.
-  </description>
-
-  <import package="ZConfig.components.basic" file="mapping.xml"/>
-
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/basic/mapping.py b/branches/bug1734/src/ZConfig/components/basic/mapping.py
deleted file mode 100644
index 0825aeef..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/mapping.py
+++ /dev/null
@@ -1,18 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Python datatype for the ZConfig.components.basic.mapping section type."""
-
-def mapping(section):
-    return section.mapping
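-
-# Illustrative sketch: with a section type derived from
-# ZConfig.basic.mapping (see mapping.xml), a configuration such as
-#
-#     <mapping>
-#         key-one value-one
-#     </mapping>
-#
-# yields the dict {'key-one': 'value-one'} as the section value (the
-# key and value shown are made up).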
diff --git a/branches/bug1734/src/ZConfig/components/basic/mapping.xml b/branches/bug1734/src/ZConfig/components/basic/mapping.xml
deleted file mode 100644
index 4426fdd1..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/mapping.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<component>
-
-  <sectiontype name="ZConfig.basic.mapping"
-               datatype="ZConfig.components.basic.mapping.mapping">
-    <description>
-      Section that provides a simple mapping implementation.  An
-      application should derive a more specific section type for use
-      in configuration files:
-
-        &lt;import package="ZConfig.components.basic"
-                     file="mapping.xml"
-                     /&gt;
-
-        &lt;sectiontype name="mapping"
-                     extends="ZConfig.basic.mapping"
-                     /&gt;
-
-      If a non-standard keytype is needed, it can be overridden as
-      well:
-
-        &lt;sectiontype name="system-map"
-                     extends="ZConfig.basic.mapping"
-                     keytype="mypkg.datatypes.system_name"
-                     /&gt;
-
-    </description>
-
-    <key name="+"
-         attribute="mapping"
-         required="no"
-         />
-  </sectiontype>
-
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/basic/tests/__init__.py b/branches/bug1734/src/ZConfig/components/basic/tests/__init__.py
deleted file mode 100644
index f8981395..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This is a Python package.
diff --git a/branches/bug1734/src/ZConfig/components/basic/tests/test_mapping.py b/branches/bug1734/src/ZConfig/components/basic/tests/test_mapping.py
deleted file mode 100644
index c116507c..00000000
--- a/branches/bug1734/src/ZConfig/components/basic/tests/test_mapping.py
+++ /dev/null
@@ -1,89 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Tests of the 'basic' section types provided as part of
-ZConfig.components.basic."""
-
-import unittest
-
-from ZConfig.tests import support
-
-
-SIMPLE_SCHEMA = '''\
-<schema>
-  <import package="ZConfig.components.basic" file="mapping.xml" />
-
-  <sectiontype name="dict"
-               extends="ZConfig.basic.mapping" />
-
-  <sectiontype name="intkeys"
-               extends="ZConfig.basic.mapping"
-               keytype="integer" />
-
-  <section name="*"
-           type="dict"
-           attribute="simple_dict" />
-
-  <section name="*"
-           type="intkeys"
-           attribute="int_dict" />
-
-</schema>
-'''
-
-
-class BasicSectionTypeTestCase(support.TestBase):
-    schema = None
-
-    def setUp(self):
-        if self.schema is None:
-            self.__class__.schema = self.load_schema_text(SIMPLE_SCHEMA)
-
-    def test_simple_empty_dict(self):
-        conf = self.load_config_text(self.schema, "<dict/>")
-        self.assertEqual(conf.simple_dict, {})
-        conf = self.load_config_text(self.schema, """\
-            <dict foo>
-            # comment
-            </dict>
-            """)
-        self.assertEqual(conf.simple_dict, {})
-
-    def test_simple_dict(self):
-        conf = self.load_config_text(self.schema, """\
-           <dict foo>
-           key-one value-one
-           key-two value-two
-           </dict>
-           """)
-        L = conf.simple_dict.items()
-        L.sort()
-        self.assertEqual(L, [("key-one", "value-one"),
-                             ("key-two", "value-two")])
-
-    def test_derived_dict(self):
-        conf = self.load_config_text(self.schema, """\
-            <intkeys>
-            1 foo
-            2 bar
-            42 question?
-            </intkeys>
-            """)
-        L = conf.int_dict.items()
-        L.sort()
-        self.assertEqual(L, [(1, "foo"), (2, "bar"), (42, "question?")])
-
-
-def test_suite():
-    return unittest.makeSuite(BasicSectionTypeTestCase)
diff --git a/branches/bug1734/src/ZConfig/components/logger/__init__.py b/branches/bug1734/src/ZConfig/components/logger/__init__.py
deleted file mode 100644
index 442bfc81..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""ZConfig schema component package for logging configuration."""
-
-# Make sure we can't import this if "logging" isn't available; we
-# don't want partial imports to appear to succeed.
-
-try:
-    import logging
-except ImportError:
-    import sys
-    del sys.modules[__name__]
diff --git a/branches/bug1734/src/ZConfig/components/logger/abstract.xml b/branches/bug1734/src/ZConfig/components/logger/abstract.xml
deleted file mode 100644
index 301f43ad..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/abstract.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<component>
-  <description>
-  </description>
-
-  <abstracttype name="ZConfig.logger.handler"/>
-  <abstracttype name="ZConfig.logger.log"/>
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/logger/base-logger.xml b/branches/bug1734/src/ZConfig/components/logger/base-logger.xml
deleted file mode 100644
index b611edb5..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/base-logger.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<component prefix="ZConfig.components.logger.logger">
-
-  <import package="ZConfig.components.logger" file="abstract.xml"/>
-
-  <sectiontype name="ZConfig.logger.base-logger">
-    <description>
-      Base definition for the logger types defined by
-      ZConfig.components.logger.  This exists entirely to provide
-      shared key definitions and documentation.
-    </description>
-
-    <key name="level"
-         datatype="ZConfig.components.logger.datatypes.logging_level"
-         default="info">
-      <description>
-        Verbosity setting for the logger.  Values must be the name of
-        a level, or an integer in the range [0..50].  The names of the
-        levels, in order of increasing verbosity (names on the same
-        line are equivalent):
-
-            critical, fatal
-            error
-            warn, warning
-            info
-            blather
-            debug
-            trace
-            all
-
-        The special name "notset", or the numeric value 0, indicates
-        that the setting for the parent logger should be used.
-
-        It is strongly recommended that names be used rather than
-        numeric values to ensure that configuration files can be
-        deciphered more easily.
-      </description>
-    </key>
-
-    <multisection type="ZConfig.logger.handler"
-                  attribute="handlers" name="*">
-      <description>
-        Handlers to install on this logger.  Each handler describes
-        how logging events should be presented.
-      </description>
-    </multisection>
-  </sectiontype>
-
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/logger/component.xml b/branches/bug1734/src/ZConfig/components/logger/component.xml
deleted file mode 100644
index 4ea9d0eb..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/component.xml
+++ /dev/null
@@ -1,10 +0,0 @@
-<component prefix="ZConfig.components.logger.datatypes">
-  <description>
-  </description>
-
-  <import package="ZConfig.components.logger" file="abstract.xml"/>
-  <import package="ZConfig.components.logger" file="handlers.xml"/>
-  <import package="ZConfig.components.logger" file="logger.xml"/>
-  <import package="ZConfig.components.logger" file="eventlog.xml"/>
-
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/logger/datatypes.py b/branches/bug1734/src/ZConfig/components/logger/datatypes.py
deleted file mode 100644
index 38581154..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/datatypes.py
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""ZConfig datatypes for logging support."""
-
-
-_logging_levels = {
-    "critical": 50,
-    "fatal": 50,
-    "error": 40,
-    "warn": 30,
-    "warning": 30,
-    "info": 20,
-    "blather": 15,
-    "debug": 10,
-    "trace": 5,
-    "all": 1,
-    "notset": 0,
-    }
-
-def logging_level(value):
-    s = str(value).lower()
-    if _logging_levels.has_key(s):
-        return _logging_levels[s]
-    else:
-        v = int(s)
-        if v < 0 or v > 50:
-            raise ValueError("log level not in range: " + `v`)
-        return v
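
A quick sketch of how the converter above behaves (Python 2, like the
rest of this patch); the results follow directly from the
_logging_levels table:

    # Sketch: logging_level accepts case-insensitive names and integers 0..50.
    from ZConfig.components.logger.datatypes import logging_level

    assert logging_level("WARN") == 30    # names are lowercased before lookup
    assert logging_level("15") == 15      # plain integers pass the range check
    try:
        logging_level("99")               # outside [0..50]
    except ValueError:
        pass                              # rejected, as expected
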
diff --git a/branches/bug1734/src/ZConfig/components/logger/eventlog.xml b/branches/bug1734/src/ZConfig/components/logger/eventlog.xml
deleted file mode 100644
index 696df30a..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/eventlog.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<component prefix="ZConfig.components.logger.logger">
-
-  <import package="ZConfig.components.logger" file="abstract.xml"/>
-  <import package="ZConfig.components.logger" file="base-logger.xml"/>
-
-  <sectiontype name="eventlog"
-               datatype=".EventLogFactory"
-               extends="ZConfig.logger.base-logger"
-               implements="ZConfig.logger.log">
-    <description>
-      Configuration for the root logger.
-    </description>
-  </sectiontype>
-
-</component>
diff --git a/branches/bug1734/src/ZConfig/components/logger/factory.py b/branches/bug1734/src/ZConfig/components/logger/factory.py
deleted file mode 100644
index abc12c55..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/factory.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-_marker = object()
-
-class Factory:
-    """Generic wrapper for instance construction.
-
-    Calling the factory causes the instance to be created if it hasn't
-    already been created, and returns the object.  Calling the factory
-    multiple times returns the same object.
-
-    The instance is created using the factory's create() method, which
-    must be overridden by subclasses.
-    """
-    def __init__(self):
-        self.instance = _marker
-
-    def __call__(self):
-        if self.instance is _marker:
-            self.instance = self.create()
-        return self.instance
-
-    def create(self):
-        raise NotImplementedError("subclasses need to override create()")
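
For illustration, a minimal (hypothetical) subclass showing the
memoizing behavior the docstring describes; ListFactory is not part of
the package:

    from ZConfig.components.logger.factory import Factory

    class ListFactory(Factory):
        def create(self):
            return []                  # built exactly once, on first call

    f = ListFactory()
    assert f() is f()                  # later calls return the cached instance
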
diff --git a/branches/bug1734/src/ZConfig/components/logger/handlers.py b/branches/bug1734/src/ZConfig/components/logger/handlers.py
deleted file mode 100644
index ffca9267..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/handlers.py
+++ /dev/null
@@ -1,177 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""ZConfig factory datatypes for log handlers."""
-
-import sys
-
-from ZConfig.components.logger.factory import Factory
-
-
-_log_format_variables = {
-    'name': '',
-    'levelno': '3',
-    'levelname': 'DEBUG',
-    'pathname': 'apath',
-    'filename': 'afile',
-    'module': 'amodule',
-    'lineno': 1,
-    'created': 1.1,
-    'asctime': 'atime',
-    'msecs': 1,
-    'relativeCreated': 1,
-    'thread': 1,
-    'message': 'amessage',
-    'process': 1,
-    }
-
-def log_format(value):
-    value = ctrl_char_insert(value)
-    try:
-        # Make sure the format string uses only names that will be
-        # provided, has reasonable type flags for each, and does not
-        # expect positional args.
-        value % _log_format_variables
-    except (ValueError, KeyError):
-        raise ValueError, 'Invalid log format string %s' % value
-    return value
-
-_control_char_rewrites = {r'\n': '\n', r'\t': '\t', r'\b': '\b',
-                          r'\f': '\f', r'\r': '\r'}.items()
-
-def ctrl_char_insert(value):
-    for pattern, replacement in _control_char_rewrites:
-        value = value.replace(pattern, replacement)
-    return value
-
-
-class HandlerFactory(Factory):
-    def __init__(self, section):
-        Factory.__init__(self)
-        self.section = section
-
-    def create_loghandler(self):
-        raise NotImplementedError(
-            "subclasses must override create_loghandler()")
-
-    def create(self):
-        import logging
-        logger = self.create_loghandler()
-        logger.setFormatter(logging.Formatter(self.section.format,
-                                              self.section.dateformat))
-        logger.setLevel(self.section.level)
-        return logger
-
-    def getLevel(self):
-        return self.section.level
-
-class FileHandlerFactory(HandlerFactory):
-    def create_loghandler(self):
-        from ZConfig.components.logger import loghandler
-        path = self.section.path
-        if path == "STDERR":
-            handler = loghandler.StreamHandler(sys.stderr)
-        elif path == "STDOUT":
-            handler = loghandler.StreamHandler(sys.stdout)
-        else:
-            handler = loghandler.FileHandler(path)
-        return handler
-
-_syslog_facilities = {
-    "auth": 1,
-    "authpriv": 1,
-    "cron": 1,
-    "daemon": 1,
-    "kern": 1,
-    "lpr": 1,
-    "mail": 1,
-    "news": 1,
-    "security": 1,
-    "syslog": 1,
-    "user": 1,
-    "uucp": 1,
-    "local0": 1,
-    "local1": 1,
-    "local2": 1,
-    "local3": 1,
-    "local4": 1,
-    "local5": 1,
-    "local6": 1,
-    "local7": 1,
-    }
-
-def syslog_facility(value):
-    value = value.lower()
-    if not _syslog_facilities.has_key(value):
-        L = _syslog_facilities.keys()
-        L.sort()
-        raise ValueError("Syslog facility must be one of " + ", ".join(L))
-    return value
-
-class SyslogHandlerFactory(HandlerFactory):
-    def create_loghandler(self):
-        from ZConfig.components.logger import loghandler
-        return loghandler.SysLogHandler(self.section.address.address,
-                                        self.section.facility)
-
-class Win32EventLogFactory(HandlerFactory):
-    def create_loghandler(self):
-        from ZConfig.components.logger import loghandler
-        return loghandler.Win32EventLogHandler(self.section.appname)
-
-def http_handler_url(value):
-    import urlparse
-    scheme, netloc, path, param, query, fragment = urlparse.urlparse(value)
-    if scheme != 'http':
-        raise ValueError, 'url must be an http url'
-    if not netloc:
-        raise ValueError, 'url must specify a location'
-    if not path:
-        raise ValueError, 'url must specify a path'
-    q = []
-    if param:
-        q.append(';')
-        q.append(param)
-    if query:
-        q.append('?')
-        q.append(query)
-    if fragment:
-        q.append('#')
-        q.append(fragment)
-    return (netloc, path + ''.join(q))
-
-def get_or_post(value):
-    value = value.upper()
-    if value not in ('GET', 'POST'):
-        raise ValueError('method must be "GET" or "POST", instead received: '
-                         + repr(value))
-    return value
-
-class HTTPHandlerFactory(HandlerFactory):
-    def create_loghandler(self):
-        from ZConfig.components.logger import loghandler
-        host, selector = self.section.url
-        return loghandler.HTTPHandler(host, selector, self.section.method)
-
-class SMTPHandlerFactory(HandlerFactory):
-    def create_loghandler(self):
-        from ZConfig.components.logger import loghandler
-        host, port = self.section.smtp_server
-        if not port:
-            mailhost = host
-        else:
-            mailhost = host, port
-        return loghandler.SMTPHandler(mailhost,
-                                      self.section.fromaddr,
-                                      self.section.toaddrs,
-                                      self.section.subject)
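
A short sketch of the format validation defined above (Python 2): any
%-style format naming only keys from _log_format_variables is accepted,
after literal escapes such as \n are rewritten to real control
characters; unknown names are rejected:

    from ZConfig.components.logger.handlers import log_format

    fmt = log_format(r"%(asctime)s %(levelname)s\n%(message)s")
    assert "\n" in fmt                 # ctrl_char_insert rewrote the escape
    try:
        log_format("%(no_such_key)s")  # name not in _log_format_variables
    except ValueError:
        pass
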
diff --git a/branches/bug1734/src/ZConfig/components/logger/handlers.xml b/branches/bug1734/src/ZConfig/components/logger/handlers.xml
deleted file mode 100644
index 041c5893..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/handlers.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<component prefix="ZConfig.components.logger.handlers">
-  <description>
-  </description>
-
-  <import package="ZConfig.components.logger" file="abstract.xml"/>
-
-  <sectiontype name="ZConfig.logger.base-log-handler">
-    <description>
-      Base type for most log handlers.  This cannot be used as a
-      loghandler directly since it doesn't implement the loghandler
-      abstract section type.
-    </description>
-    <key name="dateformat"
-         default="%Y-%m-%dT%H:%M:%S"/>
-    <key name="level"
-         default="notset"
-         datatype="ZConfig.components.logger.datatypes.logging_level"/>
-  </sectiontype>
-
-  <sectiontype name="logfile"
-               datatype=".FileHandlerFactory"
-               implements="ZConfig.logger.handler"
-               extends="ZConfig.logger.base-log-handler">
-    <key name="path" required="yes"/>
-    <key name="format"
-         default="------\n%(asctime)s %(levelname)s %(name)s %(message)s"
-         datatype=".log_format"/>
-  </sectiontype>
-
-  <sectiontype name="syslog"
-               datatype=".SyslogHandlerFactory"
-               implements="ZConfig.logger.handler"
-               extends="ZConfig.logger.base-log-handler">
-    <key name="facility" default="user" datatype=".syslog_facility"/>
-    <key name="address" datatype="socket-address" default="localhost:514"/>
-    <key name="format"
-         default="%(name)s %(message)s"
-         datatype=".log_format"/>
-  </sectiontype>
-
-  <sectiontype name="win32-eventlog"
-               datatype=".Win32EventLogFactory"
-               implements="ZConfig.logger.handler"
-               extends="ZConfig.logger.base-log-handler">
-    <key name="appname" default="Zope"/>
-    <key name="format"
-         default="%(levelname)s %(name)s %(message)s"
-         datatype=".log_format"/>
-  </sectiontype>
-
-  <sectiontype name="http-logger"
-               datatype=".HTTPHandlerFactory"
-               implements="ZConfig.logger.handler"
-               extends="ZConfig.logger.base-log-handler">
-    <key name="url" default="http://localhost/" datatype=".http_handler_url"/>
-    <key name="method" default="GET" datatype=".get_or_post"/>
-    <key name="format"
-         default="%(asctime)s %(levelname)s %(name)s %(message)s"
-         datatype=".log_format"/>
-  </sectiontype>
-
-  <sectiontype name="email-notifier"
-               datatype=".SMTPHandlerFactory"
-               implements="ZConfig.logger.handler"
-               extends="ZConfig.logger.base-log-handler">
-    <key name="from" required="yes" attribute="fromaddr"/>
-    <multikey name="to" required="yes" attribute="toaddrs"/>
-    <key name="subject" default="Message from Zope"/>
-    <key name="smtp-server" default="localhost" datatype="inet-address"/>
-    <key name="format"
-         default="%(asctime)s %(levelname)s %(name)s %(message)s"
-         datatype=".log_format"/>
-  </sectiontype>
-
-</component>
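
Put together with the defaults above, a configuration using these
handler types might look like the following sketch (the log path is
illustrative; the enclosing <eventlog> section comes from eventlog.xml):

    <eventlog>
      <logfile>
        path /var/log/zope/event.log
        level info
      </logfile>
      <syslog>
        facility local3
        level error
      </syslog>
    </eventlog>
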
diff --git a/branches/bug1734/src/ZConfig/components/logger/logger.py b/branches/bug1734/src/ZConfig/components/logger/logger.py
deleted file mode 100644
index d0573969..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/logger.py
+++ /dev/null
@@ -1,102 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""ZConfig factory datatypes for loggers."""
-
-from ZConfig.components.logger.factory import Factory
-
-
-class LoggerFactoryBase(Factory):
-    """Base class for logger factories.
-
-    Factory used to create loggers while delaying actual logger
-    instance construction.  We need to do this because we may want to
-    reference a logger before actually instantiating it (for example,
-    to allow the app time to set an effective user).  An instance of
-    this wrapper is a callable which, when called, returns a logger
-    object.
-    """
-
-    def __init__(self, section):
-        Factory.__init__(self)
-        self.level = section.level
-        self.handler_factories = section.handlers
-
-    def create(self):
-        # set the logger up
-        import logging
-        logger = logging.getLogger(self.name)
-        logger.setLevel(self.level)
-        if self.handler_factories:
-            for handler_factory in self.handler_factories:
-                handler = handler_factory()
-                logger.addHandler(handler)
-        else:
-            from ZConfig.components.logger import loghandler
-            logger.addHandler(loghandler.NullHandler())
-        return logger
-
-    def startup(self):
-        # make sure we've instantiated the logger
-        self()
-
-    def getLowestHandlerLevel(self):
-        """Return the lowest log level provided by any configured handler.
-
-        If all handlers and the logger itself have level==NOTSET, this
-        returns NOTSET.
-        """
-        import logging
-        lowest = self.level
-        for factory in self.handler_factories:
-            level = factory.getLevel()
-            if level != logging.NOTSET:
-                if lowest == logging.NOTSET:
-                    lowest = level
-                else:
-                    lowest = min(lowest, level)
-        return lowest
-
-    def reopen(self):
-        """Re-open any handlers for which this is a meaningful operation.
-
-        This only works on handlers on the logger provided by this
-        factory directly; handlers for child loggers are not affected.
-        (This can be considered a bug, but is sufficient at the
-        moment.)
-        """
-        logger = self()
-        for handler in logger.handlers:
-            reopen = getattr(handler, "reopen", None)
-            if reopen is not None and callable(reopen):
-                reopen()
-
-
-class EventLogFactory(LoggerFactoryBase):
-    """Logger factory that returns the root logger."""
-
-    name = None
-
-
-class LoggerFactory(LoggerFactoryBase):
-    """Logger factory that returns the named logger."""
-
-    def __init__(self, section):
-        LoggerFactoryBase.__init__(self, section)
-        self.name = section.name
-        self.propagate = section.propagate
-
-    def create(self):
-        logger = LoggerFactoryBase.create(self)
-        logger.propagate = self.propagate
-        return logger
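
The deferred construction described in LoggerFactoryBase's docstring
means a configuration can be parsed first and the logger materialized
later (say, after dropping privileges).  A sketch using the same schema
shape as the tests later in this patch:

    import ZConfig
    from StringIO import StringIO

    schema = ZConfig.loadSchemaFile(StringIO("""
    <schema>
      <import package='ZConfig.components.logger'/>
      <section type='eventlog' name='*' attribute='eventlog'/>
    </schema>
    """))
    conf, _ = ZConfig.loadConfigFile(schema, StringIO("<eventlog/>\n"))
    factory = conf.eventlog     # an EventLogFactory; no logger exists yet
    logger = factory()          # root logger and handlers created here
    assert factory() is logger  # later calls return the same object
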
diff --git a/branches/bug1734/src/ZConfig/components/logger/logger.xml b/branches/bug1734/src/ZConfig/components/logger/logger.xml
deleted file mode 100644
index f41c9ca1..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/logger.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<component prefix="ZConfig.components.logger.logger">
-  <description>
-  </description>
-
-  <import package="ZConfig.components.logger" file="abstract.xml"/>
-  <import package="ZConfig.components.logger" file="base-logger.xml"/>
-  <import package="ZConfig.components.logger" file="eventlog.xml"/>
-
-  <sectiontype name="logger"
-               datatype=".LoggerFactory"
-               extends="ZConfig.logger.base-logger"
-               implements="ZConfig.logger.log">
-    <key name="propagate"
-         datatype="boolean"
-         default="true">
-      <description>
-        Indicates whether events that reach this logger should be
-        propagated toward the root of the logger hierarchy.  If true
-        (the default), events will be passed to the logger's parent
-        after being handled.  If false, events will be handled and the
-        parent will not be informed.  There is no way to control
-        propagation by the severity of the event.
-      </description>
-    </key>
-
-    <key name="name"
-         datatype="dotted-name"
-         required="yes">
-      <description>
-        The dotted name of the logger.  This gives it a location in the
-        logging hierarchy.  Most applications provide a specific set
-        of subsystem names for which logging is meaningful; consult the
-        application documentation for the set of names that are
-        actually interesting for the application.
-      </description>
-    </key>
-  </sectiontype>
-
-</component>
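
An example section using this type (a sketch; the dotted name is
illustrative):

    <logger>
      name zodb.storage
      level debug
      propagate false
      <logfile>
        path STDOUT
      </logfile>
    </logger>
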
diff --git a/branches/bug1734/src/ZConfig/components/logger/loghandler.py b/branches/bug1734/src/ZConfig/components/logger/loghandler.py
deleted file mode 100644
index a0c4041a..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/loghandler.py
+++ /dev/null
@@ -1,71 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Handlers which can plug into a PEP 282 logger."""
-
-import os.path
-import sys
-
-from logging import Handler, StreamHandler
-from logging.handlers import SysLogHandler, BufferingHandler
-from logging.handlers import HTTPHandler, SMTPHandler
-from logging.handlers import NTEventLogHandler as Win32EventLogHandler
-
-
-class FileHandler(StreamHandler):
-    """File handler which supports reopening of logs.
-
-    Re-opening should be used instead of the 'rollover' feature of
-    the FileHandler from the standard library's logging package.
-    """
-
-    def __init__(self, filename, mode="a"):
-        filename = os.path.abspath(filename)
-        StreamHandler.__init__(self, open(filename, mode))
-        self.baseFilename = filename
-        self.mode = mode
-
-    def close(self):
-        self.stream.close()
-
-    def reopen(self):
-        self.close()
-        self.stream = open(self.baseFilename, self.mode)
-
-
-class NullHandler(Handler):
-    """Handler that does nothing."""
-
-    def emit(self, record):
-        pass
-
-    def handle(self, record):
-        pass
-
-
-class StartupHandler(BufferingHandler):
-    """Handler which stores messages in a buffer until later.
-
-    This is useful at startup, before we know whether we can safely
-    write to a configuration-specified handler.
-    """
-
-    def __init__(self):
-        BufferingHandler.__init__(self, sys.maxint)
-
-    def shouldFlush(self, record):
-        return False
-
-    def flushBufferTo(self, target):
-        while self.buffer:
-            target.handle(self.buffer.pop(0))
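
The reopen() support above is what makes external log rotation safe:
rename the file aside, then ask the handler to reopen its stream.  A
minimal sketch (file names are illustrative):

    import os
    from ZConfig.components.logger.loghandler import FileHandler

    handler = FileHandler("event.log")
    # ... a rotation tool renames the active log ...
    os.rename("event.log", "event.log.1")
    handler.reopen()    # closes the old stream and opens a fresh event.log
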
diff --git a/branches/bug1734/src/ZConfig/components/logger/tests/__init__.py b/branches/bug1734/src/ZConfig/components/logger/tests/__init__.py
deleted file mode 100644
index f8981395..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This is a Python package.
diff --git a/branches/bug1734/src/ZConfig/components/logger/tests/test_logger.py b/branches/bug1734/src/ZConfig/components/logger/tests/test_logger.py
deleted file mode 100644
index d186dd17..00000000
--- a/branches/bug1734/src/ZConfig/components/logger/tests/test_logger.py
+++ /dev/null
@@ -1,233 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Tests for logging configuration via ZConfig."""
-
-import cStringIO as StringIO
-import logging
-import sys
-import tempfile
-import unittest
-
-import ZConfig
-
-from ZConfig.components.logger import datatypes
-from ZConfig.components.logger import handlers
-from ZConfig.components.logger import loghandler
-
-
-class LoggingTestBase(unittest.TestCase):
-
-    # XXX This tries to save and restore the state of logging around
-    # the test.  Somewhat surgical; there may be a better way.
-
-    name = None
-
-    def setUp(self):
-        self._old_logger = logging.getLogger(self.name)
-        self._old_level = self._old_logger.level
-        self._old_handlers = self._old_logger.handlers[:]
-        self._old_logger.handlers[:] = []
-        self._old_logger.setLevel(logging.WARN)
-
-    def tearDown(self):
-        for h in self._old_logger.handlers:
-            self._old_logger.removeHandler(h)
-        for h in self._old_handlers:
-            self._old_logger.addHandler(h)
-        self._old_logger.setLevel(self._old_level)
-
-    _schema = None
-
-    def get_schema(self):
-        if self._schema is None:
-            sio = StringIO.StringIO(self._schematext)
-            self.__class__._schema = ZConfig.loadSchemaFile(sio)
-        return self._schema
-
-    def get_config(self, text):
-        conf, handler = ZConfig.loadConfigFile(self.get_schema(),
-                                               StringIO.StringIO(text))
-        self.assert_(not handler)
-        return conf
-
-
-class TestConfig(LoggingTestBase):
-
-    _schematext = """
-      <schema>
-        <import package='ZConfig.components.logger'/>
-        <section type='eventlog' name='*' attribute='eventlog'/>
-      </schema>
-    """
-
-    def test_logging_level(self):
-        # Make sure the expected names are supported; it's not clear
-        # how to check the values in a meaningful way.
-        # Just make sure they're case-insensitive.
-        convert = datatypes.logging_level
-        for name in ["notset", "all", "trace", "debug", "blather",
-                     "info", "warn", "warning", "error", "fatal",
-                     "critical"]:
-            self.assertEqual(convert(name), convert(name.upper()))
-        self.assertRaises(ValueError, convert, "hopefully-not-a-valid-value")
-
-    def test_http_method(self):
-        convert = handlers.get_or_post
-        self.assertEqual(convert("get"), "GET")
-        self.assertEqual(convert("GET"), "GET")
-        self.assertEqual(convert("post"), "POST")
-        self.assertEqual(convert("POST"), "POST")
-        self.assertRaises(ValueError, convert, "")
-        self.assertRaises(ValueError, convert, "foo")
-
-    def test_syslog_facility(self):
-        convert = handlers.syslog_facility
-        for name in ["auth", "authpriv", "cron", "daemon", "kern",
-                     "lpr", "mail", "news", "security", "syslog",
-                     "user", "uucp", "local0", "local1", "local2",
-                     "local3", "local4", "local5", "local6", "local7"]:
-            self.assertEqual(convert(name), name)
-            self.assertEqual(convert(name.upper()), name)
-        self.assertRaises(ValueError, convert, "hopefully-never-a-valid-value")
-
-    def test_config_without_logger(self):
-        conf = self.get_config("")
-        self.assert_(conf.eventlog is None)
-
-    def test_config_without_handlers(self):
-        logger = self.check_simple_logger("<eventlog/>")
-        # Make sure there's a NullHandler, since a warning gets
-        # printed if there are no handlers:
-        self.assertEqual(len(logger.handlers), 1)
-        self.assert_(isinstance(logger.handlers[0],
-                                loghandler.NullHandler))
-
-    def test_with_logfile(self):
-        import os
-        fn = tempfile.mktemp()
-        logger = self.check_simple_logger("<eventlog>\n"
-                                          "  <logfile>\n"
-                                          "    path %s\n"
-                                          "    level debug\n"
-                                          "  </logfile>\n"
-                                          "</eventlog>" % fn)
-        logfile = logger.handlers[0]
-        self.assertEqual(logfile.level, logging.DEBUG)
-        self.assert_(isinstance(logfile, loghandler.FileHandler))
-        logfile.close()
-        os.remove(fn)
-
-    def test_with_stderr(self):
-        self.check_standard_stream("stderr")
-
-    def test_with_stdout(self):
-        self.check_standard_stream("stdout")
-
-    def check_standard_stream(self, name):
-        old_stream = getattr(sys, name)
-        conf = self.get_config("""
-            <eventlog>
-              <logfile>
-                level info
-                path %s
-              </logfile>
-            </eventlog>
-            """ % name.upper())
-        self.assert_(conf.eventlog is not None)
-        # The factory has already been created; make sure it picks up
-        # the stderr we set here when we create the logger and
-        # handlers:
-        sio = StringIO.StringIO()
-        setattr(sys, name, sio)
-        try:
-            logger = conf.eventlog()
-        finally:
-            setattr(sys, name, old_stream)
-        logger.warn("woohoo!")
-        self.assert_(sio.getvalue().find("woohoo!") >= 0)
-
-    def test_with_syslog(self):
-        logger = self.check_simple_logger("<eventlog>\n"
-                                          "  <syslog>\n"
-                                          "    level error\n"
-                                          "    facility local3\n"
-                                          "  </syslog>\n"
-                                          "</eventlog>")
-        syslog = logger.handlers[0]
-        self.assertEqual(syslog.level, logging.ERROR)
-        self.assert_(isinstance(syslog, loghandler.SysLogHandler))
-
-    def test_with_http_logger_localhost(self):
-        logger = self.check_simple_logger("<eventlog>\n"
-                                          "  <http-logger>\n"
-                                          "    level error\n"
-                                          "    method post\n"
-                                          "  </http-logger>\n"
-                                          "</eventlog>")
-        handler = logger.handlers[0]
-        self.assertEqual(handler.host, "localhost")
-        # XXX The "url" attribute of the handler is misnamed; it
-        # really means just the selector portion of the URL.
-        self.assertEqual(handler.url, "/")
-        self.assertEqual(handler.level, logging.ERROR)
-        self.assertEqual(handler.method, "POST")
-        self.assert_(isinstance(handler, loghandler.HTTPHandler))
-
-    def test_with_http_logger_remote_host(self):
-        logger = self.check_simple_logger("<eventlog>\n"
-                                          "  <http-logger>\n"
-                                          "    method get\n"
-                                          "    url http://example.com/log/\n"
-                                          "  </http-logger>\n"
-                                          "</eventlog>")
-        handler = logger.handlers[0]
-        self.assertEqual(handler.host, "example.com")
-        # XXX The "url" attribute of the handler is misnamed; it
-        # really means just the selector portion of the URL.
-        self.assertEqual(handler.url, "/log/")
-        self.assertEqual(handler.level, logging.NOTSET)
-        self.assertEqual(handler.method, "GET")
-        self.assert_(isinstance(handler, loghandler.HTTPHandler))
-
-    def test_with_email_notifier(self):
-        logger = self.check_simple_logger("<eventlog>\n"
-                                          "  <email-notifier>\n"
-                                          "    to sysadmin@example.com\n"
-                                          "    to sa-pager@example.com\n"
-                                          "    from zlog-user@example.com\n"
-                                          "    level fatal\n"
-                                          "  </email-notifier>\n"
-                                          "</eventlog>")
-        handler = logger.handlers[0]
-        self.assertEqual(handler.toaddrs, ["sysadmin@example.com",
-                                           "sa-pager@example.com"])
-        self.assertEqual(handler.fromaddr, "zlog-user@example.com")
-        self.assertEqual(handler.level, logging.FATAL)
-
-    def check_simple_logger(self, text, level=logging.INFO):
-        conf = self.get_config(text)
-        self.assert_(conf.eventlog is not None)
-        self.assertEqual(conf.eventlog.level, level)
-        logger = conf.eventlog()
-        self.assert_(isinstance(logger, logging.Logger))
-        self.assertEqual(len(logger.handlers), 1)
-        return logger
-
-
-def test_suite():
-    return unittest.makeSuite(TestConfig)
-
-if __name__ == '__main__':
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZConfig/datatypes.py b/branches/bug1734/src/ZConfig/datatypes.py
deleted file mode 100644
index 3ad41675..00000000
--- a/branches/bug1734/src/ZConfig/datatypes.py
+++ /dev/null
@@ -1,400 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Selection of standard datatypes for ZConfig."""
-
-import os
-import re
-import sys
-import datetime
-
-# types.StringTypes was added in Python 2.2 and basestring in 2.3, so
-# feature-test for unicode support instead of relying on either.
-try:
-    unicode
-except NameError:
-    have_unicode = False
-else:
-    have_unicode = True
-
-
-class MemoizedConversion:
-    """Conversion helper that caches the results of expensive conversions."""
-
-    def __init__(self, conversion):
-        self._memo = {}
-        self._conversion = conversion
-
-    def __call__(self, value):
-        try:
-            return self._memo[value]
-        except KeyError:
-            v = self._conversion(value)
-            self._memo[value] = v
-            return v
-
-
-class RangeCheckedConversion:
-    """Conversion helper that range checks another conversion."""
-
-    def __init__(self, conversion, min=None, max=None):
-        self._min = min
-        self._max = max
-        self._conversion = conversion
-
-    def __call__(self, value):
-        v = self._conversion(value)
-        if self._min is not None and v < self._min:
-            raise ValueError("%s is below lower bound (%s)"
-                             % (`v`, `self._min`))
-        if self._max is not None and v > self._max:
-            raise ValueError("%s is above upper bound (%s)"
-                             % (`v`, `self._max`))
-        return v
-
-
-class RegularExpressionConversion:
-    reason = "value did not match regular expression"
-
-    def __init__(self, regex):
-        self._rx = re.compile(regex)
-
-    def __call__(self, value):
-        m = self._rx.match(value)
-        if m and m.group() == value:
-            return value
-        else:
-            raise ValueError("%s: %s" % (self.reason, repr(value)))
-
-
-def check_locale(value):
-    import locale
-    prev = locale.setlocale(locale.LC_ALL)
-    try:
-        try:
-            locale.setlocale(locale.LC_ALL, value)
-        finally:
-            locale.setlocale(locale.LC_ALL, prev)
-    except locale.Error:
-        raise ValueError(
-            'The specified locale "%s" is not supported by your system.\n'
-            'See your operating system documentation for more\n'
-            'information on locale support.' % value)
-    else:
-        return value
-
-
-class BasicKeyConversion(RegularExpressionConversion):
-    def __init__(self):
-        RegularExpressionConversion.__init__(self, "[a-zA-Z][-._a-zA-Z0-9]*")
-
-    def __call__(self, value):
-        value = str(value)
-        return RegularExpressionConversion.__call__(self, value).lower()
-
-
-class ASCIIConversion(RegularExpressionConversion):
-    def __call__(self, value):
-        value = RegularExpressionConversion.__call__(self, value)
-        if have_unicode and isinstance(value, unicode):
-            value = value.encode("ascii")
-        return value
-
-
-_ident_re = "[_a-zA-Z][_a-zA-Z0-9]*"
-
-class IdentifierConversion(ASCIIConversion):
-    reason = "not a valid Python identifier"
-
-    def __init__(self):
-        ASCIIConversion.__init__(self, _ident_re)
-
-
-class DottedNameConversion(ASCIIConversion):
-    reason = "not a valid dotted name"
-
-    def __init__(self):
-        ASCIIConversion.__init__(self,
-                                 r"%s(?:\.%s)*" % (_ident_re, _ident_re))
-
-
-class DottedNameSuffixConversion(ASCIIConversion):
-    reason = "not a valid dotted name or suffix"
-
-    def __init__(self):
-        ASCIIConversion.__init__(self,
-                                 r"(?:%s)(?:\.%s)*|(?:\.%s)+"
-                                 % (_ident_re, _ident_re, _ident_re))
-
-
-def integer(value):
-    try:
-        return int(value)
-    except ValueError:
-        return long(value)
-    except OverflowError:
-        return long(value)
-
-
-def null_conversion(value):
-    return value
-
-
-def asBoolean(s):
-    """Convert a string value to a boolean value."""
-    ss = str(s).lower()
-    if ss in ('yes', 'true', 'on'):
-        return True
-    elif ss in ('no', 'false', 'off'):
-        return False
-    else:
-        raise ValueError("not a valid boolean value: " + repr(s))
-
-
-def string_list(s):
-    """Convert a string to a list of strings using .split()."""
-    return s.split()
-
-
-port_number = RangeCheckedConversion(integer, min=1, max=0xffff).__call__
-
-
-if sys.platform[:3] == "win":
-    DEFAULT_HOST = "localhost"
-else:
-    DEFAULT_HOST = ""
-
-def inet_address(s):
-    # returns (host, port) tuple
-    host = ''
-    port = None
-    if ":" in s:
-        host, s = s.split(":", 1)
-        if s:
-            port = port_number(s)
-        host = host.lower()
-    else:
-        try:
-            port = port_number(s)
-        except ValueError:
-            if len(s.split()) != 1:
-                raise ValueError("not a valid host name: " + repr(s))
-            host = s.lower()
-    if not host:
-        host = DEFAULT_HOST
-    return host, port
-
-
-class SocketAddress:
-    def __init__(self, s):
-        # returns (family, address) tuple
-        import socket
-        if "/" in s or s.find(os.sep) >= 0:
-            self.family = getattr(socket, "AF_UNIX", None)
-            self.address = s
-        else:
-            self.family = socket.AF_INET
-            self.address = inet_address(s)
-
-def float_conversion(v):
-    if isinstance(v, basestring):
-        if v.lower() in ["inf", "-inf", "nan"]:
-            raise ValueError(`v` + " is not a portable float representation")
-    return float(v)
-
-class IpaddrOrHostname(RegularExpressionConversion):
-    def __init__(self):
-        # IP address regex from the Perl Cookbook, Recipe 6.23 (revised ed.)
-        # We allow underscores in hostnames although this is considered
-        # illegal according to RFC1034.
-        expr = (r"(^(\d|[01]?\d\d|2[0-4]\d|25[0-5])\." #ipaddr
-                r"(\d|[01]?\d\d|2[0-4]\d|25[0-5])\." #ipaddr cont'd
-                r"(\d|[01]?\d\d|2[0-4]\d|25[0-5])\." #ipaddr cont'd
-                r"(\d|[01]?\d\d|2[0-4]\d|25[0-5])$)" #ipaddr cont'd
-                r"|([A-Za-z_][-A-Za-z0-9_.]*[-A-Za-z0-9_])") # or hostname
-        RegularExpressionConversion.__init__(self, expr)
-
-    def __call__(self, value):
-        return RegularExpressionConversion.__call__(self, value).lower()
-
-def existing_directory(v):
-    nv = os.path.expanduser(v)
-    if os.path.isdir(nv):
-        return nv
-    raise ValueError, '%s is not an existing directory' % v
-
-def existing_path(v):
-    nv = os.path.expanduser(v)
-    if os.path.exists(nv):
-        return nv
-    raise ValueError, '%s is not an existing path' % v
-
-def existing_file(v):
-    nv = os.path.expanduser(v)
-    if os.path.exists(nv):
-        return nv
-    raise ValueError, '%s is not an existing file' % v
-
-def existing_dirpath(v):
-    nv = os.path.expanduser(v)
-    dir = os.path.dirname(nv)
-    if not dir:
-        # relative pathname with no directory component
-        return nv
-    if os.path.isdir(dir):
-        return nv
-    raise ValueError, ('The directory named as part of the path %s '
-                       'does not exist.' % v)
-
-
-class SuffixMultiplier:
-    # d is a dictionary of suffixes to integer multipliers.  If no suffix
-    # matches, the default is used as the multiplier.  Matches are case
-    # insensitive.  Return values are in the fundamental unit.
-    def __init__(self, d, default=1):
-        self._d = d
-        self._default = default
-        # all keys must be the same size
-        self._keysz = None
-        for k in d.keys():
-            if self._keysz is None:
-                self._keysz = len(k)
-            else:
-                assert self._keysz == len(k)
-
-    def __call__(self, v):
-        v = v.lower()
-        for s, m in self._d.items():
-            if v[-self._keysz:] == s:
-                return int(v[:-self._keysz]) * m
-        return int(v) * self._default
-
-
-def timedelta(s):
-    # Unlike the standard time-interval data type, which returns a float
-    # number of seconds, this datatype takes a wider range of syntax and
-    # returns a datetime.timedelta
-    #
-    # Accepts suffixes:
-    #    w - weeks
-    #    d - days
-    #    h - hours
-    #    m - minutes
-    #    s - seconds
-    #
-    # and all arguments may be integers or floats, positive or negative.
-    # More than one time interval suffix value may appear on the line, but
-    # they should all be separated by spaces, e.g.:
-    #
-    # sleep_time 4w 2d 7h 12m 0.00001s
-    weeks = days = hours = minutes = seconds = 0
-    for part in s.split():
-        val = float(part[:-1])
-        suffix = part[-1]
-        if suffix == 'w':
-            weeks = val
-        elif suffix == 'd':
-            days = val
-        elif suffix == 'h':
-            hours = val
-        elif suffix == 'm':
-            minutes = val
-        elif suffix == 's':
-            seconds = val
-        else:
-            raise TypeError('bad part %s in %s' % (part, s))
-    return datetime.timedelta(weeks=weeks, days=days, hours=hours,
-                              minutes=minutes, seconds=seconds)
-
-
-stock_datatypes = {
-    "boolean":           asBoolean,
-    "dotted-name":       DottedNameConversion(),
-    "dotted-suffix":     DottedNameSuffixConversion(),
-    "identifier":        IdentifierConversion(),
-    "integer":           integer,
-    "float":             float_conversion,
-    "string":            str,
-    "string-list":       string_list,
-    "null":              null_conversion,
-    "locale":            MemoizedConversion(check_locale),
-    "port-number":       port_number,
-    "basic-key":         BasicKeyConversion(),
-    "inet-address":      inet_address,
-    "socket-address":    SocketAddress,
-    "ipaddr-or-hostname":IpaddrOrHostname(),
-    "existing-directory":existing_directory,
-    "existing-path":     existing_path,
-    "existing-file":     existing_file,
-    "existing-dirpath":  existing_dirpath,
-    "byte-size":         SuffixMultiplier({'kb': 1024,
-                                           'mb': 1024*1024,
-                                           'gb': 1024*1024*1024L,
-                                           }),
-    "time-interval":     SuffixMultiplier({'s': 1,
-                                           'm': 60,
-                                           'h': 60*60,
-                                           'd': 60*60*24,
-                                           }),
-    "timedelta":         timedelta,
-    }
-
-
-class Registry:
-    def __init__(self, stock=None):
-        if stock is None:
-            stock = stock_datatypes.copy()
-        self._stock = stock
-        self._other = {}
-        self._basic_key = None
-
-    def get(self, name):
-        if '.' not in name:
-            if self._basic_key is None:
-                self._basic_key = self._other.get("basic-key")
-                if self._basic_key is None:
-                    self._basic_key = self._stock.get("basic-key")
-                if self._basic_key is None:
-                    self._basic_key = stock_datatypes["basic-key"]
-            name = self._basic_key(name)
-        t = self._stock.get(name)
-        if t is None:
-            t = self._other.get(name)
-            if t is None:
-                t = self.search(name)
-        return t
-
-    def register(self, name, conversion):
-        if self._stock.has_key(name):
-            raise ValueError("datatype name conflicts with built-in type: "
-                             + `name`)
-        if self._other.has_key(name):
-            raise ValueError("datatype name already registered: " + `name`)
-        self._other[name] = conversion
-
-    def search(self, name):
-        if not "." in name:
-            raise ValueError("unloadable datatype name: " + `name`)
-        components = name.split('.')
-        start = components[0]
-        g = {}
-        package = __import__(start, g, g)
-        modulenames = [start]
-        for component in components[1:]:
-            modulenames.append(component)
-            try:
-                package = getattr(package, component)
-            except AttributeError:
-                n = '.'.join(modulenames)
-                package = __import__(n, g, g, component)
-        self._other[name] = package
-        return package
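
A few of the stock datatypes in action; each result follows directly
from the definitions above (Python 2, like the rest of this patch):

    from ZConfig.datatypes import Registry

    reg = Registry()
    assert reg.get("byte-size")("4kb") == 4096
    assert reg.get("time-interval")("2m") == 120
    assert reg.get("inet-address")("localhost:8080") == ("localhost", 8080)
    td = reg.get("timedelta")("1d 12h")
    assert td.days == 1 and td.seconds == 12 * 60 * 60
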
diff --git a/branches/bug1734/src/ZConfig/doc/Makefile b/branches/bug1734/src/ZConfig/doc/Makefile
deleted file mode 100644
index 671d436b..00000000
--- a/branches/bug1734/src/ZConfig/doc/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-# Rules to convert the documentation to a single PDF file.
-#
-# PostScript, HTML, and plain text output are also supported, though
-# PDF is the default.
-#
-# See the README.txt file for information on the mkhowto program used
-# to generate the formatted versions of the documentation.
-
-.PHONY:	default all html pdf ps text
-
-default:  pdf
-all:	  html pdf ps text
-
-html:   zconfig/zconfig.html
-pdf:	zconfig.pdf
-ps:	zconfig.ps
-text:   zconfig.txt
-
-zconfig/zconfig.html:  zconfig.tex schema.dtd xmlmarkup.perl
-	mkhowto --html $<
-
-zconfig.pdf:  zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --pdf $<
-
-zconfig.ps:  zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --postscript $<
-
-zconfig.txt: zconfig.tex schema.dtd xmlmarkup.sty
-	mkhowto --text $<
-
-clean:
-	rm -f zconfig.l2h zconfig.l2h~
-
-clobber:  clean
-	rm -f zconfig.pdf zconfig.ps zconfig.txt
-	rm -rf zconfig
diff --git a/branches/bug1734/src/ZConfig/doc/README.txt b/branches/bug1734/src/ZConfig/doc/README.txt
deleted file mode 100644
index 99d2aed0..00000000
--- a/branches/bug1734/src/ZConfig/doc/README.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-The zconfig.tex document in this directory contains the reference
-documentation for the ZConfig package.  This documentation is written
-using the Python LaTeX styles.
-
-To format the documentation, get a copy of the Python documentation
-tools (the Doc/ directory from the Python sources), and create a
-symlink to the tools/mkhowto script from some convenient bin/
-directory.  You will need to have a fairly complete set of
-documentation tools installed on your platform; see
-
-    http://www.python.org/doc/current/doc/doc.html
-
-for more information on the tools.
-
-This documentation requires the latest version of the Python
-documentation tools from CVS.
diff --git a/branches/bug1734/src/ZConfig/doc/schema.dtd b/branches/bug1734/src/ZConfig/doc/schema.dtd
deleted file mode 100644
index 37d2ac3f..00000000
--- a/branches/bug1734/src/ZConfig/doc/schema.dtd
+++ /dev/null
@@ -1,99 +0,0 @@
-<!--
-  *************************************************************************
-  Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE.
-  *************************************************************************
-
-  Please note that not all documents that conform to this DTD are
-  legal ZConfig schema.  The ZConfig reference manual describes many
-  constraints that are important to understanding ZConfig schema.
-  -->
-
-<!-- DTD for ZConfig schema documents. -->
-
-<!ELEMENT schema (description?, metadefault?, example?,
-                  import*,
-                  (sectiontype | abstracttype)*,
-                  (section | key | multisection | multikey)*)>
-<!ATTLIST schema
-          extends    NMTOKEN  #IMPLIED
-          prefix     NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          keytype    NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED>
-
-<!ELEMENT component (description?, (sectiontype | abstracttype)*)>
-<!ATTLIST component
-          prefix     NMTOKEN  #IMPLIED>
-
-<!ELEMENT import EMPTY>
-<!ATTLIST import
-          file       CDATA    #IMPLIED
-          package    NMTOKEN  #IMPLIED
-          src        CDATA    #IMPLIED>
-
-<!ELEMENT description (#PCDATA)*>
-<!ATTLIST description
-          format     NMTOKEN  #IMPLIED>
-
-<!ELEMENT metadefault (#PCDATA)*>
-<!ELEMENT example     (#PCDATA)*>
-
-<!ELEMENT sectiontype (description?, 
-                       (section | key | multisection | multikey)*)>
-<!ATTLIST sectiontype
-          name       NMTOKEN  #REQUIRED
-          prefix     NMTOKEN  #IMPLIED
-          keytype    NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          implements NMTOKEN  #IMPLIED
-          extends    NMTOKEN  #IMPLIED>
-
-<!ELEMENT abstracttype (description?)>
-<!ATTLIST abstracttype
-          name       NMTOKEN  #REQUIRED
-          prefix     NMTOKEN  #IMPLIED>
-
-<!ELEMENT default    (#PCDATA)*>
-<!ATTLIST default
-          key        CDATA    #IMPLIED>
-
-<!ELEMENT key (description?, metadefault?, example?, default*)>
-<!ATTLIST key
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no"
-          default    CDATA    #IMPLIED>
-
-<!ELEMENT multikey (description?, metadefault?, example?, default*)>
-<!ATTLIST multikey
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          datatype   NMTOKEN  #IMPLIED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
-
-<!ELEMENT section (description?)>
-<!ATTLIST section
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          type       NMTOKEN  #REQUIRED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
-
-<!ELEMENT multisection (description?)>
-<!ATTLIST multisection
-          name       CDATA    #REQUIRED
-          attribute  NMTOKEN  #IMPLIED
-          type       NMTOKEN  #REQUIRED
-          handler    NMTOKEN  #IMPLIED
-          required   (yes|no) "no">
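
For reference, a minimal document that conforms to this DTD (a sketch
only; as the comment at the top notes, DTD validity alone does not make
a legal ZConfig schema):

    <schema>
      <description>Example schema.</description>
      <sectiontype name="server">
        <key name="port" datatype="port-number" required="yes"/>
      </sectiontype>
      <section type="server" name="*" attribute="server"/>
    </schema>
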
diff --git a/branches/bug1734/src/ZConfig/doc/xmlmarkup.perl b/branches/bug1734/src/ZConfig/doc/xmlmarkup.perl
deleted file mode 100644
index 769a17ef..00000000
--- a/branches/bug1734/src/ZConfig/doc/xmlmarkup.perl
+++ /dev/null
@@ -1,59 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-# LaTeX2HTML support for the xmlmarkup package.  Doesn't do indexing.
-
-package main;
-
-
-sub do_cmd_element{
-    local($_) = @_;
-    my $name = next_argument();
-    return "<tt class='element'>$name</tt>" . $_;
-}
-
-sub do_cmd_attribute{
-    local($_) = @_;
-    my $name = next_argument();
-    return "<tt class='attribute'>$name</tt>" . $_;
-}
-
-sub do_env_attributedesc{
-    local($_) = @_;
-    my $name = next_argument();
-    my $valuetype = next_argument();
-    return ("\n<dl class='macrodesc'>"
-            . "\n<dt><b><tt class='macro'>$name</tt></b>"
-            . "&nbsp;&nbsp;&nbsp;($valuetype)"
-            . "\n<dd>"
-            . $_
-            . "</dl>");
-}
-
-sub do_env_elementdesc{
-    local($_) = @_;
-    my $name = next_argument();
-    my $contentmodel = next_argument();
-    return ("\n<dl class='elementdesc'>"
-            . "\n<dt class='start-tag'><tt>&lt;"
-            . "<b class='element'>$name</b>&gt;</tt>"
-            . "\n<dd class='content-model'>$contentmodel"
-            . "\n<dt class='endtag'><tt>&lt;/"
-            . "<b class='element'>$name</b>&gt;</tt>"
-            . "\n<dd class='descrition'>"
-            . $_
-            . "</dl>");
-}
-
-1;				# Must end with this, because Perl is bogus.
diff --git a/branches/bug1734/src/ZConfig/doc/xmlmarkup.sty b/branches/bug1734/src/ZConfig/doc/xmlmarkup.sty
deleted file mode 100644
index 6650f319..00000000
--- a/branches/bug1734/src/ZConfig/doc/xmlmarkup.sty
+++ /dev/null
@@ -1,38 +0,0 @@
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%
-% Copyright (c) 2003 Zope Corporation and Contributors.
-% All Rights Reserved.
-%
-% This software is subject to the provisions of the Zope Public License,
-% Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-% THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-% WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-% WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-% FOR A PARTICULAR PURPOSE.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Define some simple markup for the LaTeX command documentation:
-
-\ProvidesPackage{xmlmarkup}
-\RequirePackage{python}      % fulllineitems environment
-
-\newcommand{\element}[1]{\code{#1}}
-\newcommand{\attribute}[1]{\code{#1}}
-
-% \begin{elementdesc}{type}{content-model}
-\newenvironment{elementdesc}[2]{
-  \begin{fulllineitems}
-    \item[\code{\textless{\bfseries #1}\textgreater}]
-    \code{#2}
-    \item[\code{\textless/{\bfseries #1}\textgreater}]
-    \index{#1 element@\py@idxcode{#1} element}
-    \index{elements!#1@\py@idxcode{#1}}
-}{\end{fulllineitems}}
-
-% \begin{attributedesc}{name}{content-type}
-\newenvironment{attributedesc}[2]{
-  \begin{fulllineitems}
-    \item[\code{\bfseries#1}{\quad(#2)}]
-    \index{#1@\py@idxcode{#1}}
-}{\end{fulllineitems}}
diff --git a/branches/bug1734/src/ZConfig/doc/zconfig.pdf b/branches/bug1734/src/ZConfig/doc/zconfig.pdf
deleted file mode 100644
index 3d0c0849f9d02bf5d8c1f9cfd4b4fa0155d19ae8..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

zEtNl-wa(wg;N(b+f3OV)hat6^BJf@0yp=Kh+qZLmr8l<E6X3{~IX{1y8sFaW5`8W$
zx1my?+RV!l5q!KdJ#kRw`$=YfE1i^}u^w}YasOgc&AWnBn{O$Mo8-9tpu8*+>HTYR
zgK>#TgcxG6@H4OS=E=4ly=b@%OK&^!XRN!$+*6g4y$N&8bZCdP+Mcx1@`9x(%&M;s
z&zkEmdlu+ACJbSat?K4A)Cbxqd7jVUcqq|r9=Nhe1x;lxt~;A^q_jnmC)Fu>z2GRy
z;ASmGdY%GNDX<Ucwv^#!%8BBVEumu}V}<V{9njC<BfTBn%vG&EG3RaJ&@3mV7Fh1Y
zF(gqaD<6=*kIM<#cJ6lkJP;ESXJ)=6vQp=@MJe(it%8ELGc3vLlA6pZN($&KobMzh
zrq_fI1!4Luma9H4ix*lil)$>gM&JnJR&_x{33?{ZDoyEtN&)imd#$tlReB<)=6m4x
zE=KHG>73L^5wc-#IDdN#Nrj&5@`z_B*NOyL?>wH{FsszKbo;|~lesD(^yrOU0pVtA
zzKwXIP?-wn(wD!`_Zn3wG<nOrOMZY3+0DXG^_yl|Cfhf>Q@*g!_H71P`|Wx9zszuW
z&`$mw`NqvW$#R2T!KE%$7d<flz)B5E!;h1E&lVFS%PxFst%P~_ZHet(>Pvtr94sTP
zmIR%e2H~^WZ7)w_Wqjdz(cY1kxP;OY(PY=CwCjVI=_{GE94{0gXO1v+KXP500FIl|
zCC3$g>B}vF7|iw7&&7dFPiZLkWokGQre<j`B)mSuu@2@Qj^HS2Jn`WqF!4VIEp7@g
zn0sb!g2&y8D~b!p52=q%NJ!>j)8o*OV@>?wUN-eP+=|=;Ot%Nh@;IO%h7M%-nF@9=
z#vcc;bpysz0k83TRk1^8Q#5wVJK(8FO!eR9+Bfr@-qDh!i>q&<_V04Dfe3I`$J5f}
z`Oe?78Zu&jKS{<qq~fA_d{K6`!`|7`1IH3M?3DjFxDAI*pfAEQT%!f+h&KJM&`Mq(
zn<N}|#xZ3F9;V48BgtpP_OX4*YdIe!=HbRy+A`^5#OR4qkd*DwqX~xTip%qZM9mk$
zQTX`Q=5}sGNQ4+m5-es<pJ1M_9m7w*A_L1jrNNOcGF8oMpAs!93O5&#d-~jpy#@>0
zt%wk2!9UNTJ~&T-8dp{C9g89kjJUCvJjC;D+M~099##2Cy0_sKITQ&ywg9A8uX;1_
zriRl?*(+)$#ac0%4I2GUcl<@CzwM}6;jQwib=^f&pQfD2pd#}=kDm@{Zr~ZPmN|Q$
z2<mWHQF!CuWs0M$5y1Unhx}Ptb%Axa-^dhkx}2nKstAkR=V`U=7?eH6Q9ElpA__<?
zlIoq)Hov%m3H>eZGlJrBeU^S(5%ZR1O*Ea8VV#Kat8VrHepqM1(~Bg=<x6^en~YQW
zz<qUNyf1WJCHGhey|iBMZ9^hkw)mG+sTEJqhIFd~)Gdcm2{qRAMi1xHmnp&eXT>Jv
zt^0suK)(a*YH@6D1^VI;jN*fdib6Zyr+aH0UM0z%PjDkI8kk39L=0u;IFYgiJy-lG
z9SuWZ+ehN8p7aX=K!#!;{olcujhClgYL01que;lo7B*%DC<@usBpTsM?p84~c}Vqg
ztEY2=CQNZD{6Zr<7=#N^w`@9dSlPNboJsSfnO&CHQ2XUciFRz0jcnvBUnj9rXfQY~
z;G+cIc?2uRzqGH1!Iqf7X%VTcXIK{@AR2k2=SkJCw2^-(LG9kzf|r?TTN7m8aWH10
z>7Tku3`v^aIU_PJ&aa3X!mKMN-LBU|yev6QXBm3RpIjT|lsqeq&;|$pnRUjfe6`vw
z{|$0)&!_2=m;PuScOsotlRP*@RW1&)jMk<0{q0tJN&TgkpKdeSFOBG3^jy69>Q$p`
zS+Ry(7OmA*mvr~*3s04h{g4aUIClEqK6CHhN|~RQ<@r{-_#A11O6dEubDZt8Flrd@
zF;Q1hDdQRv_C-A%hBB*3OgYin&Y(C|8wH+*u-c3#$$QQ_>ry9Kdl!Xc#j)@k%tw81
z-;=|OrJe7#QbvF75T-eI93u?c0T(Rgj=?mY&TuN*thI(jTFF&ljG2K<;$}}94{);Y
zVIbho(u=#5>gp~k8jiK;EJ64qc@s~cwUi9Mpl%qMM{!t`iiW+_A9HNdWP9E8?EMmE
zP%AU4=MsJ2xg>|vau`BoJB<DZ$XlWS03==1#>4J`9R<&tgXU%!@yyp2FF5m?KKK=H
z$Kp=#;1KTOh*yvHo)Q*rH1LR>a-sHhN%p2$Jer@F{<&(esqRAW*D2O<lLU3`AzRc-
zg_3$x8^T1F>bA<4i1+6M6YcoZTz!$5DPSbSui{pn5+W=3-&|y;1|N3LP4pV?>y3jE
z70L1hyfbXoJGGExt4|lYhU$;HP3^xFGOqZfFLdlWKbKD03#-e!L}NgQVIF9hPOQ;D
zQiHdgex%n0!Ba)tRLoT|=THs!(wJ7ZXVf)_9|WNx??92SNDfM22$Mx#o)sOd_PM(a
zb&Z$C*gJLMiLd7Okx0oM?_3j^Cp5PE%xSY)mX>w+dNT}G@W$(wIfzK|gN@nDda3~f
zFTC(FVVVktA_K4yG|eQ{8s;BuK;Yy<Vzko|UWP@4qT^NBaA5?<hEJZj4D^o4yX(?7
z|MabkVjvaka%#2VEAs6@CPvhVaDtovwaP-pS6ao3@32Pts^0&m@j_X}Us=&#MBbkO
z@n2M4C_DbC`~F>-7s_gXz*UZ4xR&D=NQFv4Kfe4INc#7MM=myQAXImRK*b$yZV-SA
zDy;CZ{l3r!{wsO^Y2)8u1^*x?oIFsH3MH)|_Maq$i;W$~&Gq}bBlxe}{il!qh8a+D
z!pQ|yfq(D;$S-{hsvSUi^zZ9z|7OF_0s0#@fPM&l+)(lNhv@pl7f|2)5Mv=+oIp0N
zpM>#02;rvoCQd*HcV|m`JD|Oz1*?sfF{`b;sf&#n3v|1|+{yyFci?2^2()yzwfVD7
z4t>)<;#Uly^D~~Ilt@!c8vykN7dw=_vvC3JTx@LqCB*(en*Ct-U`}oTCsg$S^ML=Y
zE$5HDgnv5ZFI|EmQ1u470|DWIwuLhFf9v-@lfC~dm-~b6{h1H`55r@XfGQIlzZl_f
zu|3W|3)%myL*V-TI)oWbt>{HgY-k+<8#aEdzog8bdKE(q(g)F%2t9yMJWQS$ulf5P
zrjIZ0j-K^Uxy&!rx-%1Mo)C{-k6>7}n;#F%rBwkUrd+h~VTVp#v?_djb(h;BPhBYR
zt(tw7$UY7%KR+sIwYf_G9i<k^JDy#i@Q(-Y!+oaCJ?mLIK7G#}=4dy{N3qq9RWpCv
zygnRHmf=RE8`i3=Yx$hR%w<6fJRvKYQhzdrJhRw#3gA_`te&d1ZS-c{t}3|qUYuBC
z;9S}_oubT5;+_ANHB$JYLGG<vKitLwXPe^dZN}n=q}v__f~MC2CiYCXH3^s=FuI#g
z6!=oc!+9#GEnp%-v6}Z?Le$IBEgcI*sx$^&Y{1r<J*?V71--e9lyvL%@qm*kfr!bB
zJ=5i!PWx{PhXKNE%#xzYN3dSDMML%FU7{)G7gAU33k&!4REUa1GPP|O9<$o&w9h-5
zl^<Bqqr4C#2IKt(mbyRpu{g$M4T4+lC--&(YzuPU?YrXZ3A$!xMWeGK$UX3W2K#;5
z6(=^;vK&v@n!v3dIY#t$uE56SM$o+W3(7EOAnfQr&K!<F*19aVO*`u|vr7%ide&ox
zdA##TyiBiexHEm|i5wpSch-RmB%^XGWs8qyCpWLh^hNptPQbK0?=&u&SYFV~t0QM@
zxo5>zbS(ad9Mx-hi6hRcnDqQ&c-I3|0E%IbtkCeR`#}6>G5C<NTyb_e?fH;T^!?qe
z1@cjRt~m__$czPLNec8r{3DpPETYm1F(_7;7RY*%IEVPKe6z(k{NDh=0}5rY=@GH`
zj9uDKKe>D>JrmlYRYu<3f5?)km}9@`#m&!@&#7^&k9neMs|!A&73<GC!TA(n9ml?g
zverf92#qz|R?nP(t#n$Zv~1lv;q%*}mtq3Zm_)fz@S}SAzOCE%qSVa?64Nrw-E0gI
z)01B9EG%h=B-8o}-w)J-#(LPpz>mHldx!>lsvk@UAk7jb(QRymFe(IkX`NJU^^MCv
zjxKh5xg0b+yIWFR6pQ{E2xB{5kvj};v)>M1+VYv#X33dYRSb{zJ^)!N2?*~LV6?|+
zd#l#cZ_=c_4Z|e;olG(8{(d<i6TtoDOz~Y1XcV4;;EZiF!ZHKVC~Y`L5~<II@%|I-
zeWD)D`MVh^&I-}bHq3&}4T*i{Nhi#SSW_bOskSBrAc|o7<yT3jxU{|=M`0kM#3qH_
zLp!#oCgse`XDjQ%h8~_{StZeh(TR7$ZnB{@>qV^X+jdLn{5+Cbs3KtzxejvUyYcEs
zc=T@a`10W*&k-=tC124D*N5nYw>GX5$~B22tRha{wut9Hls&$3)u?|cqzv6bnVqH*
zRE0;ZB{Jc8#o_ifIV&XhocyV$R=B*9U_Dn3%ahxp%rh9WCEJ59xx51ohL8~-Cz*ZT
zN`xDH!+`f0cLkoK-QF<R0va+U+WxR-i$cEG8XBdk?cxr4;Kl}ll!Zr*i~FSKlqEyo
zi!hh9riJldGCs2vLoLlq=r@8vMOI+jX}e#MacvlLyt)}|;}W&)k=_tOCBvdy>h5D{
z1>q^#ehi*lyFvLls@2|mHXoM5h8QZPJ+KhAnvJ$$hj0?LF@K96GJy<JH-T2PqD!Q$
zODnE#3l2dx=Jntl7dGG7c;l?Ybc#XVHCuFtzt1E;L<cLaBk!=gzfjzr2Et%P4^|?x
z#dg4`Q{46sIrlNlMFL`Ls?AI}uhw0Nz(;14ZUGOEO?gGi2IRdqFS*g<y4xJGyh6nT
ztcuENKTH(y9<P)+zaWUq%_;*|PA)!vddc5L^v*K{9F;P&vPJJjz!|il#wte;XEBZh
zv$mB#Z5f8_7VB|vX&=^_AHE(5|B_FA{zirjQ5QnGxdyXb9M|7FervcP8=7dQA~P|+
z?jGps>UsP%%T!FWd=7R%oZ~Zu?YThl;<cN9n`SYh&aS2q&3kUh4o3YNoR*wPDG5p?
zH!_~Etf)z#`h33S+GiUy?awQ7>HI#HbzTy~K^zI{hf<``%!xF7nuHpMoqWS03J1v5
zk4}jxC>Eq!S;U;M?odfuGz<vW(CDZmDqbF}Fl2j*iz>{kpB87H?;zvM&6z7zHYf~v
zSBWp{&km=C1j~;ePYPNsnuymZI=p8aY?5g-5PD6R!6{hpR8W~*3<^1WrBP?h<(ntW
zTFFFL58S7ls#3S7)L425;ap;v<VGcCP@63&m>C$eY165?xFtZ(p*F}g8iv!y-}fLW
zXnL$s5;{W(UjXtrbmjg`6+d5{xlfvPUh^8hUkleg7P3RhXP`9sZ#bCq-*=k-f`fl9
zo&SP^q1x}?z`=iPtV5UEe*q9b`~L5k<!5*Q#2tSK#Xt5Be?jSgk%#{P2CRQ*$-jpY
z{=BjJPmI6=-7@{nTE#xi{m4ZL?C*yi-S^l}MN6l2N;vT+Czav+6Cf<KBcXM*0<04y
zab85<uhKugOEmqIyd|1Vo^}O-JcHb(4tzB5Jl?JF9&_qp(4yDc@hH;Vo!Q%6-aBki
zEDcVj`{3cqme8^hcqXD@JB68QyWu|CQqCxUJ>F8GQ+M6y3D4Zc3!j9oRqxMc++eg7
z(#`d?<^4kEk}Fc(iOKGnA7qS#RKIR9-G}>B-|6k;XADsK`fWBD?H7?ICbgV-OuyHK
z0=8abpn=Rxhk0kjbj5TJ0jCPWo9Sxh<UPq7cUOuxa&kIl!-5}>FKC9v9CcPZL$kv)
z<F7i{FN6j8-dEH;weKv}p6M^Y;6xMQ!t5+-KQe!5Be_s6>`CF0Jx<7G?VG>4&VWo>
z^@`nn((FS}t1_pfX%YvPb2``KMpe7x{(f;MJqd2e)wPn^B--=^Jf=e1b``nX#{e$-
zy$*g>H*Ocl@&3fhFs3}m*L^F&MHN_qdz^~^a~$rh7rs*Q`rEVvF=;xAdKc_W_#exZ
z7<<%Nw`4Oor=&BiDtEoBcS&a75jdEtD10oi!;Lw$PnFkFLn@LX?BAP_Cb;wyg^-4C
zyoFm1>qw6?bFbG1XMMV9)<}N-e2Z&dA^s%aLH4q%^CnnI0=XN3+_Dw9r{Mi)G<(8h
zlV_(ERSdXzM?u<6N1=~|8PCXINs#QmYWazFB2bGOT$YLe_nS#9TX#8L2^5z+Q>-%G
z=6LZ@f@Liz3){bD6~V!qxr#S2&`w7D1piw%!Amz-Fm{})b!U8!^G=L|BT8vt68k)4
zo!lVXWxg(RviCI%wke-sgDS!ED{AzhazC62oXIupO-<*$+&AdLn{=ZF&k?V1g(!qN
zQW;=9%n^|5Y>S+%<Y1-DJ^`-r7HK~9o*<kmtTp*#f6WX^@K<5Q;laXrB!(Tz^C}8!
zUkNYC&x)ygBLePSn$d|}butd8mYk98ru?iyq>`-utG8J6S}S4mW{naib<IE?McVgY
z$)3t@UCWC5%}mX7diNzi4DdX8{xO}6MCECet$sJNjb$)=;t*8_RTuMjorxH_7d#A1
zogS-U^=|#Bn<Sw$`g4o4j_<!w(>_U4*5`KA90l%x^@amsd%m@rrmcm23r47XX1&uU
zly5YtS7mF)HJ{b6K<ty>J$Wad2va(Txb41Hn>Xw}71qCM=U<jH4zJs%>X3pM_Z(4a
zP5d3_{9MmVB`M&Yqn=2X*=PTbo)|8Wxld7Q96^X$k()im5f_oomf5?hOl<^51v0a<
zV<E}&LG`=52+Y70cF{qz8~Re}#=CZ_cYv(S98eKs#ECVCBbM;hI9FoxOG<|!0Oi^X
zpsg}EJ+!nd#=dKd2dh{;J^U-aN|%yxyYKW)vvPYo&%Gr;{%InmS=lWbo(8GPZQ$0E
zhTOtaefXSah}^zPoksjt2?Ax8i&=);v7e~}E+yhFKHahLnDcv+`ZOnB#Y93XST!;E
z)neWElo(-#J%*1sJNZTD5^CLYVR9Z<lc{&vX&0%kigL|QRX5@33fPTXD^_(zo_X*3
z=qs8w#dPm5=P^6HY0e~ySQ5K7JwN)$DP7M~?Y=_2+(}N9U&1c+IR2im;yau9@d=B`
zD%~*|`p5H%X`0@rVK*NbHa+bk9@itInK?%aDNdH`D!JU|Q)ZL-&H6|y3v~)7@9|KT
z^;$kBagDBSf!}M{!^miaUr%LWc%;C1UMAHw_qE~>xV4(wSh*>4RAsn_)(7q|CrW4g
z>Qul0M90LH)}|NhT!KsM<*c1gom68(<U6RhMXoABPI~I!lW@mgL-jQ6VF|0)I3Q;d
zI}1BdbjrIU++;vGdQ9^DV?c9@|C4wmk@cmHw$+{L#M?N}BEl8Bh?au6Z`2}BZFu`3
zhIR{%vy}NM$**5>SKJEY9?jc?i$4_9wNp{^kEh)}@4A1#=891l&De)gI*Etpq|&PZ
z(Jf06p&aFkYP+Ys_5cLU9o_DIB~2z(gcCT}9*t#vi2sK3UG$FGcuw<X=>zBXE!?B4
zhGSo{fs<LRZp+qy@RWF_7-S(IESE=c7%KS1?MEpJg{3!#?)(<bUNQ;DpBCT@33osa
zv|IC?!Es+b9d7L4d4+K?`NK%}f~g!16~@6|)=gKv0}Rx<ubl!fD)@>Xn(-u%MV^Lt
zoU+h3id!25*W4LtzaI2@eqvlOotV#xQOWs&iTcJF20wiv!$bT9ijsASgSo&5nBd$m
zH0sQ%@6;9eUBQYxE`7^5QK4_<sK&dzzLtH0*SwiT(WT33$GdPlHS|z5<t3(b(laKN
zG2W%~&7gAH#o_VP-(;d}COdq{D*4QMJ0$-uAMdvD1k%eq@Q%}k`Bg(ln3-SlbUUd5
z{$AD#moxQlHm^f(aQhls$t*0Iqiu`Pt?~eswe(FDV`zDiCA+?uz4YDQ-15g=g*P;k
zcy0{t?a`~`-?hBIeJ+bkZ1AZ$U1G!ArziF-&otY&CV)+_(k(yK@QsSg-?9+Y5n&$&
zNE0e2bkiWOSyx$#ZX|;gQpnPc*B&sw+}5bw8=Q}}8~!`Y`7`wRk2Rgp60x74=8vLI
z&YzplXso{{_x%ax{250;brPtg@<ZwSZ_>=q?*4y(IZ$=z4;1##jc;yf$>(pvoJ6hL
z=*@bZ2k-A_-(zxC3`fGKFRTLyt$?sVNi_Bb@j(4Yg*Fo}E=@m7Y+hoVM;~EW6o%$`
z;ztI5BaiEOXWyQ^{lrhONvd0p8Xl+6)QNlb{^I1Ygi^KfgBW4S$mRs52eZCCrNWL6
zkwjO0o4VdZ#>au?Cf~DfG{ceuBrgFu%v)Bz48s%ZA<yNutXUE!`99YBl5XX)QTOAt
z&=M*&fBGQSo2@TB<wz~p1q)76-`;&GjJj`0&nhd1Psg`&5rGj)+kUI3e^~++i8TXZ
zdDQUD$cI!(n$~_Qk}RZ6F9fPCx?UO8y2NFZ+U;*T;>8B(l~R}z<35cw^%-oke}>y`
zQN*LBbLMGZ3}1mE(`0Ye?Adf6J0)N!TF>$|{-7|<?yML<;;q&hBf9)H6<Ei=tMOEo
zgLUbZ`_x3M#bSD4tm}HOFQyrR-Al6yR6Z@w4_Jq84pXsVxUJTV)eMfvqEidOu(HqI
z%_>fe<m7DIzW}<&yYj8gPg^iErisgOB&2+B)=k*GEMqL(x@=?HVw*Gstw?;o(9?T-
zFcm>36Kmb~UQrKh>LPdPKB!YuqxPbB_7&m5+HzY|y@Zf!egmt9tT0m5xN?Q|)--XB
zjWdnF*{4_Oj1-bhq_WD$6S6IYKv^k1H!b7vVPu`OjmoYx*5$;7h4QKyJR3EOFPC(>
zo3SB$a*vd|J`fP26{m{F+R6tDX4(u~jUHIykM*k#ZH8(wtUVDtMAPntpUxUhmPRP}
z9)v_UpW#|ik&J6hRimKS?M$YhN9B$}F{K__&8ms)9(>b0xm?IjXvNo(QuGGfWIv+4
zznYE)X|bD6@kEf{a@+_;Rr>Z)zgw{w(_sp%9q%d>{(N2Fm5O3Oxr|+^dgC%;R9kID
zN9<XVTcajJQ)IBckq>%6rHHRGXoabf)iqnV3BE=9Fm>X3+_c&+4Y7d7rb~WuNzBLO
zRS&LVXq9~ZDlbP%d;MW*;Of$fUzsvaR(2jsS2s%;tZkT|s<5VWdXM{nC-*b~@Y^(e
zt7a?0E^5J&j_W8(rS=Con&PJ7K(<!j1zu(uw74kW(QIQBtj`UjMzZ>KE0|8NCcc~)
z?m%)1`O^v|NaNSnjBKnC(R+0?DKLXHRvKI1)fF_eqCdr}746zjvtDs@%^tl&O?q`2
z$pZ(c8dUvs;T4C^W>L7|j0sPaVM|bd@Mx8%c|TjB>pBeGv#8J{|F}l81^AW9<=aGt
zI3d`99-pIj-`03pp9J~fSz`Ar?4UPrh;a9M+-q2z_U~}>WTbm>glq5H4{YXISXP+<
zW=J4}Dp)$F&Bbibf=$p@?<k-2{Tx=1HiKfnD>{cbmvz7mL<^M=`L0V^KGT<VGkq8?
z8!p<#fcYg5mqg5J?}0{Ua|YN8o3&Lav1N}OO5QBMYR~z>(^)e-?_=YB#Jv7m<Xr0#
zwL>=`#wU`=6QMjGIb_F-|DD|{x1(B0MKNaa(&_Jc$T}}85l3OfoDXfk3a)+d{xsq(
z5sjhr!TD9VrYj8MDr37yVmfkU)blTwlhbQXWt7HDH?Q=aBXOn^GjQ&mj&VXCzhkOb
z<r8`t?m6X6Wgd8hV=jtCqZrY?xa~7z4hLsPbb%4FL6uJg$*O8JZpqK4#P+a$<O|Gd
z9UDuYeBLEIz%^mRAsj1AJspGE$}yK1MuGCUSWB!RxMw0H++}0qA_A2W8z3l529C{N
zc{7U(K*eW(wQfRJ84Q|cMqKC<7AhS~453R~f$7e%;Z7$z?i<YNwuzg_vSB`?HjFEB
zj-Tca^W*uB;2oI1uFjSdBE>6AVIdkNFn+S<0N(W~LY~Lm(t8&n*p*%I$xx=6VTZkR
zYZr-!CA2J!WTZkcu<fDui7MqyO%X3==Htk;kbENdP$eBvw>JGWE9(!BazTlMWSUJ`
zDK(~YMikFjk-U4TV~o5>_b$@D3%U5xecJMx7u0zt{Ti}KkN)+>gYvl!%oW3HV)V4(
z_bOn+{sV-0DW|;tcpA>eo=Ji9=F?`}qp|CFer^<bz<8it$tVS2JA{^_bGoN49-YpK
zC}6MaB_hMuru8(}+$UTm`SzpT&k&GQpI%LMElavOF!N_(MAj1I<|Ow!>!-D7(!Y@v
zi++-WD|88abLIX5>)A#X(?bb3RxCb&IlTX@iGKs>gKQ^(DXqoL#$w#UYEl%EcEvku
zs{zB4QNW^?d#-Rs-4_<UbQV-rc&r|H55G4ab|+So4DQ?K*=T@ccd@6|!$+_7rIZjD
zgEjC1!64&=rHPch4?MP=+IlD5fVl$=-tq>P!c-U7>O+^#FZxs9Pr@)=h%<biM<sy;
zPr49<t}<+A`H7hlZ<E##CFvlwWR2?5g|PG=WPH(cU>>$Ve(~>-%#2Kzb1mlS#b$jM
zgvL2y$tFjX^Qn*Rxe(EN%o{dwFSyfEURJwY9LsPVBP|k?%_3aXEe~l^FqI!c9Mxsw
z0B`5`k<2EHjXQ&wlGxX;PY6XwO&teLD-ouQ76Gs<`c9t*C~Dy>`=z5~CljU*SJ2nc
zmnh3ln>?~^s(>=qFOfr&?i1U2yvaY*6SW&*?<Jr=W)6GWa})B2Q;+=;6=0VDE&f6>
zm8z+eUObicU~QqoX+^1iJ8qIJe)aJx@90Z&|6x_jk?w3>fK73)jUsh)@F$;dJ(h1(
z0V)%Wl<t!peR7`*oXpRj;|j34Vlv({JlTHxMy^(*Sr4`3=?8La#LPz}D@5Z<^ey&x
zo7mWirjL9Dc<wGN9+g25%TJl{`P6KD>hngZObz(q8bgH%Q-Xz3V?$J?(BCYndbTPA
z9<0T%KIx6|X06CL9-;Bwpi1gXlXd60+(y3UI1#r06vQ+5rFCxPI>b}G>B6(x6U+E|
z`s`hY6wlcL>h-sf3VIS`+Rnpjo-bM`O0kkAlph~jYY~KqVi7*@`Mf^7PDal0^$X&`
zMg!o5*|g7KhhM6Bwol7l(am9-=xiFMeMKVs-Xg;4DCtW!c<O*3a2f%F60e06uXo!i
z)#QT|ViM}#@R)4@Rj(Kc1Zv^A;gZ3WtGk;y#btXZBe09Rkt&0-`z(Cw8gGB8_&Lf}
zX%LHL)gm*Vzr4UemHoGBO$u9jju83tL&@GPv>pDS0Asp-G38u;F80=D0yLy-m#`j5
zAJ>QLw)qRWroddlvhmJtd)U@n7@&&t8dxgrjf_+`HL8?K(r1!Z-$+)9lt7pyT?vd;
z-@8+lXZEntmW5(y>vkS`-MAfLhE?ymxXAYpMmY?Orrw-1F(7%Z#tM9{A^_BF^UG!^
z#5U5HO@5T|SZnQ~8yrFezQA$}=1Jh$UIYrj>d|Qf2O7Kz=~|E2PzPvH`@hwH!6Re$
zG)CliQPVGqwQ}I*7aN*%FKL%H;)&hFaiA!WpCA1?iO=+0q_<&4GLA5$T-2|hpd)p{
zZBUNp$n~p8$t#K=>XGT`(NKp5c4wF%Rqy(2U+O78lb{9UjrBTV1p_j}rmr<&Ex0eM
zHwa0`>Cr6~^85(JGI~YAO`av}h_$`X?C3}C0!WBF>eo{?8=E>E%Bww(B<|=&6nKHQ
z!|Dp#aoXf_Gr{;uB7j0g<B*G`ehu)Dg_1&Cw&T&cf&A+1Wx6;uY8y$@`Th<pVX3l(
z-${%R`KW8hYMT$9O+<^~+2&(fG_7X$ehn?E8XKqfo&n9L$*vWL4+4YZFBzPfiLmN~
z&Y=HdS3AGMe0|3>_-~wo>(6_d|B+MtgxdciopSv=kM+Bpg7tq+igEp@R{hs%fuCRf
zlP~<vAuVWuKRY}0kQTI{n;Tk^@bi!sCpVCr?Y9ctZH$b~Y*_xWHLd+?ZyM<2V(jE>
z<?P~YWpDSFvUBJWus>)9lm-3t)_-R~Tz{^C_)9Y|H?%w-s`>udWafgt{$Dqlx&EwW
z|F!Lp3JD$_=#dlXv9$lt^v~A*Nn`$FANtR7^MBNs|Ftq11TDt+2k-k+mGW=t(9mN0
zKRow81nu{A=oxLT=*@l{pG<@721)-uGXp+jnl5u&_?K(w&m{fkeX?k+dzr;Q@`0ZT
z`Lvca7faO-^1P~%#2$uk(HS|cDBl0p%xz52ovDc$ix56tq8TJyxH0qKb+)^hs$sy?
zBpt}ET(bB+-QDw>Rof6(cj-|?@v=?co7pRI*BtebXgBW<sjoGOF4iG?t_5kN-xnln
zMn4J^e7SgeHgv#MDFwEpPI#pJ?pQtfi6G&T`z%6)G#s6BhO<Ee9ZTb8@Os?=UB#m&
zQAP4Ep`1qnBkI9lZdm!^)EI{0J2LB{YL^2M3hvVCrpHM2o0%7Yf{HDLpTkc_5I#j{
zA|5|w7mFb^$=*47<~K8nILSM{fSTZ<r}C8(tb3*R23IV_h4{k-m7twi<LFIw59~T4
zLGwU!cKC9UrPZKLw9nF<1$Uo|Oq+>Hrqy00W(i!#LfZ`do*dcaBDWF-US4WgcUe92
zxav2IXsHjT%bR5Lm63+;M@G?~M#TYMYte_64mF3$v}ZLFGbdYkhX4+m6BnuW3xm;*
ziGe0MrabctpJJWGwu(rQpIG;otJTm4UZ8oli@OXMYrXc{!LS;&mIz}A^B(CWoo!Dr
z7gaIWn_-7a&N#EIq(P4Ih&}DXon$J3eyivfb{-pM;g2>3D`$%-rrD;LI-|tEPt~6)
z&KFg_5oXFf8}5h6(;DM${EShA66CyizbU8pII=~Yo|x0-%v)ocpiKYh0aZ^sv+XfQ
ze7r~^T&~oH6}~J+I=kGQcU`ENGm5qU*$1*?eQs=9YVdZ#7&%#~wIz>AeYy%X*H>(!
z5F_{Eji2Dim+kFLO5-5Ms*Y)wqjq7ZO;Mg^_9&_&O24)!4bl{x<`?F^vVG1{T`dz1
z`^vz18y@p)u@nb2<9(j#Yq8OO!kn^4Ib9eT2*p8XAhHYDLF>T&#P!V>h?h2FhFBjh
zTAV687_Sq4?wQ$4c2E>(RSiMvbLTyIii6z%ngdUy{}j{ZqU&e$>mHGhl7Vj3Z#6rU
z_5k5C_=_!f>o~8ylx?&{^0E0E><I4&nCVEE^A0=18fJ(g1)ZwceP0sO0Jve9Xq(rA
zEWr*V*-8>2qBL*xp2>;!5W~nX(LN7qcy%ZLswpA(;+d=_$GxP;BJJWN>p*jbTU;Go
z>N0}{$?nx#+bFm&#E#lAx2Lw|X`xupv<7YGOe>#6kb6|iD=L250-)MW@3LTOu{~`*
zwY!~adtHfju>1Cy>iUz<$dr^Pah$Lx3nnjr*JmEjEKnpl(&=T~Q6xq^P(}j2oBTE|
z|0PlwJ;k%Sr*v@fkJ{Z}!VsGu;^3{qnzJEH$JK}}0jjy7eZWua<2Hu7EXcF8n6;;`
z^Yc*)nhz*IohY~+HJ&9n!@g7?WI7CbSxZ?+aiu$2hM~CA+alCj_t*<jXPnr@G{+7$
z3X?tokh+c8IZVIyv;$y}C^0VDaC#_erZ+478&7`<l2KtnRHCJ_Wv9VwqN1|&+y#p&
zG<HiZo8q^9)+{{1Ur;0Eko#d+qWkjS=n9z_Rx#ch7ANBci7i#(9WvgK4s77uM2zzc
zh?EL1@X#?ADQGH>U~q013Zb$n3FqA7`;J)qX8=mfEX<`#aGxi{`AhYwNRxrF+#IPf
zEyVcVb}Vp}dK9(E8mSe+urcjS*ucF>vE<0s<~4mJ(-@{hcgC=jq(jAp49@1m<SC}*
zIhUm0aEt8x_PkZ;rd8<_Bx}$te;&aj$Q-AoXFZ!oPEzK4A`5>1xSVjRA$(fG$!ln>
zm%2#J3V(2ZsIipn#wY`eP#MIQV;aVxUikJX;&2q+uA$~_6J!BJwrL}R?G!@;Kjzki
zCZi79_9$*hRmr+f73b2V7=`6I0dhu|CAk7QLJCd9h7jYKa2WTlU7ey3-^j)G4vkw*
z5Te+yt}GvkcGqcqw(&7HQRH{{n}i@?(i_5aQ4_Um3Phtva6x^%DewC2Na-g_LCjk6
zu%Z{S1?F-ZrgSck47F(5lZXmnV2)}Z%jOH0Hxt{I0FP5gjNBTOl-U{}J~rt#AA`^(
z?$~%O<T+VF<E{yNAx!9R#=Ql4jYVco)=y34E3>_Nduf;EYlo&x5hBGiOlA4aeCnMs
zi2B4VGapm&n%Q2oyzca~8E|zUCEkB6w~^G#*|)yd?~27Nx@}q<v#}A=FvTEQueuQ~
zYnV&vocUA>letTUH?;$M>P4J?No6NB?h>ulI_r);F1#n%QS?DOBk*#PPY27Pzn|`{
zAW$*~&oHrl_2GDDXs)4xs8WjGDTH-LM!u>oMHIQ=%_foPoW?l4$1>^TxATI5tjhtG
zccN_lDa5Ia7rK5yOz`5llCKdfWma{qH5N!ECZ>&e^+M;<sjs<Raf^Ip2aB*B3^XPZ
zixvZPS|94|NbFuqCB{xABq^>fhvDiGBR|2-01|`Dsu5|BVx;awqQbGAT!YkfJmA`M
zvY&+X=WnlG739>4C)4V7>{K}&_>`6O(FVO-Z6!|D?KZR5CtTZ(vxpj2K|FmjsQJ+%
z$rZ0<-FSeum&LSXd`LwoZB*+jfbU|tp@F!z{rkSg>gys2I3xTA93OkS21aTSWF?h2
z8IUDDw-t1>g-RO=zYR!Jo2v0cXdPQ>x!P=*V)8ZnfOs^2tlRFSFw1K(^6UW<JlEIt
zA(Wu3p)on1v{f47HHEB(HC9Z9YdMHYNG!U7@d$G{ir(U!o4_vs*JcobkTld|^}U*@
zt`d2ZWc<NERe8CLCq0byxB8Bjq3buM-Jr-vi{UQu-;IhC;kguFU$77m3ybG=*eXjg
z66ddaxp>knJttc8K^oKUGYrZKyT9I<OU?8_)d)>>xEPTmE@anQ?opWsufNkNtv}s4
z7>VvNRsw9GguRh^VqkxMw?9HwYg&*~p;X?bTQFa*s=gY)@gy2v2Pf<7H9n*6Ey0zy
zDgIsq@x~;MHaOIr=7a+qk;oL~wN3)_jTmVJ5;0hh4u&SB8wR3AMLbXfJEryoS*$zw
zVoWNw1zfVCRD%#`pI!ymM57Q$QGZ0ro<3BhH`w(OLt<@r2Fb?Z6_^%Tf*c-aSLKE2
zmq>&7j)N4R)eAYF#^v6?TV@>8=8y8hmMvEb9|3}G%dCUP`NP_cxRXjz8;ZWbgaJ1(
z>p>-PeBwL<k%7~zPuI#jXML?L54Z&I9t3pXnVw!6RTi+z2Co%N2A(dU*Vk53%JVF|
z>FTJ<e8bp%rl|L6XrFf8^Mub$nuXPTY`1mj(e=bE*~8fU+1a>Y41Y&IY2l@0@?)Xu
zk_4u;1(GTI+$q**%?n6~U|cs-!Ag?*(>)Fz7&{w1ZbuFuuk>#z-Emv(U$L6@v9n1X
zOk0|d+?GjQTX$c2eBD3G8yM!NCs{FbGbIge9ZH3q;eTGuBtIZS^AauV*2=0GE#6VJ
zj=UK_j(+n^jd}H0FnGqiWmfrZl?X(SjZnC>7tg%K;WA$I`;>2wo1jQQ(S?MO;qc6i
zJ%htK&oNK4t!~WAQiX#0pd02q10W%zSqD*j?eTdWR)Vp*2lxp}p8hw7(EiX$21$wv
zfsa7Rs60B0IRHB(`N{V>R?<5L0YSURpM24%BfXRsOlE+Ty92!wMZzMg;oalb9^4}2
z-?$Ls*!z^X#|H*TmdYD5xbI+Uls<H1D#cTM3Uon0UGq%3+hfq8Z8tf7H$FlF3TH#!
z@Fgf5Q4fMSI&y3B<6N5NEtcs;oqWaqwWYlZC-{KK$L*Qwm}0~#jIY3Zu7Bfg&^qZq
z@b!P>Z9ftCzwkEbxxoKb$qV<NPZ>ff$<Mz3I}7^R-9NeAf2AY;w<_8{bCCZjqy0S!
z@*m1&As}d7?{DrAjr^b>jW|9>2J6X&2ZqDzRC}FBsJNzt^WwO&;B=U-L%5RqEh{6N
zA*Lb7@^=BX(i|}@=V*Gao32(db%K(1-g#@Pbp;l1+h79zq-o^H!=dYyYek2WwNe;0
zN91jpj<bPx$KA}{!0nN~ugU4lr+8Y*$LB9HM*FRpBA7S7sI+}ay&M;$e4$ND5vv{h
zDM;fXg==#2ZsAF{xp|ekH2!R}Sx1F$j56ikjJ8e_^B0RL3QoP{GeD+Px=&t`L*v_c
zMk50z)%>M)wNJ@3iBtrH^COrl*MYTh$+VleG*PGIf(ZrYH4l}_j#`3at}?ab;X8G<
zT&w8iv_oyqCG|)9*4_+PpzbKi8ZTPebzH-zRn)BKV%AsD45=c|IE%L5y0FBoJdGWS
zW-?25nVWlz>FZ&`8NFol>X|kFG1vA<<BPqB2J7p&%+r^72h~gIO^bH-UQRiL2C<rD
zc=5oF<@UR_@5ND)0wZF|TMc0^4(37vzEBQPa4Ez>*1mf?aVvdSnG#ek$;~ub=CHl6
zuHyus<>sp>tC+f&13%_d$3|H3y}GD(Pl>m1jAa^%Z0%Gbbc@I=i@wM%=0_Y!dLr)<
zZ7l1Ezj<eqh`89EwUy<fA4rxY)MBrg#vCNdvHDodTI^A2Fm+j`N7fm>(!CgQd{C&L
zcLsD**P=N3;x3F94-9G;w@=_Iv7I{{V4-;{RE$-YUyKEV?H!o4p&8s2x=Zv~$xrIz
zloMHr*{bVEk2HJ@MUKcJ%{YSNjEFg&U$aM&ngilx&9{Wr5QTgg*$gA;{B@5ho?K#+
z<k1CP^n)d03%2>kN+g~=n1-QtFgnf}-v~Ber@j(8>DJiZ*>#$lX)P2p>0$|Es@=56
zfGtZuQkhBB`;_=%AT@0RmYFx3p8(b2bD<&y(`#ivHVy+}T<+>t>;<=?#|u6{^~OOj
zO{I+6R|c0ONGQJf-l_L5HIDX06_As%5{pdWo?#WVGjZ{kk@LL6=CtwCj)KR!tAoIj
zcX}4ji$g2KIluyT=xJA@CgnA~+&2AGNP!w*^OFYHQ3~y6fdtRKL^LDh*(C>>G*-$I
zVj#Nq8%*WB_B>-NR59R;mm^cKA6CBa%RdpKZB56o<T`=Tizin?Jq?&4mkf%5k9ojz
zktjC<>B7C^c-ZKh<d;0E6>(8Al&>ci!n}j-mqxs$Ce}-lg=bvAQxN0IYO72*zhZ`W
z{M`TeHP)E(885tP%C!*kbM3Jh>5{z6Q)VMi?5iNgq9ECsFfb0!3&yx%RwFuyGRyaR
z<-*-H@#|XKMLlwka6$A~YnMocn10sC8`i{b{@m6L(R=b$<W5~}Ja{M8`ZvLSmA%G}
z<2$*Jc93`Mfw`mYo#A_vthQf>s)Fw3s#YrFBd*agZ;OjMh+(KUz{)W0@!u`<B&ft6
zz5L3DRu`g&RIh7^(y{IDi(J~Hzg9N_NC$!YgeUbZgLyg=S>*uu(|l@*pTnIojyacS
zj_^+KYppWk6&zTaftCmk<!8fPO=nWvM3pG~oMw7krXL%<HgjR)5t8RU)@s-c*J$eR
z_M=Tn;pB`?g=o0$ADsd8KeQy@b?sEGIHlsoq08$i&Kyb;ySxYM*Oh%YJI<Nv-4!ju
z0y;1!VRco`@!BZ6Ju-mKPZI%z$H`b-Thn#x!}a`>Zm}(hy0Xnl3jjuNmirZ7MkTLM
z;G-IS)R1`c4~437vP$&Av7GM|w1lWo?2QI-;9*q}wGrbNn`xGVixj@OUwhq<Hlqf8
zEMGl!jH4(O`I2rqK7=Z9wGDaA^U)~l)q+;{1n{(r%Fx_%WdaPR9FJH@`7}TYW=>#m
zU_5>E5*YAFnt^BF!8G9hQ%o4wKy`65L0YSMZ%7Q#-;V`@N?ZGQKI4W7m1oOcilUKA
zjD(2=QRyQx|2Rsu#8>5|m|ona?(U3+M0n-FbFW9w-B}JF@6^rj8e(YE-5cto6Nwa+
zO=yJ4mgm-!gwfyXTI--N#<SJAfaL*ba*$_A>fJmXa;_Y`MhLFN560rife=(2UUtgs
z$l?`VIsPLLQWVJ95+c~?@gTtb!z3#VQHAmw7(9ihGKLVt)4VL|$1_J_>~Bc8oO5s@
zS;Pde@04$NR+lUm0K=B5b(|!M<poviGUw4GhsktHrgaW3-=Cv$`4tW(q;r=wz9WI>
zI<{v*?&nVmK9^0^{7M@-FXj04JZ`un-#oNBfACzw*Lx(cs<!Yf_1$-ikI7B=!D$}*
z4e3MlnK5LS?rVU@szSa&Xm3s)u|JY_|J=iXmz1Qpn(%B-6~zPIn|>v2&!71EqVg?L
z6TT&0F&7ySG3R|qR;K+E5*Xw>IhuzhP)1tu&XU$M1sEz|lbcDR3G(y8L8B^NbY!87
z=*oxbGmnm^b|NvZjNWy$zOf{qVaEn!xz)b*uA#8GWJSC;i+{<2Z}Fyw-^d;}jGDfj
z+CPnrlEF85k}LE|HyNhl#i=PZj_mEbxh6)1n*YPuIR|IDrdvO@ZCf4Nwr$(C(Xs7x
zoQ{o-)v?WvZR^YIIdi7x?5S_|Isc~Wt^0nTs?@uF&$_O)iaZifI$6IX+s9$CO%nJt
zBo0N>yte=vc(2~wRKlbQSbo8y6QodS9j1e#v3q~Kf4@L1cLsXq)p@SwY3&eqe(tq`
zmQUDMa(kscL+xyYISTZ03wQ!fmIs~3+29titT*DfgI~-$CW_9vssJ>V+1FXBUGt{c
z^qYXzk-8EK^EhU`DhR&!I-MjpeaP>myb0xp49s*6%vm*ci`?m!abr>ztFf4r2HCSU
zzr>Af;ILkWiwrv>biY=phdGdI$p0yEe<@WL7TVRA(*}$=buYqV-9Fr@j;E%+$bZ!i
z!=KpL147zvn&D8sM?He<O9Xgm7#{?nNN!#s!kXj<M%6#-iw;OcS=U?r76Ip&V&~}k
z%ZgYjm$ZlA0841bT6<<}51rVW(+AOOlR#4`CJY803@P;)!NZ>-SaKXTA`Ny>1fF7W
zHl3d_{9!!SUP77`Z3o~6v|u{Ge5-UDsTC^HDDBrzLO*N?sFtF;iDAUx_5PvasB{G)
z#}mXubizUD%_T<u%Sg#Z4BVK>M)8XmC$*R>C<lTL#Z827+U=2FzEj%AIda<l^bi3~
z=0++7s6&w$JG@ScqAO_@dh~dK5j`8BbIn_ovF`w+rL)U@W&eUv!9?ltKJDr>R&lhF
zC!r<?RpJ;Uxh8g4-51;9xy>6JOwis`s7}T9etg}JjAD+}5QLM(XP@t#qYb@B>fg3_
z%`RyvKDra|O6@Csepr}>Xb}%@F`0^mScg2hzZ-YA&d+F~HJA#N2!Okdb?AzcjO(;B
z(9r!kd5|r83>#1YL#kvRG8__55Zz{<Jh|_V`uT6KEF9&qaH$maQ~Qw65*-oQ|FKuA
zTg@EkoN=xcmDr<GkmGfce<Gi={sb%2X$Y0<Fv@_~cR*!I|B593K%e)3h_;mvfLukI
zI8_|(E9x(#>!G?X4)_Rg7)*@!vq#5sSF7RfYm*<9M-MgO`6;NXWtbxb=eogRes81r
z{)9i%;|jkE8@?f2XU;f42Lam;*E&=BFen(V%oRZSfM6E~<Rg%u$#=}0V<w?HH&#H~
zuD$gZRPhoLWVg%60ON4~+^(b}m_)v%3S^&9jY?6DrvjUC7*1y>=O86QE8c{aGthkQ
zCH6(@<m`n30)_L^FI4bEqh+@U2#Bi~?bG2w0v|2E#?G6119MU>p1z7(*ZyLMBDusJ
z9ekcR#W2RkUUsp%<Ea)aU-NA{GJ}b8U5JGe{?S0qN`{~J0^SCFGEF%J;*83ftwb8%
zJ6T8uk6XlG4EhP_{o&`FZ^d)NQ3f&6D>gu;GLV`aDQ|`TBHDnX-osXDlltWib<4}>
zhiHZi*Ea)ikg8r;;OR|KXTd{5vdPFKlhTrfKB2^4i<*0*;OxKXfkdfpQ7II|21ZP_
z+3^oYIYBhl9cVIR&^%uK$wHRtS8W0MEyhCV=^Mm^cpOT2*wy8gd#2v`5S<^tz{K#E
z!&2R!#_URs7nfK?(L`1(EZF04aKlM@VnI`ZweQch*L|D4YiE0Ki}*lCiehhcZ-1<g
z<J<K4_@=Gw=6JQ9?OeT=n@8I-fD-wpld!+GvHqj|#i<2UY5yY=NicEQPYEku)rwR=
zZPR0=-usEny-bIs4DIJ-T#kq02Ow4IwDF(u>~|OE-{aXIWB<SBFS7r&jRf{T9GTyu
z{yuZB{)vKr!?VAg{TJ};ca!R$c=kJQ{caciOG?dej?!;IHviU9`b((B@jK1_!!2U@
zbh`c`s-0;{#qO{obv{&I(z+rsd50qf=%X)Nqb=bC(iGQnkN_4m%^MLm?Z)p(!<@!$
zQlU{MCakOnW$)I(;tMN~@*ZEQT-Emn_Q}zFH%K{%)V3c~>0H7&e|PnJ7~spZNN2Hh
z_&5Z)Xy&6E?YuY+bJDBy?fgI!-c{+RNWtFT^ZkYSK=e#ikcQNnTH35KyU6E2S7FPU
zMx|bL=RzMq6zP($U$ksU3sSGKkGFAAKCiiAxSRyOX^DTd5+=5(_pGxjpXpkY;2qv>
zuXEvcXNs;{CO7x&Li$*z8E);7A2VCUbJ0BKTPn)*+LW+dryaic&aK%`U%hFJXoJ+0
zpBLo%RJ?NQvijQIPe=y!lLec3G3&W*SvktN01DWYT2!*fh9a33{y-^~c4r}q=b+fv
zt*Z3G2Nv75bn^_9>!dG#Wx?cPkIiN_sO;6fg%*YC=w3Loxbxld_VfMhT^4Kmv3%V;
zCzI)u+=nF}jjb^>TdUt(!;e)wxqBB*RqX5ZvxB^as4t^~dSa#_bQcIyo_gO*?wxP8
zxY*9NA?ma?y&!*#OFGq%V{{%rw|PjtwWG54I;gRbw^hG&>;g`SzmcYQ+8ue?HWAdE
z%4RlI$c|xiMAS6~R|*Dy7a9ES%C(W=b9Bpj^_9lVBVQNC4t}p}<GV$2>XEr7V_8wE
zP5Y5L0Gx-xkM5>30Vk{Gfumj&CJ%;ioqQ%$^`kS3f~|4rW*N(P<e%L5nLzrzkiq5y
zre{$ZFaE)9BRdXyT$#V*E4xB9C?qL`1(rh0c|m+~z<&Hp*=iZFJZPcd<CCq0u}ft_
zn?jp2)h%T0Yk*65t0MW(v$9Q7W`qm^Z|-k)u@{9g!UuEaP}NxvMZnZ%)@5C5+KP--
zQbwXODuAf6KP?}6e$en&)ljc&7DCa!TzOUx=n4~=i*@EvdCS^?&Sc0o2|rMa-SgO-
zq>!uuO6Ycq!RNjN=>|~X#uT+2CN=$4tkDxJJ}-06*7Hv0n&|Qi+SG&QB&H*zqnBot
zn8>)bq`huAv?s6|qs?(=a~$xZTNH8^^|&o+dhTqim`xOp(8zl@O(M<4%>KdKk-u~*
zQ(uHmDX$qfR>$kqyJ~77tQPs3Hh>S4!RdyG$kt8O@uW@=Lj||0p&kBsZ8clbB;#cv
zx5v+Q7ofSxRVQ>X$OwTw0T^ssN@7_y)<jk~699X0fp8_UWpGaY{jIr{Bnupifd&Xr
z1+^bsi@20-yAwEBDkOI*b$FU?oJ|X3r3eM7$IQ4YyJO{XxTZ=ny7WIAW7)TNUG<aR
zv^nkr2p^=Y6#T=NDK14T>N8MVBewPNn3EkH@hKrEXkictjkL?t3y=ec$e*}xrrF?;
z(xkL6B@cek^?G?mAJ?<vZ7v-CK%t992NSy=cZATqxZ!2YQU`Wa#`Y{V=gQbiErXVz
zPJ;n^aUR?qgOhkgmJSx-1?>e1r~|w<EgsGH9?nUpQ$I?~s~}5K5|~xK+_#6SDHG8C
zPDQEDfV-f+cQ5!LF-G;38XTWO9K9w1V}$k8emfRc?TZmX4$ie;{y7754&uEyZzC{o
zbVDFxY!z0_rlh5aw_s^hths6zcjwwdRVQ8hD2zKU05)VQhw*h4kG@nd^H;AqSu-F8
zM|EqCLYH5iM(V8UTG$j`SYO2NIKSGCAbX}DdE#A@D1#c7LfK}a<}Xk_BBzBZJu5*!
zsQQ=9j^L~L58A&I0Mb~ekcaHa-#qp@4vQ~2g0<>&#8P2Na|xMXs@$ryF7vPieo=z#
zdF!wTa<4o6V%X3gPgirgsMJrrj|iX8=a=CMcASU7HhWAC{(YA9s*Wva-vB#qO5Y5X
zqUsxViJ-MK5g=Ee!u;kFVCeWX1g%bf693(ss$IqO)NDETgNg?cT1a-TO?kQ?lkQh^
za#?$p>~IgWun}!WI_HA#<dLewOCfZ&?j1q6_RfNJ9+M^D!Gdes-%d)~CIn!dJqW^e
zFUpOeRnYkZ{h%Y!&>!;S`nq$vF=C^`h*nb?KwpY0rMJ<a0JKkc2JkPAkopMMkZ^P%
z!)SW+>+HbL3!sT=r`etEQB~^%gHd=79uN#?Mku^Pw|krq&CGex;Tu3pPTWz|R17j8
zcg4`GpuvS~NQDPA9Rb90dgoMhI_!^sO>ZMP$ih8lWpScT1{pNp#!Yp!NS3%T4TCRI
zbXcJ{4?9=J$D+o-^`x5e=u!4K;}smHjIL@#Jz!Vh3-Pi^3&;<`C8G$uLvYj}T`4-I
zgm83PWu_|=&%2=FdF8Lf$mZyyl`%v(nUdsRq$)*$&CLhAv3I+k=m`O0#uhuVW@UXa
z0%Jxn8SO$a$Yxf#n)P@2#+ae{w{4F0Ip}TMn4VqB-xaAj#$7h&PMi+mTHP2&d@&oT
zHRsZkcEeUPnmkpFYNFmb!!mXLb*U$lYN%mazuYQXLP7~lqxNzv&!Wy|HA3Y+z!|Mc
z$JLjtECsoyqU`Q*d<qx)x~vmP_&L}(rZ{TSzRcgB%hlGtP)snJ1rML>H=hk`k9e1V
zP3@$ef1eJ89l%nw69PIcxqYiy4wEeDrF~IhCH?p-_;LPge6S!X9rtqs!15wFKvX(Z
z@KEEB6V69uY(O>22lc=QAWD}<(c_1EgdVrhl^=H~pO?v_9_BkkpPk`Qmke75U#6{e
z>(ocM9gXhOV5awunIGNe<1+pT8`0|h2-|ztb@Vdz=40)nC{rJsB<fZ4%*Rt%QK&e7
z9F{`N)P<y(cf(a+BR-iSu0vKz73B$qOB(;UGE~T~eoo!Re53_L+XD@;zTkKIh3Q?j
zXDU)$mYpXliJ_>yk-{)j7+Pq&*wgutm5GBhjCSkC*aav<tg~c`h90AxMPk7bMGpTT
zDCeqUSTHZdzhKUZGUCzrM0CCmP}_wDK<+%h`>5o2BmX2={5p+j3yB-jOmHs^n+*ui
z@=cnC70Ya5N)eKMcE9;ZOjr6y48u;6uXooN0N$^PK?Xn&9F-G;?CT7GJLBFJ4{4c)
zzLia(=oyq$k%uH-2EZZ#&ISH1k(Jd)^A9M$)svr__d#9$V?xQ&UkgEax{b^=*pdvl
zAkz`Bw%Tgg3cN4h<7K~myWQ<}utcT+2?c(ntx}ZWDoQ;)l>3N6Y6CX+%UGND`rzRA
zx^V(XX5zuw&mn6=QiMGru#d2VxNpQof8@0>=StdhsW0-Z+Q=Dwdl!^D=(_4gPa6>d
z>e>T~saoBZ$6=g2&51HFl|P#;St94NPunw!c0_pxzpM>cK@LQkK-65{GAas%EyTEa
zTlD)l-k45u+g#g({0KkgugHL*7JC)FUtf>qyBWBT`O^6#2zSNs3U4I~(h5C{D>}mA
z>}@@>-47O6M~Bth6xx>UX{?FAow%q_)vAX>@7=!;s3Cv<E*LaRv@7O4Vrw*?{!5D8
zSF~HN$1kVM;zxfH=^Ve$OZ{7s{)aC8R}baCHf;XGKFyy*`ezi$zliid&i)G`{r5GT
ze~R?KAjW^!?!RmL|DA`z@jJ)-Lzr`Xewn{0%!@QP?bbd$lm|V7$D;Ps?=%U|9x{pz
zB0y_*+nW960UNmXb*lGJ3zFCp*tr4kSJ~Szl$Ii^nP|T1;f0}n+hc6_VQ8s@ui>N0
zy2V5=g2}omFOgkq+S-0dzZl`Kg>^R8js)0NbJZ>ux;9?j-y}?w)UXN_Ce|0%vSrsD
z;z}sp>?+!SEk!X8qvD4XM54q#cMI;$btxFckz6|x3&#!B;@#I$xJlU*LMfB^+_YX7
zu*=i)MVDt->NW0oek-tM!6~nMCxtO#kr(PYbMe|#zJ)PdB+|B+ZreW59IPnds2Y%j
z|EWf=KYw?tP1A^?fmB$lpbsB-9CrNkn;Dkz3=H%_CP|bkTYH8|CZ#SKEzeqAijPZa
zMD5DV>QcTlb0L&5(j<+?Vf$I__pH19iefx<dpEHzm-b4BY`v4ST66Qc`jk?zN*kD!
zI*~z3+F$b*qN`Q7k!>t1^R)0QDmg_w3$T-B3$7HQJsQiet2PX1JG1#S&~RqdAr^}Y
z5SoEhDtvR7ZSf8*ZGAs_!BOgNFDYs!JnnUAfeeTXVMwf>Mfs(LJ=4_UKm<I-0(~KO
zE&*-6k3aNl{yM%zwvCzA;;S*}(K_-em9!r~RE@w7U>5N8H%PCwLE@R+v_CneA({?N
zzJzKzAHmWN*Tp|^7y@=(V|D^Qt+fTvCAf+5LGwCf({-`wo5cZ#$)x=;B|0W@o$6Uu
z8bf|~Lobi0HD;&e-xLMjjWX&F6GS1YyJ_Uj=om%#N=3TG1QxbpsQ1KAD@TZym>$+k
z<uhV}R=Gg~I<2etfbX!H3`c<}wNzkOSW<^UHCxOEm=ob}da~LGqc#-JC8MV2!?EDg
z7y_$Ir1M5U(GH!WcgGVFL*f>&xx*Ywo<bs6&9d#}g(@=m!9!KIt`J%DDhw9RGI;Zd
z4@wmycbR+1Fd8I0el=YXWS1*~FtQlH4F-<tc+Rew_dfkDa0Me;FApNBJC<5OFg|<Z
z-QzF68z@f&glM=+4UjJ#`?AOhh=0y-i^$&jV3f%yp)1rTO$^+SbXOw~M)6t~R%BW0
zGbN!D?;wN0>@z6DOq4|2!{kbzq30KYGeZC<m#k3{<h5aqkD0kF5#c!J0mEXC;OY)7
zgLx`3w9=@e0gHVP=YRf4Vi;CXok`wq_O*%EIuwRwiA4qjZalWV&Dd{ARh&n27jLGJ
z+1iZ>;4~R-#Qv@i7nXW#h=<fXd%Y=#gX?sAcX4qQy(B-!3<<W_us_ixROkEEnM7cn
zsooeH(}==>n>Fz;ByEyM!iM8s(vy5*Hx&m5w^Av)*+NoqMKM*=T;UUltIOHC!&Ep5
zxOw4DHM35@X8xMW3lNW}31#Ul?u8poaLvW9o$(O;P}+KSgsr$daXjI&{bfR=0Q#@Q
zJo^Fe&DiImZXYkzc``nJRI}HnNdq?UO=_SHqCO;QbbELa*ZD2g?gQ=R-WcYsSU2XR
zgQ~IZ5z(NRAq>?*f~NlB<a1f4feuNt_dRyAs9M>wMCn+(0OLZi=iM+S?>+g4Lv7rw
zBGuoZMdo(t_p?!Y$#blZz|*k-(_Ic}gut;)@h7vVF|CN^#K<#?+2aT<*<nAz204qQ
zB4j&szCU7<KjK&9@R6QQTgQ;;r*`^qI=pJh9&a<Ad;7x6GV?F3f*1)#mU`4Sx7~n6
z%s;RKn{080hptP1qZGD?3lqlOoObuI7ivl%5)8B(;&4lJG@c?U41RcrYV7I-3lIkI
z+SCj1JX7DCSMg&B2j*b%NHfIcu&X+ZP8fIi0q7RE@8iJE|ESATk&+MoY!vtOeq41@
z#S62JCsI?c7eCn9zF&*~T}3bH=BOlhBEJ5L!UYy6Yh$)ZkE^L+1OW1<$bCU219PXP
zOOu+a0wagDkdE!vo#dz#0Lj*xV12^W@k6a;Gw{U{ONAMjr)deMct=jU8S3|Ow;SBP
z^<Q5qopUX4*tqg%7AWJXqzqYt;acfq;N;NKww4<#^nuhiiK4<B2R>gP&IC=cUbc&j
zW#UTa#h2#ieABF>r7R$jNjCZBFDsbKD<oVpK!p-D0Xv!jgjIx$!l9YE{tn<e>tDWm
z?6jVO2O1m*f`|?3{_XDe(_j=Q3lyJdttNDI_g<PilLripf*V%+lA~BrR_h0pc!F$B
zMGjeC-%Ka1U(d_hZ8y995Q~-N&dXg%<RgAFZ`Ua)ht_|u4iew49WUM^G=BWi9o4WG
z?stQ~f<$~ZyNu-3)N51X@pfW$C!I|$VD0gS8R;RVfCR}Y+p4u+m{dm@nk^w0v_m{G
zR&!*4)a6Q$xbHp=?q=)=dx1H6p*ewff9t2HT?X}C<TNM0A+lggR2X3%2vZ4*gfRz|
zK*<kqMtIx<b#P21jzYYy^#WLc?&aAZ=wnwwj=bCXe&9n5?~Qd&k23asdB+Ih%M9D%
z&uMj%>jq}3<N|)x8F{CHcbG0?3YrN6KY?gu|DMPz3HLR0Ip!+rQ6o7UP`TQR(0HzC
zH49BNfkHDnbu8oH{VFy|T-In{QCRd2<+#G>@4rmy0dp8vK=CVvo&beYg&6`oBfRpp
z=Ib?VEY?AO*4bNnNsXy)-3$_cg*|d~zW9lb`KZbaxif=@aLt|Nbsb$c=THO)ZQquK
zCL_|(1#E6rVJL92yA)%>#~M{L<Yk`K1f;k1I%|FOl+tB*G!27%da4fQ-4s|H3?j$O
z(mKgJ%h&idgj5mLxXeLwSkVaZ962pQ7AcsDt2T^xt`vj4y~?Q~*m88r(fZ`eP{c}&
z;x~A&h}6L7J6}DUGQ5a0bq{|XLytC1xF9IcMzCJiYZadI60~dn;cbV6!<`O1(`2q?
zFdh!KZNwg@=cZ=#VbeJBLqVM;nttP%M<8d#0Nh;L)`2N#tg=bmWui)jh_L&0EcsUe
zZ>v1FJPzudR2%}QZ)m#3Wh}wnC_`w6)AFu9EFE_=W5}OBtS^s|_r0wU*Xa5T>tVlI
z1&xNq_i~18eV&xSW<qb>Ml~3>P^y!1W@$c}B67Sc2~4oQavK03d9J~!%kBh4VK7Qy
zbVcwjdqgSh+4FPs<RYJ4Ll7yo9v*H&7u~iv1zh0_^aEk;utS(M2#b=+FmENo8T_n@
zggtF@=LNX<0%kdI>)#bckPry2b&l)1cCG(TYff^VS2e)@gYO=cI9M60Z2_af<Zi)j
zJf5TR=0evMbz$Me!6MJmp=!v+Fdd0if@8VA$LU9Bk5WXK1x+otoEAT|3`F>9Xt(qZ
zhdq;hX_gTZUPSTGO7@fqw!0gB95~FY6yFVJ><A2D{;J5UNMI1@LL5D2W}5eIJ$JF=
zMH{JnG`AljqH<Pe6oiB)84<ylSTB44ksDp3E&-_n5(M=n3b;Z1?zVZk7Tt3h#!}r_
z?kJCW2SgNjX5hWllHFxbqu#Fv?8|F*RI``p{?W*VQ!~RG&E7B*^ICFL$w)}jt#sX4
zv0b<+=TV*YPP`!t1vlnXEmaai6hW_|mwi8NBn{qy`naqsbs>4Vl<=pB`}j$}>{gSE
z^UV9_dzTU0=ZC3T5rH3fjyhD2g>=Q4tAdjwTd#>8_B=N`&q^$>C3iT!Dmca|(6atH
zKzU+LY_F&9z+oWfo@*-JWu})>lHYT)x~u0XWX9q*)%|s{-sO?e%w3|Gk!edJ!TAw7
z3mw{Z{5-%oP#>je=8O~7$^lze0cJqI1kstKzYbyno8AE$dQmFqrWZd}6J?hv0t|F~
zQ|+Xb;%@3yNUYR9ZJON=b?s!6G5|1b-EXOs?dQ1vxy)PkWCnyWbRS?ByY`C(ksa;Q
zpzp%_E`dUPJuxMFlX47O&hbLoUpGu3D`4CZ5QIU^J>Yup*MVN&=$VN7(~bzlGE=*E
zKQual?$h48ATsycnznc6V;CxngSrzqa9V3ikV1gEoiM~_MjMX!to2=d%!cPtssd5=
zh_bEn`<HKs4QPKRq~AS)e@{q%m<_+%7#x3W`TWQ2p#LJIe{KhH{E<}fFIl30{OKPP
zWPd;Y|6Ae2^jR77S<Fbl^0^VjO#fLO#KA#F|5>WU@>xgmS>N+lQvUzlhS2|53gY;E
zuI$rq;rPS1`H#^Ee>|6ee3^fu-0y7mJG1=-#QVSdEuW<ezv1XVxQ_jk_WmN*ooTNB
zEkD6;v&&E~!gH00*jK#3ULcay+<!mc+#&LQiiE#0r2|bi#XG?p8~tgAs}EB$iB#`g
zvEfjtR{bkc8KD{bO(wkNhS(+fMNva9l~eABxG?9BP23CL5BG<GMa^|HBB|TAosDsC
zn$GoInX8(4MVnBeaM;%6#&I`=!kkGRUS%7hY?WDfk9ukg+vyaZJGL`^9hLfuNzg=n
zP0QI-uEHy*4P{Q+t8IaS%nsh0?^WE5>+)!?4qlD<ug>Ka>n)g#9%aWFp*bUA7x-O(
z6V}7as*>>owL2GbHaiC(Yt*!ilo}v`L$em%R)%qTppv!2QJE~-vedy1Xsq+nhwT(G
z?h%m15VT4;S4j`^wlC)bqM6rqg0#pMG~|{@C2Mv&>caL}gj%s`NT%`Ic-i{8aP#N<
z*+bq-xaIpEtX&7SP9FeB2gKYt$JgqtLGDW#G|R6Jel7@VLkqh@2}ipf0k38FZQg8C
zuLB#p9KQSD%r<fZ4mCX96<FR_D}b&v&d6M~CHs)wQ~Z_W=N1pQW9doAh^{qX*UC#A
z)d<rQ(DC3n63bo_gMnu(LmiNm1O-r-7Le*H6|ksc)<%F#5IKIf(G?(YK+Tmyu>+`+
zF3Ade#m2qpsKM<CU(BOsEBx{WYV1%ZN?>IPNOivfww1Fz9u-3J5(4OhfFX2{uS+fy
zN73j_mGY7hv1D9W-D$<6>WT;LQI1gdLj4$@@ocU|WKdTwB@$V_XgdxD@oTRWF%d>9
zs{hidjRe>HIFJBbbHn`hgaablAwvYX3W2)qHhN~<o3VNla;r$(AmeT+fd)mMMv@6N
zPb|T9fLDv`h43jINcN=xJe@qHM*x{R?u9-Wy5#lRSF^d^3kA+vrlg#Bd6RJ`mv1DK
znS4(jRU>p{q4R+mOn&YAT1Cry`W7bIHsM-Xd94JZ0{41%7ma4Hc)C%<e*dr~Oq5B%
zuJsTlfpT9^fsZi;o&JU}dSd`^lof``FV`TB{PffRd@R|zQLD!ZTG9;7-L~gN_zGz<
zSp^R=Z;!+V)3^5OVAS4z2;An^mVij+O?RIz6>G4!H#9x!Cfr#oUFn;$E-fN~cT0%F
zd=Z{iBBYVw{uM0kotl`T1*Wg#aBD0L0+xUq9(>_Erd4yu;nrL0v3MU-gj!8c8W^qO
zjkS|)`Gc8lmvUdg?TqISkO;BvBP#qy)GcQU$nouH6fR<;pMplUx>^pZLw&Ne0}*_E
ziQ3|jQ%v>GiNPTIRY}*!ETKB3YbXz&R@`-Yx<^<WQ2h=2<~eYO*#&1?m@n<G_!$UD
z=)`pK=2yT%ooY%~nvm#UxeZ7!m<E%{$=YXhOmq`a)=QWpAJW|SEQV-QVXQ|P=@hTR
zK3E%J@1Vp7%Ch!RyO2Z;670`|s3%op(0F2xxWE~K<hQ<7=K?r<!~L_)ya3p8q&(9l
z{t{$}BGfHd0|E4Cg8`RNp>4b9a5OR<U2pRVQUE|_d(*DeXuyhJd7R%pMSy|*WDNEe
z_*W8_IND$;79he1e<XPKqxXl4J9B2D6P#mMYr?TsLIO#k<ODD|<>%b>6ht2&U{^!Q
z701)7ZlK{Y+L6No8i2!LH*IH&OA}Z+^t26x=#2#O*^9H<(7b*H0x}$xP{pJ+P8ug!
z#jEIPU8!uMtWAH$0Kn=t{k1HesKP2B@{8o{5iv|#6V)D)*`trrDV=OlyCCcv@VD(w
zo|K&ujBiuW_{0r4EeCUr=Gge3IrKqj*W#Mxpm6Q+1MK<^7bF>jC!Ed6h`}MIc<5C|
zX_CR=yRePWhS~mF$4f8jmZ1KO%q_b&fac>W6p;N{`O)UwoJV_<f?GCf2+LK(*F~p(
z)+?wKp~2h|X!2Qsl$ouDosfwfu$qsK6h~8+A^b2{qm1CrbzP+E-3cK#Wpk(WPdE8r
ziRWi#Eu>BxLOAK&`ptCqdDv>9wy^|Qs|Mq6oM{$P{8JuUU}yyAR2=6En2Vtt6LuA&
zFut1OvZH4!TvyE%pK>1b__M32qlA22LY_0Xzv79mV_u99mE^GC&AV}QA!b>KPO8Q(
ztFbNgUKckQhT4oSP+*7@jVTC_mB(_D;-}IOQ-y#}ie+ACfHBNtR|=?eJjD<aZ4}E*
z587it3;Gny3h)JxXcRSq_A1}H$E5>skgO=(K35<zx*fYW;@6b<pn;YjGn6>7l{LGv
zQ+N!aG>SFZg?qo&l(WXtEtXrR!$jQ@i!{b|ht8G74UQ3weW=a9@2E5ihhpj%u<NDj
zW~)Y_n;F2XhDv(g9i{9#M<xo90BaqVaiF21hzPz!>QUksv-lYFjDn%2N@7{A%+?j7
zeQ2?;aL0Y<%D}2+&d({kUZg`rSo$x{6vuo4C<c#0EEvbuyAnU!uef&y(&{5|$zO=C
z@U}o2U2V2YKba8&9%`cQ)5$*W_z4{Ar^1+f?B(}SZgW3><0t}*hWzsNJ6|q>3r>#J
zNEqVDkqg_|>W@Sqg1U?0RdZJw&f6T&_AKaV6cxV_uGFgbFJc*o%~87*&}r;of^Ub(
zF&(zpbj7R}8J7CyB)X<kOT12bc}TFTf&<LB<>Cb;fhY}>TY_Q$`Vk^py|Z{KNV|Ah
zc!(p|gI-Hc3Q^Hed?Qw#4`rAbkbe8<kd^B7s8)6io?wSk?X7g`TMk3oS%q_;&KNLP
z!?oWasd7Zaouze$jMS%NL5Q^B8<ITIj_f=90(r?b+|fuijdy0?V9TX$4(oqT7<|R4
zWC>sptQNvcHT&86{VFnMHX~0^ZEC9ZU?$Ch!2iV@+%Z>+Zps9RgfkJp0Fqcp54VC7
ze>8=O9n;F$+@HUWcEZJ9EK0?wk)RtUSWvecG!jHscL9bubrlY)fDln5*Wsg{Hb<DR
zLFZlDS27r0-S}$6su+_Psnt-UbQE(Go!S66lv~NC$~N<$lgiy0&xD=R7edxxXncy=
zSpNYx%W>RJrxiR=7=7Ce{V7fkm4jrsv9IFIR&9)63ULycc^@N~M-JUnmPIWs`C=o0
zJ$sPyM5<}|G-)e2x$W9>Bw5;>)y56PNq@$j)3-Ik3@&Hiy#(~kFN&_e7m#dXhUh55
zMU9Nomwkbn^h~_aNdJCxG0G1TLS({)+wIN;V8N53olJ~eRT17MjEAZtku2kDAmHI)
zUm!A^J2BI>C@h&$T)4YgUG?#uu)5g2D3Nu`u!j}k7N^7z=kc`2uob8$^X>9F6qQoa
z1Ry9HF;ahy@%^xUA?O!6zWh_;K%&bpwl+U8;`@C!1DM=<OjjNDd^a1#^b*}-=llE0
z`QryM=qO&5lJ+vFHH!<>OQk<>XoaxD6OJ^EOo<&q-rtYk0}LgQDk4Ov4$9t=vx$&&
zy&F>C)zY3gTL>975UOz<=ZM-&+BCkzQ)Z=1^PS!P5~i?1Uvv_0?1~_Xbal%K9c8#R
z0a4Y2ddwA|hm<#P`T3$yM}3%qrbKA~<9w|{pi1b8g~=VR|Maxj=?a<{#15xNKIf*I
zPjG{sh9!j)iGk;O1$^rfu+6VFJ;My+qx$QKf!kiNHNsauLWZHMBke^8C=aHOq8i!P
zJn1F6Lc~ujA&f1$y2NgoxV5NVUT3Aab1XFjC2=&OUYYO~;%$V~>y~5jM6DcSCY&WL
z?`?R!O;*^@R=2XNz>#l%mG`-~bhtJEm~SR%5^%ZCcqO^y;{DQ{{Ap3l$ThKFy=OoV
z|3}x${mWrzOXPW1Yh-XJPbWVY665H3tQ%SFTGhqI+M5XpGiAgFAhQ4LFK}r_M|Ydg
zwM?KpwkWZ@Xub^sBZu@Pa51$(1OH$^IIOgAKS15w1(H8u7X$r&bf*5U;Qz&%`W%<~
zE9*UfvZg*U;$N((zn}h}y(tFz|KVx<%Lf@hyWQCTR+IV3U@V`x3I8+a;E&t>8HaxF
zNc%k-_xFyp-$u@Ur>FmRw*J?_`V61drvH2d|G*)(&l2Fjh(nE<HnFQMh@U-by_-bn
z)K%igs1#5n7D~Ht=-Tlj(9Gyt#*w<n@uX@A?N@Kl6ZYa7buG=ATM=*i5rv^llO2u?
z-l#qJdM1X+S6@XKwQS(o?9T1ZHyk>t)Z<6y-zKcvs(C^Ebp1L%w$HQ2zDQw?{_3UM
zT;4ma%c>z7J-u$?!C4h$0k+A+ixW>ljTzIU!F14oH|kN$sTQ2jh$k^!ebcQJiWlyg
z!t2wl6hV4*8&Bk)XRfq#>Fyirt2#5Es`jW^uMm=mC6?pSV#M$9fJA0wIl$#=oYhP1
z7=RoVvGw>Ml_Y8>?X-@Ub_^4ohYKq2E#@vIrR^phkAS9)(NZ<Qt0fvSo%vWR(<>17
z7GW^-2k<kgQMiTa2o;-GE-&y{pL(sbf<(k@;A(F?1=Cr$m2U)?9M-c7zmR&7<?A-h
z@Sf|-5hbm}RVi7Gs1%}XnU#UT0&GmWlNg7`y>BZ?cf{cQp%F>J4hx15^eiH~2EGMa
zf@ji$I1<=pqBE->Db04tO7U@`|3K_#dr+e$)wjoJz##Y5#4#%YHP+(uO#xHZ^?s4}
zLaL!^e?H?NAbW+vB;7oohjFTER%LIW&5T-hr{79hOo$r>LbD1WMUWn{igRG-h!BW}
z#ux>2XDxkS>7Lq&4B0H3$wfUCVIX2ida&$Dcmfo8tAem2l`O=AS~rY4qF`%Bo>gU8
zg_u0`;W~}9H~adc*`|IL9`D*>Oqk}hsVhZ@BP&)vY}CJdC(DGg29ObLL8#}LGh`}2
zAqM!_LN*MxCj*1HzZGhD_A4Ai-_VbZbBh+z<o>~mOSPaGuxMMcCX67J<NbirKrDV0
zUR-`}@oe#A)SpJCTRV^sWdSjknWSee`hqR?S0dS|${Yp6XECd!A;BiCTn%cg0z%%G
zb*a4*#-b2`jM8KZ4E1940I}6YG?6&DJRy3yxYyjdFB-8|BoRT2Lq?U&q;+8qLL*!9
zITrfsc!9V)@MO2$d1}D0(o^xJb}+UJ1!E)o($rdCNfBt=FZm}o{0Zx#MAD6Fl;GR&
zmBRoxL652XK_a1=-||9zv}B&r?JZ%Z38*45CgRW(>MXy(?`&tsqfVYpFxM*#zT1lf
zB8kz>+ng37LoCF;`0@gi!$y<0mb$gh;WjF*ds{-=4a7qm{BRX+yJtQeL@f0m0?;XR
zI6WNvp{F3sfXHi9_VvKSdvU=f^68g@af<ewy+LDW0Gy9jKd3M>RC$}D8Am+0SiNF9
zOPHGMoqhg}0Fw+*)7WZv>1W2M@k(ah;_4;z%uOrSX@~3#i6&w75=AgFObU$<eE}9I
zm}`ivAlc+?g1~iMi~>r$Z{AR)52s69l?VTGfJfGMz$f~YkR5UV+aD%_SR@MSmKggo
zRiSQqk^nJy(If{s3*QI3)d|!?R_oN7>uGXg6ngx9DZuSN-X*0}QA^qySjA+0PU#Ep
zLlLPk2cY;v{X-Z)B?WWH=@B7iKSK-M)nhiCmv72o{9axuncF&_R^MQoL2Celk~pkU
zJM?jSS(17_TAyw?G*#H9kHa#*N?h)VVTeTu-E*z+-}xtJc$C}mZoC>5f>IyeSH<OA
z??72>5kW;x`e{a>me62+8(c6lK~UBvxl^Yp6q*3IjXR*(26->Gu~K3EQn%F1gv|ef
z(7S|<;_@<^Z{4d?Lb$MCrU`UFXckfniIRGDuFk}^5I~mK4ut5caH;{e3IeiMi+X!X
z4VWWk!$3l#X5WB!R5^F<vqg4e_N6DE-w_;k{bwSs$Vdx@lmvXR|9FDHl=5!E#gE)7
zW6<2Xv~Mq<UOF#GDC)Mm1Xe3%q`C+YJM4xQB&JoX=dN=4U&x{i)O~^4xgGX9s82SC
z3;WBP+>v-B1*C4f`3LKcvvU~<5@Ja~HSvV2n&ppDv(L*CFlHgiC?It6Z9T{IWvan_
zS`}B^^cL@Jmjw@xwLQ6VXfk(U7LBRbQ|W5UMh@z^X$kmsIL<ue4XXW=zBsNfrXdiH
z;?FP4l_*nf7v>$pMUwIm$;_$KtgizuTrz9&@~ckCTVr0u?OKFo&FUcMA6np@;7O`q
zB88k>+keMB((}1X3^qSy9u#?>%`{`ahk~XS74rrua@Rnt!rKxJU=GpjP|ODnh(e*N
z6z_dA03%9HDlRY3i$!1r^Kxx^-pMrcKAk^H4A$4<+~D=|q$E^ZdD=pyiNWdV;***(
z3kC4a5Vs0lzFChThjF+KuC+bQAI8cp0|)4e4#BLxP^}VLEh9D47Bn8swocSiKR6n+
zCo4>J9OGOd_X4fbgIk;@YNR{GW45mD-0I{YBiV25lHt@@uunyh!W7V?_C^T$7H`sp
zvR^<yP7}t=JrPIesI`cSW`X0}a0xPopqO|SctbOW`Nf-FtBFj!FG>K*7tlf}L)G;_
zNgsMxDEtwu#*TsiW@|~Xv4vp(@~ek45993zCOOCa)WA|>L0b1V9NkO>lI{Z?5A)5X
zeXBz&b%HHbM^T2T|JhIJa}8$JD&x^jM|+hkm0e8WR<8;r7Z22Nvd<XNZ>>FCw$-mg
zo+lz^+UP;bdOyGRO1i0^;7f@kf2keutKvA!`_8gv8%0}~j?6f2(+ID`uWA#vux4QJ
zzT`@P2`}9#SvlX^G(*>SgvOLsC=L9YKEY2F<i>7r>Zy%;BTptB8M7oWY`YQ{{8T|r
zVm*~|r8ji03`bNlmme1utLoRRB?X~-wqAV84#CcuT7L<<=57w;{nRoUe+zPvR^-77
zyFuMCS$oPEmi=sCD>-2)wo8|4H3KZF=LN_rG7)BW{xX5z?UDz;{wS4UHckcq9o9S5
zV7#LwE8J*|iY}$nKP>!(d9l;ImO3@7YNKk^Mxz}j0R8wgSo-+VD~+(mkHFgb6hADr
z=-?<5!1D{p$%iT9k@o!syg&liPtxS>OQX_sM3)PMFs)T$oM;G~-m&FFpv;$>{DpD!
z*)t~OIo{o>_#yOCiF5fxZ(GBe!S<uoiGGsYbd0q4vp}pggL&Yay1nZkX?@dmv|)B-
z_r<g#FPvIrcPLP{*xeg!?H8?+=tdQsx1SjvChwKaGw^%h@MT{IdKs8>_c9h*r<l;&
zXo9ZIbtOK0dG>&t9V)v>94(~Or25T*6G_|+w5Ox7dh5z5&>r5TJb<K7^120FU|E;0
zAj=3qZ?-hu%Gr{|O`4ZfCdH^|xFWAIC-;CY#}4jtW*0lp!hZUYbUg>gP0*T4nz#b@
z*7bc98IIMzck&jVU&R@f(e4{$75BX0N-VRg2mTsnXM-DaFfF7-F^Ue`GjuuWBwBu}
zk71C_TVnQ{bCE&y2SDA+J1mPB8!Csv?&vFHl5zL$t#}5CWj0V8^i>}~t_du(NKyQN
zgPeRC6Xj(hJ_On%GMtHLyqd7qTX?{f8dOEY;+%Ow2jiir{dwb=!JKV^0~15+!VYJK
zC3`ORcnC3_YLrE9gA<5EXU&)^<Pn^KZB!!Vbm68Z0!ZnSv8@TSh&6hywMlN)u@L0|
zUTxATYsk$C8eMEQza~;~-i3vxIlhIK`z2bN+h8|RmHnPtBgb%6yO<{S`oL?O0q^iA
zc<Av=TbozCJ%t^mJ2Hj+ASV6;P?~T;?B5{Af3$l3u9E)+IT$|s#s6>2@h5cnYbFZA
zr<?OnrT%HYFnroP|Gn+P_*pal>GOP+f3bfSdHuQk>+je7Gj#k;5&ugM<~Qj0EOh?t
zqyPJD{<qa$41csi|Kn!=AdtUR+y6xZ8PGiW?Dj(X==2Mj{xS{3K%G#4_X9i*43_Q<
zp|E+iFCTu(I9&I{P>NS#!u6v1Z+X^3niE(QaAEqimf~<>eIBo&5!xY_a#cu;lExH*
z+214j#+x75^txuJKXVDbt37D39d^EQlpb&JF5TZRzFZjiV}X~78ynPVN>MTztt~an
z_+W?h9B5ept=>0VnamIji2(_l1bX|`37=t~SC(O?_E@weF$A>eSXBnS=*&;I3|V%S
zEDX*>W$E(q-f^j?^h=Td3Mq@zU_LiA0MDn-za?&>S(>1OIt49L*T+i*v(e?VXWG2L
z&bl*~!fNo~M3=@QHT>2^k|r%&{PuORn+h%n49yx}018t*t5)-b2AL|k9@$<J@(Yy+
zIZ>#aVTavu7-&&H5mxdHV%NfbNmEnN))g|BMoHOl0Y>4lFpM1ASz4-zS4E=!`jpY4
zvrjhusfy`stQI9HMLpHcMga(wBev2CzN8A$-J95X$e#7$G=};N;#-EfR6fjxUAQzj
zc(h)xe4~zBVua4dzKETNEsI{$cf4{uctMk$*RlZV5@&bOk{}bz?4mxWzMZTA;7PMO
zuKB=~k_i6z#$D1zX|ectPDIQXdmoZT*oOn*hdL`<FVSV?`y=hRY!J5!*16VNWZz}w
zO3}fd&DWnJ-Aoo9f(-EB{cI+k8e2X^I!hk$ZmvI<z^pK%Z8lM!-Z}d`mA+BP<+)7_
z%zeJQT5fT5YY+)(bKM8H&qN1(UY|Y#gv2%#90Z7dRSg-dTRF%S$kPyJ>`j|~vFDjy
z&e7}&CJW|kMSvAF$I5BqnHSMxX=JiQ_U592$M)(-CbYm8Id$`p%a-|(`_+pK9R~~o
zt(ZyWhv$nWgTAQbBrE?_&rtZeGdHXA%;U<Ov@IO>C4}YL!CR9B{QAt3s3`wdCZ3&7
zV7K4+wl6=Q)HqYc2^G087C5Q~1#4`R2&DBB`CP%iQgEwMYUm>IW6&zJY<2j46O>#I
zGG8uJ=!gr*c@L&_8b)knvg--lX~RW&+ye)bA1C8OWuz(p@prBVP(4hL!>U^h?gAGd
zPU|Y^6F)aC!K?Zyk$&lk4)D_91kSWC(Pv-x1@Ss^$)Ch4BwDJU$us0x4{f<uL7A<U
z!SnDQxI@pdmw`Plt=;+dZS+t}d?pODIHhpIUc)Ie1LQRen6)P7=}Ix0A2Mj%hZLaw
zmIt}@1NywfP~42^=g#h$U+vela$vmq7jksLWCXmTLe5d|2djvMP!&#k7MehVuY{{b
zbQ29>?7(x!%S=BBq^7NGvBh*}D3Ec*3bx29Le9LPNv}N^t_QpRG&$e35}C==G`og@
zi;73|J+G5prdx~Vj;1Kact3Y|vWV7rD3T@9FDO-hv)0D`+N_4kpQEbX4X|#T!)H?!
zJ8GrG_cn`Cl~J=){i!7rzx-|XX@SL@I?7`zjZK~2PR4&eNqooSE3SZ)DG(H>`4%c>
z3nV>pRwQarl@FZ94cv=eXM0yX%yEylFiMX>;H85x!t%$#{^uxyiU)F4XBT8h=+D?G
zI6xe;ioR$>g@mis_x<zzumIGDr~v^_U7U`O%k?*+U}@s&#9|%)Zbb?kI@{i0e??-=
zXL#}4PJ-81{EZJM0e4zPcs~aGuHruT@6YRkl9T)-dPd&nmR<swKmG69H}*+ugX5eA
z=@q*#>r4w-iN@;qzGtAr-v`>7f@{sP#_;CuSTP5ih?~j0K?#9%aVmS?h67z2d|6`a
z#jr<g2LsJ{A<Y6(QNZ$&@jT#2WQYh(*bp$4jX^M{AaOyn)~4LcfZ3S|mpgXmm)Dp&
z%b4vXISvhj+u}!7F2-Er5N%RR!)*hdWeAUiAUP~9A035|FA?;h=2E9PlLY@TEVmeR
zVZs+>R}Qd+RqgRab1Mh6V2nQ=RRFcp$S19F*dY9nTM)B?SE*G^t2#oc<q_Tl+U%1!
z4v^(^C9!jNm#rS^*fe+xl%MK9#{&4NOxaTtE~+z>a8Hm1i7SeJSAs;D!2Xsi)&RZj
zjx-I{sIdZ@Hv&coYyqU>q@iI%6cmSE?m+}hiyX=l^-Y?+jF}7r7|1IP&JFrbs-{<1
z9_G>B3GO77KdS#LOedod0|GFb9cSlPXCC=UglBv?9+nb)J}Hc?L68U|T}T6)q<A~o
zW#{oSTJF&p%&C)D7Zv8BY{#BNsOfrY-ETlR8p{GrS)}EUKOJB-vC*oQG~fhD<t#%1
z4-lG#-lK>U?~1Xii;&_;y*+EkI^u!#m3nYrN46@hx(+r;U!cRv;~na{mawJ7I_02q
zlSRby>L7;XH9$tzJ)?Ng_{L%!n9`s_1H+UuACjwu;_LM266=)HC^DC2^l(qHq*BoK
zo=$_?s!)7_YK^ht)WK|$oDjVCnZeKr1GU1Ng?v!u2+7Z;h!TS1zMk;;M^G_Gqzq*@
zvE!<}j<-sP?a<dE+=dF&7wqEP3O+{>`wc6e>9%C`dxtFHx^L*HXIBO3n{>=>6ChC=
zD6&jSx|p3*9aq%W6bOy_NC?fZ7Op)l3gSfqzI#U1peS~y_~dx)MfV1OH;8>m@xUfk
zK4$Z6$#}>wukCeDciZ>4Nbw*)=m1P<P@34FPEqv_B)A9H()}Tr&gbGhe8Brnh*sMD
zoVjQ{Xh?U5!VQ%~RjpVIPB^;~(49T1L$TUfyqoIEA9;naK<pe086Q8l24b_d6rkFD
zH0t{RMf<lGOGS|*kQSpM*FiWMT{e#kg3s=Fngp&@xv-MR^YAxXjLp6bZen1W4C~@=
zZO_%vTk&{&<kHNAM)^ui(^H^lL1ZeYjX=Clu!O^)pW~3-Q3sEYb=g7s=9IAuzF5`_
z5h?6qq8OMy%V6m2Uf3OD7yynL#(J)~po2^gtps&Q;o((op8Kz3N2wk`v!gwv?G+w!
zfyo4M{%q!!!-4csf~1;JcQ;wluGKz2lOZkkJ6m*Gnx0J=V$$-h!|@33!wdknI|^0n
zzLhA0TLSDt@Ad8%0vTZ_1e~ZXO9Jip$p@k<8h|*#>gM?8D6&nhRhd9&UwDc`duYL3
zN8ikdW9$(x%h_dmYVpw2#OBLJy`D|r>K6gGIVFJEW%eObJ+<ipJX>KF`S11!pqdIe
zbq>}mI)g~>_#|-^2J=Rd0hp|*{34z<rD(3=hr*hZ0ih8E)X|%Gh3Fz45XtIK4dG|s
zA1Hb4@qr8)!4^~=!ZoIVRBPmCSCE0OMEWvD$a;6q3)R|m+b>7Rp(9-F5jnf^LkLCa
zp&+TNovGe%MBhQg3XpJ{_8fRq6OPkCe&WK-kj%PS{pCB$5@ax|8NOf)8AFr;z&R2R
z8zh54`_J}rcD^o_$-W8&*u2AE;~>81n7pUjllak;ia`NWm;zUEMU&r2N4kOFIDouQ
zv#<a<l^FbkpgJH$W(JWA%pQi$!VgqfVtb!(PgZe>TT;bmJ-CNs0casxtIUcT7rueK
z&dZ58L+UBAX;AwTFw+rof<QRC=|NAn<&zitQoQ4}top5vJe&&wBxrgCA0wgFjd}Lr
zDx(`%HQ4Az4;Sf7mg6u^m=z7Qz_;b#H@=@@t*<jZDw?oUKG;B?kl}%j7oT}3%YGOb
z=pxOca7$j`>4F54tZTR^KVAUf-OM2Ab||r_HXTuc_C+jjFpO}#fc8gVe=x}urj21g
zj;iqq9FYZ?c)cB;bmgoATflB>8;HwxI};{%x#uDpRH?U&V?@gN<pajpyXF(3o+FEN
zVmyk$0O)3v-wJN!f}6Y<t_|6Iz-d)oIQ$uWKE?k(iQ?bRxcyz6|10=>R?Yn1z~_Ht
zsWJR<{9iCz82*1BWMXBcWBr^%V`O5dV`F0Zli9-X$94aK7yleS`<KwQ-<JA+b6)-%
zaQ@OFKf`BN^go~Ge{l>0%jebpBF8lTvwyH9I8myjWPVf;kCd5Yil5L14$4FRzVJ?i
zheun2cZl|FldGOdQz~UPh5rit%xRzPLSwWQnvX-z>!#5;<fQt*g5%}g)b(xUVLx1)
z$j`T)A<VW8eu6sLHkEoaGRre)vvJmjg|?xH3Tl0dq;gb8=cwH3NO@z%Q?=v;DaB>*
zreoSd4JWsk87dpcmPJQ*eVT5{*6X;<Kj>)&Iy<Zg23H3s+_9XBbNbLa*U~F~dXMzW
z9&UB%OOZ3~_-F3d)6qVxcyodxiGg_`jV0(K_of?)FHnh0bDOp#7wvUBZJsXB?obN~
zNBM|3e3Crzh>#Qt)(>iBOX1!!fzTi<QCOW;6&tvIr3_e=FmQN}W)pgml1xb#X#;P1
zd)~HuFR#KMK2{v2PheNp&cI?{Af!0Ou=1fVmBqeE`#I}W&%ANMYlcAG1@<4saOqa)
zSAw1=6nDo`Ht4TQ6HJ~g>N4JvP5p?21Z-hwSggJIwsHo+D!q~4dJ;j#CUw+0;Q+2V
zBW!z`1t9(?Fq3G7p+2EV`J9)dgx04hwk?j7^fSV*qS+FX`{#0+Y<%KHRB2OQcVOxU
zUb~H4BZ)=MJ=AFrMi?nte-ey8qCvwTq5xAJu>{VfQ5?Hq_z#T!X#P|6;99$WUs2<f
zieZ=RY{`rHY~N1z569L7Ssj~eE{AJ$1LoSi`pgdhpeR*uy2t^RjVIgk1C_}Icwq$3
z?>5A#QQ!O0EX*5#E52@;gAb`5(F+vund0??husPR8ndr=_-Uw#-@#S#mx2;!_e$~e
z2#m3T+DjlEq*hiz$e9J)Z~9a)Md`wSn{i)2*#i!Zp}P0<RzZ}pvPI!zTU2&pW(ds(
zCuEIin0aPXPvL<q#d%q(Sk7-7%&B|T0-&(ItrFP<6?%JN#d}*iQga-xS24wbR)zIY
zXR!s>L&<feupkNEXWH1_jZgb-A+E~YJOQ@@ESVQWTe{*#3}?hHWwsJSd3$NOB7;4S
zNt5rK!5(K!fQ+9Or1BwN9B~~p$-pUNv`1N;d06p6GId<Ow_R*IF@vcNf}*usCRi-@
zPC66#@nuFD3E09|np+*V5TGLp%DuDuX3t@O8ExMb1Q8%kO};ZRuFA@208IgrNXc-k
z3N|-Hg>a)_%tJ<8m8S>VJygAu6ia{xC_M^-&=TPm<{&NoxO?300oP)8)P6$o|FQPY
z(V6Yp_IGUCc2cp;itUPxif!9ADmE*&ZQHh4NxeCz@9D1YK7H@$cl`cIGM?la`#bhX
zvOasRx#pVUL^`F|Lm>$!^jW}J*ia0T;Rrp$W)XYQeO$S4rmFhv{JEyZ?8Gjulsgpy
zQ#8XSB1d)4e~|L33t5}sxtucPC*=`c0xk#~lFpTKy#1P5RQtUl7$cpj3vOgvAWBFO
zZvR4;%F@~tuRF0L-GD~W-O<zcQ>H8(T4Jv=ST~=XGv*356F{zdQl+Vv2R56@G9wJm
z3gyv~{D9c2ypDW;N=lMRR6r6X=dDz=ACI{9EG3m2=&2%1<=bfZ{FuqUOs`@QiMmP+
zKihhG9PR_=+ycRx7>#W4ps?ri7pUobx3;E`Jd7;rJ86sYa175y@~*lYu1zup&oUZ)
zh75M@W8p^<bhhtOPv(j@iN4bMcMR7`7xOwk!z!s^qkER?#t5}fNpH&4e@S_We`7#F
zbU;udEn#n)A6Ge#X%p?o=G7#;G6Qg|_4N~pxazZ7Oa}P^z?R`eC9msuM$L(OEV13(
zu5ULm|9JNH3JRWT1!kQU;VkZb=SJw!G>-N4Mk>2lGbE@!WH;-vofpeoaaJ4=yu-3E
z!q}ezB_};%<N_Y&8+gM}_1CPRi>{@L@xFA=Ksqj(te<i|;5@R0Y<sUQO0L&sBaGE}
zy!>n6fg*6V;4S`yVCr82HYjMnT?69skRh$MoEJBCE!)3v^8*PAeyQ7nz*W$0Q@esS
z)3U^|LZ`eB&n<=z()aH>ssNG2T6_+oHec_$<@o7ujV}e0hFG<xDyG}ycNsz%_SyM0
zX!LGJ{&h!wbbOHbg}^-zK-j)AK)ZIiEIoCP56s5{C5VXOC-e{A*Q?=76DH?(;{zzZ
z&_i&H)~f``gMFG1eLVE<&fl|xNR|v<I?)EA1?+0&hDsXUI;uNX9vKhU=kGi*`P1TS
zC!c06jb5<jV*Ggjq^|$k)Bg{3{ogWp7#Mz+6aP>5Is?PMrto}N9sW`HeiJ4ces{+I
zpSJMd0!jb#8yPr03j4>l2v!y*T8<BI#J{>I{&wC!DeV6!=6^5XAG;lXsq6nJ{eP{n
zGyaN6e-w814`at)EbQ%Se~ajORmJcOtqWl%5@|<kbzdUj0O24>;k6_aGfiEuu#~)!
zxN-P#sn|~?QEN15j6`6{JjQl<J+CS<YzgCSmtJN`Yk)}p++fc7(`&8ml}vtBAwj*?
z)59(t?~xB|H{7yUtibll__&dg+Xl<U@M8y+d1`m`)!bK#-L4=;=m~Cn-kUka#aptM
z<<$;2Wh@UEBdjGJSk?oV^3_Av%gx?wyiQR0a7(~kV~SEL;oR?*B=)hC=lSoV>o~1m
zFnKgl{&^+Z<KN#7d%ZiLh%p_`72jw|;Z*7xaKUSsgtU03R*GH#pr2K)zuLRF1iQZn
zHu{MWk%~}4w-{8<t2uu%3rN?P?K0N1##Y29qL<_2ZmPh<Kr@W$+A;dl^7gPe-S7(7
zPZTQ$x26f%@w0ogW$E60(uHuyg|~w@yd87gWk5@m3&Qz)(zx^ToPKTPEea*Hja?@5
zhhSy_m3JPK17RHv9UKv+in;cN@jy+LbJ29wkL3zsz01*u3v6X8(vZZ$qg9GY;jDsH
zibxR&C)vO>dU`q#gnZYYZ}(_g?9Wp?e5+Y(v^IFx17c?Uh~hU8NB3+}(8@FkQhYyw
zQq2jO`mr&?_44g;abDqS-fK2ieLt60z99^rJkMd@fWpBQ5&L3t(VAB?#Kojn;j_gF
zaRM5ULOF&e#(dG!_4m9-vnX0mQ-!<J$33Fp`@A9nm>G%!bguATmVT>Hj8PHIcC&QK
zTqaWKazi^g_lZ!^MAsxz*I=<{?eXxs;A<zuEVHf`=I7Z9=}8jXV(0ebNxdX6q6j42
z3{ia&EwK3D%Av1=G{VB$#|O+~WKG@lA&EQi^e}q6-NZkzyf4l`u2jVa@FlW~^(lFr
z9?_<!zp!b5?P^eLv!0}^(?1mMBw}QHfT@LH@h>8`KOFF{0@x~3_-{ldfstXQL;<E3
za`qQv(1aVgcnt!7N6n;oH-6!$zkecScJyW9y)t=B!wvLBK9jWTOTY%prjg-hWInK|
zUK4l?FtuU+?wqrX+h)JKtZYq~<$=WzJpE9C=^6{(aav6-U_wVumgtZR+6a+31s_p-
zaZI(pj^qg0*B!nQRcKX(d~o(T`x=Zj^mf-1#vhXxdSDno=$cW{&6Yu#Q~&21#$$gk
zb`r$8*0~8PVqtt(>uu@7QPB=R-7~2IN3n)f+c^YU%>ku`AM27fG#p1x#4U8Sm#F*D
zed|xb>aj(|yv9L1ViB<OqI@kgs)ZiWcmVo7ingjAXo5W(YS4AL-LH!6UGpNCIBkLj
zsv!fAM)Xhmf~fWmH??R9x0V|#4l>?LPXZ+YBADMQ%<zEJ!Gu)jgCc;0tnY^y0ba0m
zF=5xbX@t;c3G0H+G;%$`0Z8yv6~84bNmhpLS5$GF5b8|m!DWIXEAZ<Jo*$vhUSLV$
zvNz=<h1>t&i5#3Ak`yIC%PLB-moDCTxma2hX7GSQ5o+RZK~biF^O40_N?~GgdJt@k
z5JQ#`mk<>(1+n0oS*XcgS?r~ru_9ZCkn(vh!NWHL4R8Y9(_~=VL3jqy@PJaX$Y7|X
zHRDpFG{c$Xh0n8m>Zr_~?BXas(_-y;lRcfG8b&HcfT{%9%1`f3;g8Jm#GT>y<+^pq
z&)K51_uPJa<c-ymA6}I_ua+&h>*BX8e9?uQdd}$q<e{s1-`+(WQFvmv4~VBTt?b%^
z4(KduxQ1=2I4tx}#<s)05xJoV8{GqZ8y$(AG_ee)e4TYQ-qWDo>=MgAK>qNNa@xgU
zpS)n>QTa3%bkLyoJ3pG6cX}Mdk&Tw{r$@LHkqBA5?H`YM1z=)yN|>n9CGxpL!)mqD
zUKUAZmSRdGP%GMkOY0!&?<bn6UNxa=mI>HH;+}DhA#b_vb6!#;3bGr84^Ehnj0+0(
z?>*9%Dxl#2#1veYkI~JRS=(8}LZrSlX22qk@s7yXJCDD_DufYTSAC}=8z(LVho$>A
z%GR%>GWsL8JgCxa5pB5MM~O95c;9@CXnl+7YD3txkHm#h5uTM?VOvsbtu?gLpvJ>^
z;6ZVdMHPB*wQ5dtulA|ax<#qa*6xk0Uy-lRjC)jnP;paXwYdvNCLyA$>k4kr`IfQI
z+J;Mt+{je~XVQt}qHA?aXTybc6l7z6W2XtsK73><h<3YsG<ZLukv@S+9z~MNhZNAs
zc?!4&_Y~A^YixfOx&<AhJEFx;tVB3zFPW`UvPMD5`uMpZu!UZ14+hB<6sZH1Lm-S@
z5xrp$p7r%vU{_L!x!PMOmJOZs7}i8DN;&%m&2q-B29h8-Nt-fZ7mS9K{ktZ+y(Ok$
zhJKBprnf-~AM6n`B4~i>yv2Lt^L3F+>t^4o^+$QESnAjYhad#BAOCCu+6bTin2ukV
zcfdI99S3k!&MPiPqotUCj-tjHwE6h{y_Zu!@Pe)|<Zx05nabpwS*$p|^CdNf`}MAg
zHi*tR&A9nbF!ZST-hZbZ#{b(f`g;Zc7Zu~LSw{b$oqr7LF?@7ae`~vH{gOlYx7Yq)
zk-spm_p!+9qfPrUu1C-Ep<1vLurRXGvV6b}3j;GPD?7(;<9Y;i$_nCt`#@pp;9$c^
zM`vhlU{7n~>R@VZMQd$mLT6#7Pxn6?y)3LvOw6oIXiXg~Ehv9E8S}3w_#2!3ez*RL
z=<*-pSN=g)zarJIr1gtO^&ew;f049)J?$T)#m4rRbL%TGRx4*eU-9T57Qj1F)w+1H
z?I=+dD{tqZ+fpgZIqWT^yL8w1pW6&=IvblTB$L#679$MqE-yE4;xs#KXKM~@FeX*n
zyt$;}qtRWU%X;6m8&S;+>1<rWOnY8WG}Fzy?e<sBCbxV7h)^h6R!7Np3@$HKk}Pty
zXJ@T1eT{;Pw)~!ybE}RX#<2c{lj_n@ktzFj8t>j{0Xe<g=%BUUA(Q4ZT}7i*YqoN8
zvlA>bUfy8yRs_SP+<E@&v1KDX`+E!je29m>?g=Dcg4DVNpO0XsnzaXlr_Ko+9Sxf=
zj7Z$eXk-ErtdLN;NwL!rr-q0u>_bC4)zSH!?~*_(NS_GBgk%7|@F*;IZy#Gk)@=Vh
znO(P}`a8pFS?m02@@b?)np@GMzC!`OyHYFBPG+S~Ey*nB^}gJ8=KSns)%coPw-*6W
zwR40SJ11V3;M;zSwq?bXg+X*BJ;KpPYKkLRE43qrHUqYp2YuM%npl1b?8s`ebfo@6
z3W5ca@S>yeb3J``i5PN6wq<Kapb|zsItuw_qzDvddu&Zen@!yeyysL#K-Nlx-2^-^
zwry@ozNtXUdFoJuv<^~X<C&VJJ<J#6nG@@39`-Qquclu*Tw;e@8i1Rl+KoU{_>U?{
zw1CitN^^yomoxYGVxag*h?(1&2p`la#4w>Ds<oCgO8vGzDITcxM!3(JvI;s1O~L>r
zvcJZMHaeupP8L<1_&%=y<L45m65oRZl#e1<tpJL4ZE?~i%nE{VY5Pg2Ndq^29i?ma
zzw~DmO%&i!F>&*rQw~H(b7Azl=E#6>SoX~!Bpq`VnyjdmdR3I%2ehUdl`NbEzX$0a
zI(`bf=+p|%nN@%1Vli1ZQxsR>xqJOKt0&{uTUGvAPY$-YoENTe)s>UHA(#LO0kDS{
z!zc_LGluf%t}DI(s1))|xqVa#D)qBw)rb=6NI-eFk4|DEKD16@19T%BMoXb(r1YCg
zybm6_q~siI9%y0!CyChi_87oGhQahCo=l@fMG?1e06u-K%H#3^Y>-+{bXBO33bR2W
zPXH6<P<SL>MEQ5dfbKLbM)Es2wM>8sR>Nu$USq=mO{K?A7@rX+CY+1o0J;X`<xxrI
z(hyAN!4eD6<Mn5{U+jpn9Kfuzw=&mDh%j=sG-#9wYx|GSxV&K+40c&!BS-=mbyvN8
z1@<|r#y0t^9q!foV(%4;9vuf5D=N%`#XA)X7j;%3z?8zUy)&TLqKv&P(>Iu3OxVW@
z))jK)WpHHD;Bcmd!D*89ah99G5kwG~$gY=4_|@5XaFdr;><3Q*hz)=;xb-*KEZqwi
zEmkw!a$WLC2iFQaEECgi!w7Iu$`;bTtejS=R)|Ni*M6N6!9O~8I5hWFaVR5iV?QpX
z*SpukF?<TRw&PaeY_y+&bs-djP{<=DN>j4ZO{Q_nF#=WP`u;*FaT~u9WmU7k%_U<?
zF!x#ujvbL#+u)NAqaS_98ggtWwh&=8CxJQ!^A<kn<WBU-+ObbXd5vpZ*7z}D*$e6t
zMe#e(KB}omIQb{$mU_udw}(w9PK6a3{iRkZ^Byqd!k<kyIgJIkm(<xWQjsXC9jMwl
ziIYojKgCmNlh75ouPUXMlvYiz$ymRXWvlBxj@1S|yEVQ<ZPwy}tvt-hTBu`g2oM43
za<k+qXUky4y;(V`;L<gYN~b_!PC{n$WxBkbo>pZ~y}Uvxa_!uM0H)ui#7V;zTohfn
zaOUgv-A8vB76oW%()tvkjiI{^3%ZOpl@6N@1`H^%eez<<TtrkkB7-72T;x<mAK5P~
zq(xKfnl&U#;Zr?Wo*V~J)KOh-VEig^q<s$#932kWP)3kx5waUR(L~tN_S}ptN<VFF
za+J(s&9AyU7mkETdTU*WZGhspzbfAY>5=zN(BvE9gR88oJ5<!X>A+TMAmPITcuzli
zu_T8|?+YXD&Unkq7NCaD{v63DNdVuDX+83>dwA4Y1>Sb-MZv?gnz>HT)HcwnMi}A#
z%7+afPp${KrI{#7h;*fXw}&h}4>{E$tt-Mkepib`fFUnVXZv$QE!b{Ii>{SBBou)<
zv3&=Z^GYP%&3M8@e#|i`DR8exs3ym#e>xQOs(j(=vZb%%tr-9Gl;R@0YHL<O4jf9U
zr*mFOS6Z_0ZQ%{DH+?$88)>iX?kLdqj!3>GvYJ_)&tylvi6RDK9e!GxB`;$>Us}Ig
z+gI-IPuE_A&*XUYJWxVwlQ+FPvpehV&gwL4`=^1_9#4f{AWMOkJX&~YfM^B2W&P>>
zmx^d>3TM&6+HP;e@)T{@1sCDMGQ2;mCvaO>_c5P3mJ^P1VJhUkfdgdG?f-;@ewAYW
zT=`-AU7Yzfwt(RSME*)g=MNU*<t6yI@(1leF?=A)KPxzlzj)F<)~B*DGt+X=v;Dhx
z^ZO<L0H$BMGJlR2|4?k`KK~sx{vuNR$}xYq%YI8r_>1CACdO*i>cG8cry##x0uT|c
zNl?DnBE=RO+!O)-x|=_DpKA)KD76!%!nT~#-6KyjrnayZ4mNaoP_W6wn?z0MmaK&^
zN+6XCn^{)yH18@(QL8ui+PZgDtu)$!S&J(>!hz7U<kf%&qqI(=toP>o%N4LD?BlU9
zIG<}Aq9829y%V`13aGNZDdeY>vwR5qZVKzImz>h^IANK~$re5<XIn6o3%=tEys1lB
z)o++bx68BXTlgy%`LE|P1vb0$gA}24pI%N3w<yk>l0W-(lW63T)8V{hof($T%fhu8
zzMy}uSQT6}Hkm9ehEwkDQg}ST+ZQ3km`k-cp-GW>R9-Eg>gCc#WZWq8t9FunZEMcP
zyc%eFFz-CW!vqGOb+l|G3Zs>bndjt4(Khv3EV8$hE30OBI~DHp-hzsCWumpD?dUlu
zTajha6iMl)=&M=VLl}hGbs=X8yUz?0N)&Io(y^z<4UkO$_k`{wf_llbb^d;BMI^XC
z0}#TeJU$!eg%jK?g}b1RKKTtUo_2M|4hzVKU$-I_S73@FDcX%XZ);YC%(v3~Eu>I%
zZe$&|l0E%;l@N6+@SF=_T3+b1lBz%n=|KK~#OX2FI6M(1FsyqD!CYK$Xi{U#w1Eh+
za)`vf)=0YnX1p&r&uV1F0gVS+-)gYiUloAJn;M8IWiNl*wiKA#OCFZ;hCWs%T%7h3
zv$#=8|AxS3F`$rxiB(JmZ*zRZyYg39@X|5E;u!~H0WFk+wR9vT#!$aO>YtTVDFb>L
zE^K;Qkln1{s{2H7<~d*CcqR2OS0oj6j{@Q7vRqV2%u=8rgFRD5Qm*(gdUx6?EMMKk
zx$Tz9hhLp9F|p@<M9Jb8D0zHB`wkMC+c$id#Ncf)4KI5K|2@nHzp{a?h;*|Z;_;og
zdW))Vl-TAgD-$g!h61Pad2@ZsLDA%3Df>+gbHh=oXheq{GY_tZ-)wL9(fdp@TSA>n
zaawMNnuM`e8{zdk)P^(vs?gIBTyvkRoN9FDMdd0p<Pf&=CEi8A*8w5lxMf}`qQG?w
zb8Hs|64lS?hdErb9@qP|`AXbhEH7dC=8?{;!b>Y(P9sb6p;Ahk6IJr%cy4$y<fXtp
z@DdSgAoA9lNpZ=cn-em}?-woOK%5Gs&Iw&_sub*tJN8b@?i~T$j#iMl`i$0;m>|0|
z<wRVE0cVS|sJA1awFhSWvd91)G3()_6ltD9#gJ;SB|A(jjkb!}Aq_aluETx53!ZOk
zfuh}0?$oI*d_%$Th=;dIj8rbRyS7^jp$Wia$iKz!AMdTHr3F5h25Vrduy8+y>V?B6
za<5+y%=5>}>kyfZdA(97R{Z+90t!?(;~VX>=${gZUTf|K+^c$7S~u3Mq6Ba16pCVq
zog%WI6Y<!`7N|$<_k2t~8rnAL=iN|bzsg^&;Wdy0=Dqp0kzTlW8@yxsZMss~u^HaJ
zpY%>=D#(b+9}1+wBS6BOI_<L|RSGjd-2x!ZEU7L-BJhTpJ{;XbWR@(~S{c-Z(_88E
z?h?9RuH`pq?JzG|SVxB{njT7mtw0vI?>sL2-Mbh$T9X529QYml?nN~YvHmMf4VAj0
zVGlx~LN%t?PZ%P#x<!5|Pj?Q-YgeM@hy?Yv1!B7~yKXU+r)V7B9iayUQs0<XiBwLF
zA%bDxC=Wf4HBWmE`}GzQKQ9o^+hAM_hWe1R=2#7oA-2}2gOa~V$&1lT00ww{zH4wd
zZVJAc0|5xb4fv)8LE!GM!}5X(=%0Lq=LR`L<14arNK69#Wgi(zNa%-{y7WzUann>E
z8@G%4;H3OQR*9--25AgslSDsNu^h$mozEr%;Z-ONV5ei5BsXc^>Jid~TqV435<*9W
zaOv)SUS6IkFagFAm&@*uHa#h9w(E!?SPVjt`XK-Ou#`JkyQ9)btsNR^DsI~7DE4|0
zV|*7k{;?fHqn{XEf;QtDcVWuYs=REuQiiX~-Z2)OY#{^gUSA?UM|C3g*C(#>g&!fP
zC?Lmc>_Rnh2jW+N=bnu03|wg6Dp}apir?W>F$cb4;bFjmu0qc8zQYU7`aSD=0C%XT
zO-4zP-n|!|cjmoux%6S#A7ic=khE?G17IXPaUl=wo?N5h)rwmJ9L4lU<uMcxODF?{
zx+UwkhA9jY%i0EN?bA9RG)m6L&TbFmtxC2R(y0(El?R80uYpdYxGzKZ^Qq=Yc_<&&
z{5a!FSUa<>J}l_b!&=yprSGbzdtCIelmGGwJ%U6^iTCh|?aC(OISouo=4Aw2MO7YY
z%RX2L57vc7YP2)0^Vz2!)Rc3w3OnAQ;|a`8qwk*moBZ?DcM;c;;J);8)<zI|EL@_X
z3`yN`v)KXq%p}`0ozXSDcFwvEJ=ijkrO*y;n^mkN>dict^`fBJZga<1ycP(y6u#M?
z9<94{B=I#~Zm3xFS0BBH&I4;Bqyc!5-RL-O4M=mQYQR@iYZb^tGHe$|HP0ZaYvRnu
zOVeTQF%oa@xGdPM!)bQGef>gXyyvlC^a6op>{2R@K@nQAT^J(8S^nzI&p}6y^J=ma
zgj>O1erPhTi4?{DXihJEx}dk%_bMLAq;|@KJ;V>iDrNjpGSMkA58AGbJikuvui)=0
z;IMMOoO3Z6$X4ehL&pxfGt}tK2U9VjA?A!{;<mJAi_n4_Mb^FRL9g5k4&|-<hWGqB
z_8o&v*;j*^=E<t7Oqx&0+0gQQ-|)xHF)Ef?C1{!sN!y`yT9vk!1BJQn_Zwfr17Bsx
zrS-=<-|fa$9j{1M(sF&iu8HCsi(AaaTDiUF0|)e?RIWv5{EK%cxiGX>%yR1RH>vz4
zTZ$=9M`b{Kuki-Iv>*7y_Yv8D0(HMN-2Pyt{{VIWqh=`>*;_l>85r3UFfjfeD)pD!
zdA|r;Ourj^2pE`t>w77DeEt)`>-V4e=QI7U`BWb)$V|^c%ly$iWMpEa<@m_A`uLib
ziS_@X*7dgo{}+d1`cT`LSvY=QNYDI%gv<<Vw9Fs!9y2}b$CCN~=42mA+`mb0f4gu0
z#c@9F5fd8+E&Ff68pFpp$43M+6Z?m&=|Ad$&d$!Xc1DJu46U7vXbr3_>Hek-YTDRY
zI~W-_5NJADnd)1+5NJACn;Gi!e|+)P$j*v@mY&tfQq$?T@3?E4+BsSn5jdL}I+)V3
zuozi-x_kadE%D!A|MyPt|J~>P{a5ji8}SS5|JsxN1^oY1K>V-4|HBLRkH4C~Yl+OP
ze-->I)YPq4KO=Z8R>>)YBmtO>?@Jdw2>Bln!i$7scn<kGkXF(xpp;xy6QR6aW-g}|
zP{`W?=hH{D8y~K4@JdACKE>}2;fl^6MxjTlu1-%r=AABPamffHx$VBz6Sl~1%3iX$
zHC*L0%`;(2brzq~9!4rRuO|l5Vtu)=T@^1JO+A%brI9sr&!aBfG;x*rPI_qw8VNh}
z<nj{?B_tZEi4ko^S}ihn%v^GAcE(-=Nkr<qVwvdux5KVEST%SR8Pwu8Uo@$~FXMZ4
ziZq}4Ew;%g?h@-O3jN!Zj)D54(X9vq9eS<>C1OE*W6P5UD5KacPs?2}uy$X29Dx^Z
zMZm_&j7?$X`ss;FEj8Hs?1$S_ji!ssrzk%~KI&Sqdc2d{f@Jr1F$hP^KnmjojMcg`
zTpOjs_XkIXASIhsg&cwp+S<P%q2akOSa)f(P$}iT4hF8Aq_H-f@W<zJ&vq;{YqO|4
[binary patch data elided]

diff --git a/branches/bug1734/src/ZConfig/doc/zconfig.tex b/branches/bug1734/src/ZConfig/doc/zconfig.tex
deleted file mode 100644
index 749cfa23..00000000
--- a/branches/bug1734/src/ZConfig/doc/zconfig.tex
+++ /dev/null
@@ -1,1856 +0,0 @@
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%
-% Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-% All Rights Reserved.
-%
-% This software is subject to the provisions of the Zope Public License,
-% Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-% THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-% WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-% WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-% FOR A PARTICULAR PURPOSE.
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\documentclass{howto}
-\usepackage{xmlmarkup}
-
-\newcommand{\datatype}[1]{\strong{#1}}
-
-\title{ZConfig Package Reference}
-
-%\date{27 October 2003}
-\release{2.2}
-\setshortversion{2.2}
-
-\author{Zope Corporation}
-\authoraddress{
-    Lafayette Technology Center\\
-    513 Prince Edward Street\\
-    Fredericksburg, VA 22401\\
-    \url{http://www.zope.com/}
-}
-
-\begin{document}
-\maketitle
-
-\begin{abstract}
-\noindent
-This document describes the syntax and API used in configuration files
-for components of a Zope installation written by Zope Corporation.  This
-configuration mechanism is itself configured using a schema specification
-written in XML.
-\end{abstract}
-
-\tableofcontents
-
-
-\section{Introduction \label{intro}}
-
-Zope uses a common syntax and API for configuration files designed for
-software components written by Zope Corporation.  Third-party software
-which is also part of a Zope installation may use a different syntax,
-though any software is welcome to use the syntax used by Zope
-Corporation.  Any software written in Python is free to use the
-\module{ZConfig} software to load such configuration files in order to
-ensure compatibility.  This software is covered by the Zope Public
-License, version 2.1.
-
-The \module{ZConfig} package has been tested with Python 2.3.  Older
-versions of Python are not supported.
-\module{ZConfig} relies only on the Python standard library.
-
-Configurations which use \module{ZConfig} are described using
-\dfn{schema}.  A schema is a specification for the allowed structure
-and content of the configuration.  \module{ZConfig} schema are written
-using a small XML-based language.  The schema language allows the
-schema author to specify the names of the keys allowed at the top
-level and within sections, to define the types of sections which may
-be used (and where), the type of each value, whether a key or
-section must be specified or is optional, default values for keys, and
-whether a value can be given only once or repeatedly.
-
-
-\section{Configuration Syntax \label{syntax}}
-
-Like the \ulink{\module{ConfigParser}}
-{http://docs.python.org/lib/module-ConfigParser.html}
-format, this format supports key-value pairs arranged in sections.
-Unlike the \module{ConfigParser} format, sections are typed and can be
-organized hierarchically.
-Additional files may be included if needed.  Schema components not
-specified in the application schema can be imported from the
-configuration file.  Though both formats are substantially
-line-oriented, this format is more flexible.
-
-The intent of supporting nested sections is to allow setting up the
-configurations for loosely-associated components in a container.  For
-example, each process running on a host might get its configuration
-section from that host's section of a shared configuration file.
-
-The top level of a configuration file consists of a series of
-inclusions, key-value pairs, and sections.
-
-Comments can be added on lines by themselves.  A comment has a
-\character{\#} as the first non-space character and extends to the end
-of the line:
-
-\begin{verbatim}
-# This is a comment
-\end{verbatim}
-
-An inclusion is expressed like this:
-
-\begin{verbatim}
-%include defaults.conf
-\end{verbatim}
-
-The resource to be included can be specified by a relative or absolute
-URL, resolved relative to the URL of the resource the
-\keyword{\%include} directive is located in.
-
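-The included resource may equally be named by an absolute URL (the URL
-here is hypothetical):
-
-\begin{verbatim}
-%include http://www.example.com/defaults.conf
-\end{verbatim}
-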
-
-A key-value pair is expressed like this:
-
-\begin{verbatim}
-key value
-\end{verbatim}
-
-The key may include any non-whitespace characters except for parentheses.
-The value contains all the characters between the key and the end of
-the line, with surrounding whitespace removed.
-
-Since comments must be on lines by themselves, the \character{\#}
-character can be part of a value:
-
-\begin{verbatim}
-key value # still part of the value
-\end{verbatim}
-
-Sections may be either empty or non-empty.  An empty section may be
-used to provide an alias for another section.
-
-A non-empty section starts with a header, contains configuration
-data on subsequent lines, and ends with a terminator.
-
-The header for a non-empty section has this form (square brackets
-denote optional parts):
-
-\begin{alltt}
-<\var{section-type} \optional{\var{name}} >
-\end{alltt}
-
-\var{section-type} and \var{name} both have the same syntactic
-constraints as key names.
-
-The terminator looks like this:
-
-\begin{alltt}
-</\var{section-type}>
-\end{alltt}
-
-The configuration data in a non-empty section consists of a sequence
-of one or more key-value pairs and sections.  For example:
-
-\begin{verbatim}
-<my-section>
-    key-1 value-1
-    key-2 value-2
-
-    <another-section>
-        key-3 value-3
-    </another-section>
-</my-section>
-\end{verbatim}
-
-(The indentation is used here for clarity, but is not required for
-syntactic correctness.)
-
-The header for empty sections is similar to that of non-empty
-sections, but there is no terminator:
-
-\begin{alltt}
-<\var{section-type} \optional{\var{name}} />
-\end{alltt}
-
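-For example, an empty section of a hypothetical section type
-\code{mailhost}, named \code{smtp}, would be written as:
-
-\begin{verbatim}
-<mailhost smtp />
-\end{verbatim}
-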
-
-\subsection{Extending the Configuration Schema}
-
-As we'll see in section~\ref{writing-schema}, ``Writing Configuration
-Schema,'' what can be written in a configuration is controlled by
-schemas which can be built from \emph{components}.  These components
-can also be used to extend the set of implementations of objects the
-application can handle.  What this means when writing a configuration
-is that third-party implementations of application object types can be
-used wherever those application types are used in the configuration,
-if there's a \module{ZConfig} component available for that
-implementation.
-
-The configuration file can use an \keyword{\%import} directive to load
-a named component:
-
-\begin{verbatim}
-%import Products.Ape
-\end{verbatim}
-
-The text to the right of the \keyword{\%import} keyword must be the
-name of a Python package; the \module{ZConfig} component provided by
-that package will be loaded and incorporated into the schema being
-used to load the configuration file.  After the import, section types
-defined in the component may be used in the configuration.
-
-More detail is needed for this to really make sense.
-
-A schema may define section types which are \emph{abstract}; these
-cannot be used directly in a configuration, but multiple concrete
-section types can be defined which \emph{implement} the abstract
-types.  Wherever the application allows an abstract type to be used,
-any concrete type which implements that abstract type can be used in
-an actual configuration.
-
-The \keyword{\%import} directive allows loading schema components
-which provide alternate concrete section types which implement the
-abstract types defined by the application.  This allows third-party
-implementations of abstract types to be used in place of or in
-addition to implementations provided with the application.
-
-Consider an example application which supports logging in
-the same way Zope 2 does.  There are some parameters which configure
-the general behavior of the logging mechanism, and an arbitrary number
-of \emph{log handlers} may be specified to control how the log
-messages are handled.  Several log handlers are provided by the
-application.  Here is an example logging configuration:
-
-\begin{verbatim}
-<eventlog>
-  level verbose
-
-  <logfile>
-    path /var/log/myapp/events.log
-  </logfile>
-</eventlog>
-\end{verbatim}
-
-A third-party component may provide a log handler to send
-high-priority alerts to the system administrator's text pager or
-SMS-capable phone.  All that's needed is to install the implementation
-so it can be imported by Python, and modify the configuration:
-
-\begin{verbatim}
-%import my.pager.loghandler
-
-<eventlog>
-  level verbose
-
-  <logfile>
-    path /var/log/myapp/events.log
-  </logfile>
-
-  <pager>
-    number   1-800-555-1234
-    message  Something broke!
-  </pager>
-</eventlog>
-\end{verbatim}
-
-
-\subsection{Textual Substitution in Values}
-
-\module{ZConfig} provides a limited way to re-use portions of a value
-using simple string substitution.  To use this facility, define named
-bits of replacement text using the \keyword{\%define} directive, and
-reference these texts from values.
-
-The syntax for \keyword{\%define} is:
-
-\begin{alltt}
-%define \var{name} \optional{\var{value}}
-\end{alltt}
-
-The value of \var{name} must be a sequence of letters, digits, and
-underscores, and may not start with a digit; the namespace for these
-names is separate from the other namespaces used with
-\module{ZConfig}, and is case-insensitive.  If \var{value} is
-omitted, it will be the empty string.  If given, there must be
-whitespace between \var{name} and \var{value}; \var{value} will not
-include any whitespace on either side, just like values from key-value
-pairs.
-
-Names must be defined before they are used, and may not be
-re-defined.  All resources being parsed as part of a configuration
-share a single namespace for defined names.  This means that resources
-which may be included more than once should not define any names.
-
-References to defined names from configuration values use the syntax
-described for the \refmodule{ZConfig.substitution} module.
-Configuration values which include a \character{\$} as part of the
-actual value will need to use \code{\$\$} to get a single
-\character{\$} in the result.
-
-The values of defined names are processed in the same way as
-configuration values, and may contain references to named
-definitions.
-
-For example, the value for \code{key} will evaluate to \code{value}:
-
-\begin{verbatim}
-%define name value
-key $name
-\end{verbatim} %$ <-- bow to font-lock
-
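-Similarly, since \code{\$\$} collapses to a single \character{\$}, the
-value of the hypothetical key \code{price} below evaluates to
-\code{\$19.99}:
-
-\begin{verbatim}
-price $$19.99
-\end{verbatim}
-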
-
-\section{Writing Configuration Schema \label{writing-schema}}
-
-\module{ZConfig} schema are written as XML documents.
-
-Data types are searched in a special namespace defined by the data
-type registry.  The default registry has slightly magical semantics:
-If the value can be matched to a standard data type when interpreted
-as a \datatype{basic-key}, the standard data type will be used.  If
-that fails, the value must be a \datatype{dotted-name} containing at
-least one dot, and a conversion function will be sought using the
-\method{search()} method of the data type registry used to load the
-schema.
-
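-As a brief illustration before looking at the schema language itself,
-this is how an application typically consumes a schema; a minimal
-sketch, assuming hypothetical file names:
-
-\begin{verbatim}
-import ZConfig
-
-# Load the schema once; it can be re-used for many configuration files.
-schema = ZConfig.loadSchema("schema.xml")
-
-# Parse a configuration file against the schema.  This returns the
-# configuration object and handler information as a tuple.
-config, handlers = ZConfig.loadConfig(schema, "myapp.conf")
-\end{verbatim}
-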
-
-\subsection{Schema Elements \label{elements}}
-
-For each element, the content model is shown, followed by a
-description of how the element is used, and then a list of the
-available attributes.  For each attribute, the type of the value is
-given as either the name of a \module{ZConfig} datatype or an XML
-attribute value type.  Familiarity with XML's Document Type Definition
-language is helpful.
-
-The following elements are used to describe a schema:
-
-\begin{elementdesc}{schema}{description?, metadefault?, example?,
-                            import*,
-                            (sectiontype | abstracttype)*,
-                            (section | key | multisection |
-                            multikey)*}
-  Document element for a \module{ZConfig} schema.
-
-  \begin{attributedesc}{extends}{\datatype{space-separated-url-references}}
-  A list of URLs of base schemas from which this section type will inherit key,
-  section, and section type declarations.  If omitted, this schema
-  is defined using only the keys, sections, and section types contained within
-  the \element{schema} element.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    section.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If any base schemas are listed in the \attribute{extends}
-    attribute, the default value for this attribute comes from the base
-    schemas.  If the base schemas all use the same \attribute{datatype}, then
-    that data type will be the default value for the extending schema.  If
-    there are no base schemas, the default value is \datatype{null}, which
-    means that the \module{ZConfig} section object will be used unconverted.
-    If the base schemas have different \attribute{datatype} definitions, you
-    must explicitly define the \attribute{datatype} in the extending schema.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{keytype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to keys found in
-    this section.  This can be used to constrain key values in
-    different ways; two data types which may be especially useful are
-    the \datatype{identifier} and \datatype{ipaddr-or-hostname}
-    types.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If any base schemas are listed in the \attribute{extends}
-    attribute, the default value for this attribute comes from the base
-    schemas.  If the base schemas all use the same \attribute{keytype}, then
-    that key type will be the default value for the extending schema.  If there
-    are no base schemas, the default value is \datatype{basic-key}.  If the
-    base schemas have different \attribute{keytype} definitions, you must
-    explicitly define the \attribute{keytype} in the extending schema.
-  \end{attributedesc}
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended to partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts with the \element{schema} element if it hasn't been
-    overridden by an inner element with a \attribute{prefix}
-    attribute.
-  \end{attributedesc}
-\end{elementdesc}
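-
-As a purely illustrative example, a minimal schema document defines a
-single optional key:
-
-\begin{verbatim}
-<schema>
-  <description>Configuration for a small example application.</description>
-  <key name="greeting" datatype="string" default="Hello, world!"/>
-</schema>
-\end{verbatim}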
-
-\begin{elementdesc}{description}{PCDATA}
-  Descriptive text explaining the purpose of the container of the
-  \element{description} element.  Most other elements can contain
-  a \element{description} element as their first child.
-  At most one \element{description} element may appear in a given
-  context.
-
-  \begin{attributedesc}{format}{NMTOKEN}
-    Optional attribute that can be added to indicate what conventions
-    are used to mark up the contained text.  This is intended to serve
-    as a hint for documentation extraction tools.  Suggested values
-    are:
-
-    \begin{tableii}{l|l}{code}{Value}{Content Format}
-      \lineii{plain}{\mimetype{text/plain}; blank lines separate paragraphs}
-      \lineii{rest}{reStructuredText}
-      \lineii{stx}{Classic Structured Text}
-    \end{tableii}
-  \end{attributedesc}
-\end{elementdesc}
-
-\begin{elementdesc}{example}{PCDATA}
-  An example value.  This serves only as documentation.
-\end{elementdesc}
-
-\begin{elementdesc}{metadefault}{PCDATA}
-  A description of the default value, for human readers.  This may
-  include information about how a computed value is determined when
-  the schema does not specify a default value.
-\end{elementdesc}
-
-\begin{elementdesc}{abstracttype}{description?}
-  Define an abstract section type.
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the abstract section type; required.
-  \end{attributedesc}
-\end{elementdesc}
-
-\begin{elementdesc}{sectiontype}{description?, (section | key |
-                                  multisection | multikey)*}
-  Define a concrete section type.
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    section.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  If \attribute{datatype} is omitted and
-    \attribute{extends} is used, the \attribute{datatype} from the
-    section type identified by the \attribute{extends} attribute is
-    used.
-  \end{attributedesc}
-
-  \begin{attributedesc}{extends}{\datatype{basic-key}}
-    The name of a concrete section type from which this section type
-    acquires all key and section declarations.  This type does
-    \emph{not} automatically implement any abstract section type
-    implemented by the named section type.  If omitted, this section
-    is defined with only the keys and sections contained within the
-    \element{sectiontype} element.  The new section type is called a
-    \emph{derived} section type, and the type named by this attribute
-    is called the \emph{base} type.  Values for the
-    \attribute{datatype} and \attribute{keytype} attributes are
-    acquired from the base type if not specified.
-  \end{attributedesc}
-
-  \begin{attributedesc}{implements}{\datatype{basic-key}}
-    The name of an abstract section type which this concrete section
-    type implements.  If omitted, this section type does not implement
-    any abstract type, and can only be used if it is specified
-    directly in a schema or other section type.
-  \end{attributedesc}
-
-  \begin{attributedesc}{keytype}{\datatype{basic-key}}
-    The data type converter which will be applied to keys found in
-    this section.  This can be used to constrain key values in
-    different ways; two data types which may be especially useful are
-    the \datatype{identifier} and \datatype{ipaddr-or-hostname}
-    types.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.  The default value is \datatype{basic-key}.  If
-    \attribute{keytype} is omitted and \attribute{extends} is used,
-    the \attribute{keytype} from the section type identified by the
-    \attribute{extends} attribute is used.
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the section type; required.
-  \end{attributedesc}
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended to partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts in the \element{sectiontype} element.  If omitted, the
-    prefix specified by a containing context is used if specified.
-  \end{attributedesc}
-\end{elementdesc}
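-
-To illustrate (the type names here are hypothetical), an abstract type
-and a concrete section type implementing it might be declared as:
-
-\begin{verbatim}
-<abstracttype name="loghandler"/>
-
-<sectiontype name="syslog" implements="loghandler">
-  <key name="facility" default="user"/>
-</sectiontype>
-\end{verbatim}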
-
-\begin{elementdesc}{import}{EMPTY}
-  Import a schema component.  Exactly one of the attributes
-  \attribute{package} and \attribute{src} must be specified.
-
-  \begin{attributedesc}{file}{file name without directory information}
-    Name of the component file within a package; if not specified,
-    \file{component.xml} is used.  This may only be given when
-    \attribute{package} is used.  (The \file{component.xml} file is
-    always used when importing via \keyword{\%import} from a
-    configuration file.)
-  \end{attributedesc}
-
-  \begin{attributedesc}{package}{\datatype{dotted-suffix}}
-    Name of a Python package that contains the schema component being
-    imported.  The component will be loaded from the file identified
-    by the \attribute{file} attribute, or \file{component.xml} if
-    \attribute{file} is not specified.  If the package name given
-    starts with a dot (\character{.}), the name used will be the
-    current prefix and the value of this attribute concatenated.
-  \end{attributedesc}
-
-  \begin{attributedesc}{src}{\datatype{url-reference}}
-    URL to a separate schema which can provide useful types.  The
-    referenced resource must contain a schema, not a schema
-    component.  Section types defined or imported by the referenced
-    schema are added to the schema containing the \element{import};
-    top-level keys and sections are ignored.
-  \end{attributedesc}
-\end{elementdesc}
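-
-For illustration, the two forms of \element{import} look like this;
-the package shown is one shipped with \module{ZConfig}, while the URL
-in the second form is hypothetical:
-
-\begin{verbatim}
-<import package="ZConfig.components.basic"/>
-<import src="http://www.example.com/schemas/mytypes.xml"/>
-\end{verbatim}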
-
-\begin{elementdesc}{key}{description?, example?, metadefault?, default*}
-  A \element{key} element is used to describe a key-value pair which
-  may occur at most once in the section type or top-level schema in
-  which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this key should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the key name to underscores.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    key.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.
-  \end{attributedesc}
-
-  \begin{attributedesc}{default}{\datatype{string}}
-    If the key-value pair is optional and this attribute is specified,
-    the value of this attribute will be converted using the appropriate
-    data type converter and returned to the application as the
-    configured value.  This attribute may not be specified if the
-    \attribute{required} attribute is \code{yes}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the key, as it must be given in a configuration
-    instance, or `\code{+}'.  If the value is `\code{+}', any name not
-    already specified as a key may be used, and the configuration
-    value for the key will be a dictionary mapping from the key name
-    to the value.  In this case, the \attribute{attribute} attribute
-    must be specified, and the data type for the key will be applied
-    to each key which is found.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the key.  If the value is \code{yes}, the
-    \attribute{default} attribute may not be specified and an error
-    will be reported if the configuration instance does not specify a
-    value for the key.  If the value is \code{no} (the default) and
-    the configuration instance does not specify a value, the value
-    reported to the application will be that specified by the
-    \attribute{default} attribute, if given, or \code{None}.
-  \end{attributedesc}
-\end{elementdesc}
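-
-A hypothetical \element{key} declaration using several of these
-attributes might look like this:
-
-\begin{verbatim}
-<key name="retry-interval"
-     datatype="time-interval"
-     default="30s">
-  <description>
-    Time to wait before retrying a failed connection.
-  </description>
-</key>
-\end{verbatim}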
-
-
-\begin{elementdesc}{multikey}{description?, example?, metadefault?, default*}
-  A \element{multikey} element is used to describe a key-value pair
-  which may occur any number of times in the section type or top-level
-  schema in which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this key should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the key name to underscores.
-  \end{attributedesc}
-
-  \begin{attributedesc}{datatype}{\datatype{basic-key}
-                                  or \datatype{dotted-name}}
-    The data type converter which will be applied to the value of this
-    key.  If the value is a \datatype{dotted-name} that begins
-    with a period, the value of \attribute{prefix} will be pre-pended,
-    if set.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the key, as it must be given in a configuration
-    instance, or `\code{+}'.  If the value is `\code{+}', any name not
-    already specified as a key may be used, and the configuration
-    value for the key will be a dictionary mapping from the key name
-    to the value.  In this case, the \attribute{attribute} attribute
-    must be specified, and the data type for the key will be applied
-    to each key which is found.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the key.  If the value is \code{yes}, no \element{default}
-    elements may be specified and an error will be reported if the
-    configuration instance does not specify at least one value for the
-    key.  If the value is \code{no} (the default) and the
-    configuration instance does not specify a value, the value
-    reported to the application will be a list containing one element
-    for each \element{default} element specified as a child of the
-    \element{multikey}.  Each value will be individually converted
-    according to the \attribute{datatype} attribute.
-  \end{attributedesc}
-\end{elementdesc}
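-
-A hypothetical \element{multikey} with two default values might be
-declared like this (assuming the \datatype{inet-address} conversion
-accepts \code{host:port} strings):
-
-\begin{verbatim}
-<multikey name="server" attribute="servers"
-          datatype="inet-address">
-  <default>www.example.com:80</default>
-  <default>www.example.org:80</default>
-</multikey>
-\end{verbatim}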
-
-
-\begin{elementdesc}{default}{PCDATA}
-  Each \element{default} element specifies a single default value for
-  a \element{multikey}.  This element can be repeated to produce a
-  list of individual default values.  The text contained in the
-  element will be passed to the datatype conversion for the
-  \element{multikey}.
-
-  \begin{attributedesc}{key}{key type of the containing sectiontype}
-    Key to associate with the default value.  This is only used for
-    defaults of a \element{key} or \element{multikey} with a
-    \attribute{name} of \code{+}; in that case this attribute is
-    required.  It is an error to use the \attribute{key} attribute
-    with a \element{default} element for a \element{multikey} with a
-    name other than \code{+}.
-
-    \begin{notice}[warning]
-      The datatype of this attribute is that of the section type
-      \emph{containing} the actual keys, not necessarily that of the
-      section type which defines the key.  If a derived section
-      overrides the key type of the base section type, the actual
-      key type used is that of the derived section.
-
-      This can lead to confusing errors in schemas, though the
-      \refmodule{ZConfig} package checks for this when the schema is
-      loaded.  This situation is particularly likely when a derived
-      section type uses a key type which collapses multiple default
-      keys which were not collapsed by the base section type.
-
-      Consider this example schema:
-
-\begin{verbatim}
-<schema>
-  <sectiontype name="base" keytype="identifier">
-    <key name="+" attribute="mapping">
-      <default key="foo">some value</default>
-      <default key="FOO">some value</default>
-    </key>
-  </sectiontype>
-
-  <sectiontype name="derived" keytype="basic-key"
-               extends="base"/>
-
-  <section type="derived" name="*" attribute="section"/>
-</schema>
-\end{verbatim}
-
-      When this schema is loaded, a set of defaults for the
-      \datatype{derived} section type is computed.  Since
-      \datatype{basic-key} is case-insensitive (everything is
-      converted to lower case), \samp{foo} and \samp{FOO} are both
-      converted to \samp{foo}, which clashes since \element{key} only
-      allows one value for each key.
-    \end{notice}
-  \end{attributedesc}
-\end{elementdesc}
-
-
-\begin{elementdesc}{section}{description?}
-  A \element{section} element is used to describe a section which may
-  occur at most once in the section type or top-level schema in which
-  it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which this section should be the
-    value of on a \class{SectionValue} instance.  This must be unique
-    within the immediate contents of a section type or schema.  If
-    this attribute is not specified, an attribute name will be
-    computed by converting hyphens in the section name to underscores,
-    in which case the \attribute{name} attribute may not be \code{*}
-    or \code{+}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    The name of the section, as it must be given in a configuration
-    instance, \code{*}, or \code{+}.  If the value is \code{*}, any
-    name not already specified as a key may be used.  If the value is
-    \code{*} or \code{+}, the \attribute{attribute} attribute must be
-    specified.  If the value is \code{*}, any name is allowed, or the
-    name may be omitted.  If the value is \code{+}, any name is
-    allowed, but some name must be provided.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide the section.  If the value is \code{yes}, an error will be
-    reported if the configuration instance does not include the
-    section.  If the value is \code{no} (the default) and the
-    configuration instance does not include the section, the value
-    reported to the application will be \code{None}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{type}{\datatype{basic-key}}
-    The section type which matching sections must implement.  If the
-    value names an abstract section type, matching sections in the
-    configuration file must be of a type which specifies that it
-    implements the named abstract type.  If the name identifies a
-    concrete type, the section type must match exactly.
-  \end{attributedesc}
-\end{elementdesc}
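-
-For illustration, a declaration requiring exactly one section of a
-hypothetical concrete type \code{database} might read:
-
-\begin{verbatim}
-<section name="*" type="database" attribute="database"
-         required="yes"/>
-\end{verbatim}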
-
-
-\begin{elementdesc}{multisection}{description?}
-  A \element{multisection} element is used to describe a section which
-  may occur any number of times in the section type or top-level
-  schema in which it is listed.
-
-  \begin{attributedesc}{attribute}{\datatype{identifier}}
-    The name of the Python attribute which matching sections should be
-    the value of on a \class{SectionValue} instance.  This is required
-    and must be unique within the immediate contents of a section type
-    or schema.  The \class{SectionValue} instance will contain a list
-    of matching sections.
-  \end{attributedesc}
-
-  \begin{attributedesc}{handler}{\datatype{basic-key}}
-  \end{attributedesc}
-
-  \begin{attributedesc}{name}{\datatype{basic-key}}
-    For a \element{multisection}, any name not already specified as a
-    key may be used.  If the value is \code{*} or \code{+}, the
-    \attribute{attribute} attribute must be specified.  If the value
-    is \code{*}, any name is allowed, or the name may be omitted.  If
-    the value is \code{+}, any name is allowed, but some name must be
-    provided.  No other value for the \attribute{name} attribute is
-    allowed for a \element{multisection}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{required}{\code{yes|no}}
-    Specifies whether the configuration instance is required to
-    provide at least one matching section.  If the value is
-    \code{yes}, an error will be reported if the configuration
-    instance does not include the section.  If the value is \code{no}
-    (the default) and the configuration instance does not include the
-    section, the value reported to the application will be
-    \code{None}.
-  \end{attributedesc}
-
-  \begin{attributedesc}{type}{\datatype{basic-key}}
-    The section type which matching sections must implement.  If the
-    value names an abstract section type, matching sections in the
-    configuration file must be of types which specify that they
-    implement the named abstract type.  If the name identifies a
-    concrete type, the section type must match exactly.
-  \end{attributedesc}
-\end{elementdesc}
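-
-For illustration, a schema fragment allowing any number of sections
-of a hypothetical type \code{server} could be declared like this:
-
-\begin{verbatim}
-<multisection name="*" type="server" attribute="servers"/>
-\end{verbatim}
-
-The \member{servers} attribute of the configuration object would then
-be a list with one element for each \code{server} section found in
-the configuration file.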
-
-
-\subsection{Schema Components \label{schema-components}}
-
-XXX need more explanation
-
-\module{ZConfig} supports schema components that can be provided by
-disparate packages, and allows them to be knit together into a
-concrete schema for an application.  Components cannot add keys or
-sections to the top level of the application schema.
-
-A schema \dfn{component} is allowed to define new abstract types and
-concrete section types.
-Components are identified using a dotted-name, similar to a Python
-module name.  For example, one component may be \code{zodb.storage}.
-
-Schema components are stored alongside application code since they
-directly reference datatype code.  Schema components are provided by
-Python packages.  The component definition is normally stored in the
-file \file{component.xml}; an alternate filename may be specified
-using the \attribute{file} attribute of the \element{import} element.
-Components imported using the \keyword{\%import} keyword from a
-configuration file must be named \file{component.xml}.
-The component defines the types provided by that component; it must
-have a \element{component} element as the document element.
-
-The following element is used as the document element for schema
-components.  Note that schema components do not allow keys and
-sections to be added to the top-level of a schema; they serve only to
-provide type definitions.
-
-\begin{elementdesc}{component}{description?, (abstracttype | sectiontype)*}
-  The top-level element for schema components.
-
-  \begin{attributedesc}{prefix}{\datatype{dotted-name}}
-    Prefix to be pre-pended in front of partial dotted-names that
-    start with a period.  The value of this attribute is used in all
-    contexts within the \element{component} element if it hasn't been
-    overridden by an inner element with a \attribute{prefix}
-    attribute.
-  \end{attributedesc}
-\end{elementdesc}
-
-
-\section{Standard \module{ZConfig} Datatypes\label{standard-datatypes}}
-
-There are a number of data types which can be identified using the
-\attribute{datatype} attribute on \element{key},
-\element{sectiontype}, and \element{schema} elements.
-Applications may extend the set of datatypes by calling the
-\method{register()} method of the data type registry being used or by
-using Python dotted-names to refer to conversion routines defined in
-code.
-
-The following data types are provided by the default type registry.
-
-\begin{definitions}
-\term{\datatype{basic-key}}
-  The default data type for a key in a ZConfig configuration file.
-  The result of conversion is always lower-case, and matches the
-  regular expression \regexp{[a-z][-._a-z0-9]*}.
-
-\term{\datatype{boolean}}
-  Convert a human-friendly string to a boolean value.  The names
-  \code{yes}, \code{on}, and \code{true} convert to \constant{True},
-  while \code{no}, \code{off}, and \code{false} convert to
-  \constant{False}.  Comparisons are case-insensitive.  All other
-  input strings are disallowed.
-
-\term{\datatype{byte-size}}
-  A specification of a size, with byte multiplier suffixes (for
-  example, \samp{128MB}).  Suffixes are case insensitive and may be
-  \samp{KB}, \samp{MB}, or \samp{GB}.
-
-\term{\datatype{dotted-name}}
-  A string consisting of one or more \datatype{identifier} values
-  separated by periods (\character{.}).
-
-\term{\datatype{dotted-suffix}}
-  A string consisting of one or more \datatype{identifier} values
-  separated by periods (\character{.}), possibly prefixed by a
-  period.  This can be used to indicate a dotted name that may be
-  specified relative to some base dotted name.
-
-\term{\datatype{existing-dirpath}}
-  Validates that the directory portion of a pathname exists.  For
-  example, if the value provided is \file{/foo/bar}, \file{/foo} must
-  be an existing directory.  No conversion is performed.
-
-\term{\datatype{existing-directory}}
-  Validates that a directory by the given name exists on 
-  the local filesystem.  No conversion is performed. 
-
-\term{\datatype{existing-file}}
-  Validates that a file by the given name exists.  No conversion 
-  is performed. 
-
-\term{\datatype{existing-path}}
-  Validates that a path (file, directory, or symlink) by the
-  given name exists on the local filesystem.  No conversion
-  is performed.
-
-\term{\datatype{float}}
-  A Python float.  \code{Inf}, \code{-Inf}, and \code{NaN} are not
-  allowed.
-
-\term{\datatype{identifier}}
-  Any valid Python identifier.
-
-\term{\datatype{inet-address}}
-  An Internet address expressed as a \code{(\var{hostname},
-  \var{port})} pair.  If only the port is specified, the default host
-  will be returned for \var{hostname}.  The default host is
-  \code{localhost} on Windows and the empty string on all other
-  platforms.  If the port is omitted, \code{None} will be returned for
-  \var{port}.
-
-\term{\datatype{integer}}
-  Convert a value to an integer.  This will be a Python \class{int} if
-  the value is in the range allowed by \class{int}, otherwise a Python
-  \class{long} is returned.
-
-\term{\datatype{ipaddr-or-hostname}}
-  Validates that the value is a valid IP address or hostname.  If the
-  first character is a digit, the value is assumed to be an IP
-  address; otherwise it is assumed to be a hostname.  Hostnames are
-  converted to lower case.
-
-\term{\datatype{locale}}
-  Any valid locale specifier accepted by the available
-  \function{locale.setlocale()} function.  Be aware that only the
-  \code{'C'} locale is supported on some platforms.
-
-\term{\datatype{null}}
-  No conversion is performed; the value passed in is the value
-  returned.  This is the default data type for section values.
-
-\term{\datatype{port-number}}
-  Returns a valid port number as an integer.  Validity does not imply
-  that any particular use may be made of the port, however.  For
-  example, port numbers lower than 1024 generally cannot be bound by
-  non-root users.
-
-\term{\datatype{socket-address}}
-  An address for a socket.  The converted value is an object providing
-  two attributes.  \member{family} specifies the address family
-  (\constant{AF_INET} or \constant{AF_UNIX}), with \code{None} instead
-  of \constant{AF_UNIX} on platforms that don't support it.  The
-  \member{address} attribute will be the address that should be passed
-  to the socket's \method{bind()} method.  If the family is
-  \constant{AF_UNIX}, the specific address will be a pathname; if the
-  family is \constant{AF_INET}, the second part will be the result of
-  the \datatype{inet-address} conversion.
-
-\term{\datatype{string}}
-  Returns the input value as a string.  If the source is a Unicode
-  string, this implies that it will be checked to be simple 7-bit
-  \ASCII.  This is the default data type for values in
-  configuration files.
-
-\term{\datatype{time-interval}}
-  A specification of a time interval in seconds, with multiplier
-  suffixes (for example, \code{12h}).  Suffixes are case insensitive
-  and may be \samp{s} (seconds), \samp{m} (minutes), \samp{h} (hours),
-  or \samp{d} (days).
-
-\term{\datatype{timedelta}}
-  Similar to the \datatype{time-interval}, this data type returns a Python
-  datetime.timedelta object instead of a float.  The set of suffixes
-  recognized by \datatype{timedelta} are: \samp{w} (weeks), \samp{d} (days),
-  \samp{h} (hours), \samp{m} (minutes), \samp{s} (seconds).  Values may be
-  floats, for example: \code{4w 2.5d 7h 12m 0.001s}.
-
-\end{definitions}
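-
-As a small illustration, these conversions can be exercised directly
-through the default registry (see the \module{ZConfig.datatypes}
-documentation below); the results shown follow from the suffix
-arithmetic (\code{128 * 1024} and \code{12 * 3600}):
-
-\begin{verbatim}
->>> import ZConfig.datatypes
->>> registry = ZConfig.datatypes.Registry()
->>> registry.get('byte-size')('128KB')
-131072
->>> registry.get('time-interval')('12h')
-43200
-\end{verbatim}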
-
-
-\section{Standard \module{ZConfig} Schema Components
-         \label{standard-components}}
-
-\module{ZConfig} provides a few convenient schema components as part
-of the package.  These may be used directly or can serve as examples
-for creating new components.
-
-
-\subsection{\module{ZConfig.components.basic}}
-
-The \module{ZConfig.components.basic} package provides small
-components that can be helpful in composing application-specific
-components and schema.  This package does not itself represent any
-large body of functionality.  The default component provided by this
-package simply imports all of the smaller components.  This can be
-imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.basic"/>
-\end{verbatim}
-
-Each of the smaller components is documented directly; importing these
-selectively can reduce the time it takes to load a schema slightly,
-and allows replacing the other basic components with alternate
-components (by using different imports that define the same type
-names) if desired.
-
-
-\subsubsection{The Mapping Section Type \label{basic-mapping}}
-
-There is a basic section type that behaves like a simple Python
-mapping; this can be imported directly using
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-\end{verbatim}
-
-This defines a single section type, \datatype{ZConfig.basic.mapping}.
-When this is used, the section value is a Python dictionary mapping
-keys to string values.
-
-This type is intended to be used by extending it in simple ways.  The
-simplest is to create a new section type name that makes more sense
-for the application:
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-
-<sectiontype name="my-mapping"
-             extends="ZConfig.basic.mapping"
-             />
-
-<section name="*"
-         type="my-mapping"
-         attribute="map"
-         />
-\end{verbatim}
-
-This allows a configuration to contain a mapping from
-\datatype{basic-key} names to string values like this:
-
-\begin{verbatim}
-<my-mapping>
-  This that
-  and the other
-</my-mapping>
-\end{verbatim}
-
-The value of the configuration object's \member{map} attribute would
-then be the dictionary
-
-\begin{verbatim}
-{'this': 'that',
- 'and': 'the other',
- }
-\end{verbatim}
-
-(Recall that the \datatype{basic-key} data type converts everything to
-lower case.)
-
-Perhaps a more interesting application of
-\datatype{ZConfig.basic.mapping} is using the derived type to override
-the \attribute{keytype}.  If we have the conversion function:
-
-\begin{verbatim}
-def email_address(value):
-    userid, hostname = value.split("@", 1)
-    hostname = hostname.lower()  # normalize what we know we can
-    return "%s@%s" % (userid, hostname)
-\end{verbatim}
-
-then we can use this as the key type for a derived mapping type:
-
-\begin{verbatim}
-<import package="ZConfig.components.basic" file="mapping.xml"/>
-
-<sectiontype name="email-users"
-             extends="ZConfig.basic.mapping"
-             keytype="mypkg.datatypes.email_address"
-             />
-
-<section name="*"
-         type="email-users"
-         attribute="email_users"
-         />
-\end{verbatim}
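-
-With this schema fragment, a configuration section like the following
-(the addresses are hypothetical) yields a dictionary whose keys have
-had their hostname portions normalized to lower case:
-
-\begin{verbatim}
-<email-users>
-  Kim@Example.COM    admin
-  sasha@EXAMPLE.net  user
-</email-users>
-\end{verbatim}
-
-The \member{email_users} attribute would map
-\code{'Kim@example.com'} to \code{'admin'} and
-\code{'sasha@example.net'} to \code{'user'}.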
-
-
-\subsection{\module{ZConfig.components.logger}}
-
-The \module{ZConfig.components.logger} package provides configuration
-support for the \ulink{\module{logging} package}
-{http://docs.python.org/lib/module-logging.html} in
-Python's standard library.  This component can be imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger"/>
-\end{verbatim}
-
-This component defines two abstract types and several concrete section
-types.  These can be imported as a unit, as above, or as four smaller
-components usable in creating alternate logging packages.
-
-The first of the four smaller components contains the abstract types,
-and can be imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="abstract.xml"/>
-\end{verbatim}
-
-The two abstract types imported by this are:
-
-\begin{definitions}
-
-\term{\datatype{ZConfig.logger.log}}
-  Logger objects are represented by this abstract type.
-
-\term{\datatype{ZConfig.logger.handler}}
-  Each logger object can have one or more ``handlers'' associated with
-  them.  These handlers are responsible for writing logging events to
-  some form of output stream using appropriate formatting.  The output
-  stream may be a file on a disk, a socket communicating with a server
-  on another system, or a series of \code{syslog} messages.  Section
-  types which implement this type represent these handlers.
-
-\end{definitions}
-
-
-The second and third of the smaller components provide section types
-that act as factories for \class{logging.Logger} objects.  These can be
-imported using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="eventlog.xml"/>
-<import package="ZConfig.components.logger" file="logger.xml"/>
-\end{verbatim}
-
-The types defined in these components implement the
-\datatype{ZConfig.logger.log} abstract type.  The \file{eventlog.xml}
-component defines an \datatype{eventlog} type which represents the
-root logger from the \module{logging} package (the return value of
-\function{logging.getLogger()}), while the \file{logger.xml} component
-defines a \datatype{logger} section type which represents a named
-logger (as returned by \function{logging.getLogger(\var{name})}).
-
-
-The fourth of the smaller components provides section types that are
-factories for \class{logging.Handler} objects.  This can be imported
-using
-
-\begin{verbatim}
-<import package="ZConfig.components.logger" file="handlers.xml"/>
-\end{verbatim}
-
-The types defined in this component implement the
-\datatype{ZConfig.logger.handler} abstract type.
-
-
-
-The configuration objects provided by both the logger and handler
-types are factories for the finished loggers and handlers.  These
-factories should be called with no arguments to retrieve the logger or
-log handler objects.  Calling the factories repeatedly will cause the
-same objects to be returned each time, so it's safe to simply call
-them to retrieve the objects.
-
-The factories for the logger objects, whether the \datatype{eventlog}
-or \datatype{logger} section type is used, provide a \method{reopen()}
-method which may be called to close any log files and re-open them.
-This is useful when using a \UNIX{} signal to effect log file
-rotation: the signal handler can call this method, and not have to
-worry about what handlers have been registered for the logger.
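-
-A minimal sketch of such a signal handler is shown below; the choice
-of \code{SIGUSR2} and the \code{factory} argument name are
-illustrative assumptions, not requirements of \module{ZConfig}:
-
-\begin{verbatim}
-import signal
-
-def install_log_rotation(factory):
-    # factory is the configured eventlog or logger section value
-    def handler(signum, frame):
-        # close and re-open the log files used by this logger
-        factory.reopen()
-    signal.signal(signal.SIGUSR2, handler)
-\end{verbatim}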
-
-Building an application that uses the logging components is fairly
-straightforward.  The schema needs to import the relevant components
-and declare their use:
-
-\begin{verbatim}
-<schema>
-  <import package="ZConfig.components.logger" file="eventlog.xml"/>
-  <import package="ZConfig.components.logger" file="handlers.xml"/>
-
-  <section type="eventlog" name="*" attribute="eventlog"
-           required="yes"/>
-</schema>
-\end{verbatim}
-
-In the application, the schema and configuration file should be loaded
-normally.  Once the configuration object is available, the logger
-factory should be called to configure Python's \module{logging} package:
-
-\begin{verbatim}
-import os
-import ZConfig
-
-def run(configfile):
-    schemafile = os.path.join(os.path.dirname(__file__), "schema.xml")
-    schema = ZConfig.loadSchema(schemafile)
-    config, handlers = ZConfig.loadConfig(schema, configfile)
-
-    # configure the logging package:
-    config.eventlog()
-
-    # now do interesting things
-\end{verbatim}
-
-An example configuration file for this application may look like this:
-
-\begin{verbatim}
-<eventlog>
-  level  info
-
-  <logfile>
-    path        /var/log/myapp
-    format      %(asctime)s %(levelname)s %(name)s %(message)s
-    # locale-specific date/time representation
-    dateformat  %c
-  </logfile>
-
-  <syslog>
-    level    error
-    address  syslog.example.net:514
-    format   %(levelname)s %(name)s %(message)s
-  </syslog>
-</eventlog>
-\end{verbatim}
-
-Refer to the \module{logging} package documentation for the names
-available in the message format strings (the \code{format} key in the
-log handlers).  The date format strings (the \code{dateformat} key in
-the log handlers) are the same as those accepted by the
-\function{time.strftime()} function.
-
-
-\begin{seealso}
-  \seepep{282}{A Logging System}
-         {The proposal which described the logging feature for
-          inclusion in the Python standard library.}
-  \seelink{http://docs.python.org/lib/module-logging.html}
-          {\module{logging} --- Logging facility for Python}
-          {Python's \module{logging} package documentation, from the
-           \citetitle[http://docs.python.org/lib/lib.html]
-           {Python Library Reference}.}
-  \seelink{http://www.red-dove.com/python_logging.html}
-          {Original Python \module{logging} package}
-          {This is the original source for the \module{logging}
-           package.  This is mostly of historical interest.}
-\end{seealso}
-
-
-\section{Using Components to Extend Schema}
-
-% XXX This section needs a lot of work, but should get people started
-% who really want to add new pieces to ZConfig-configured applications.
-
-It is possible to use schema components and the \keyword{\%import}
-construct to extend the set of section types available for a specific
-configuration file, and allow the new components to be used in place
-of standard components.
-
-The key to making this work is the use of abstract section types.
-Wherever the original schema accepts an abstract type, it is possible
-to load new implementations of the abstract type and use those instead
-of, or in addition to, the implementations loaded by the original
-schema.
-
-Abstract types are generally used to represent interfaces.  Sometimes
-these are interfaces for factory objects, and sometimes not, but
-there's an interface that the new component needs to implement.  What
-interface is required should be documented in the
-\element{description} element in the \element{abstracttype} element;
-this may be by reference to an interface specified in a Python module
-or described in some other bit of documentation.
-
-The following things need to be created to make the new component
-usable from the configuration file:
-
-\begin{enumerate}
-  \item An implementation of the required interface.
-
-  \item A schema component that defines a section type that contains
-        the information needed to construct the component.
-
-  \item A ``datatype'' function that converts configuration data to an
-        instance of the component.
-\end{enumerate}
-
-For simplicity, let's assume that the implementation is defined by a
-Python class.
-
-The example component we build here will be in the \module{noise}
-package, but any package will do.  Components loadable using
-\keyword{\%import} must be contained in the \file{component.xml} file;
-alternate filenames may not be selected by the \keyword{\%import}
-construct.
-
-Create a ZConfig component that provides a section type to support
-your component.  The new section type must declare that it implements
-the appropriate abstract type; it should probably look something like
-this:
-
-\begin{verbatim}
-<component prefix="noise.server">
-  <import package="ZServer"/>
-
-  <sectiontype name="noise-generator"
-               implements="ZServer.server"
-               datatype=".NoiseServerFactory">
-
-    <!-- specific configuration data should be described here -->
-
-    <key name="port"
-         datatype="port-number"
-         required="yes">
-      <description>
-        Port number to listen on.
-      </description>
-    </key>
-
-    <key name="color"
-         datatype=".noise_color"
-         default="white">
-      <description>
-        Silly way to specify a noise generation algorithm.
-      </description>
-    </key>
-
-  </sectiontype>
-</component>
-\end{verbatim}
-
-This example uses one of the standard ZConfig datatypes,
-\datatype{port-number}, and requires two additional types to be
-provided by the \module{noise.server} module:
-\class{NoiseServerFactory} and \function{noise_color()}.
-
-The \function{noise_color()} function is a datatype conversion for a
-key, so it accepts a string and returns the value that should be used:
-
-\begin{verbatim}
-_noise_colors = {
-    # color -> r,g,b
-    'white': (255, 255, 255),
-    'pink':  (255, 182, 193),
-    }
-
-def noise_color(string):
-    if string in _noise_colors:
-        return _noise_colors[string]
-    else:
-        raise ValueError('unknown noise color: %r' % string)
-\end{verbatim}
-
-\class{NoiseServerFactory} is a little different, as it's the datatype
-function for a section rather than a key.  The parameter isn't a
-string, but a section value object with two attributes, \member{port}
-and \member{color}.
-
-Since the \datatype{ZServer.server} abstract type requires that the
-component returned is a factory object, the datatype function can be
-implemented as the constructor for the class of the factory object.
-(If the datatype function needed to select among different
-implementation classes based on the configuration values, it would
-make more sense to use a simple function that returns the appropriate
-implementation.)
-
-A class that implements this datatype might look like this:
-
-\begin{verbatim}
-from ZServer.datatypes import ServerFactory
-from noise.generator import WhiteNoiseGenerator, PinkNoiseGenerator
-
-class NoiseServerFactory(ServerFactory):
-
-    def __init__(self, section):
-        # host and ip will be initialized by ServerFactory.prepare()
-        self.host = None
-        self.ip = None
-        self.port = section.port
-        self.color = section.color
-
-    def create(self):
-        if self.color == 'white':
-            generator = WhiteNoiseGenerator()
-        else:
-            generator = PinkNoiseGenerator()
-        return NoiseServer(self.ip, self.port, generator)
-\end{verbatim}
-
-You'll need to arrange for the package containing this component to
-be available on Python's \code{sys.path} before the configuration
-file is loaded; this is most easily done by manipulating the
-\envvar{PYTHONPATH} environment variable.
-
-Your configuration file can now include the following to load and use
-your new component:
-
-\begin{verbatim}
-%import noise
-
-<noise-generator>
-  port 1234
-  color white
-</noise-generator>
-\end{verbatim}
-
-
-\section{\module{ZConfig} --- Basic configuration support}
-
-\declaremodule{}{ZConfig}
-\modulesynopsis{Configuration package.}
-
-The main \module{ZConfig} package exports these convenience functions:
-
-\begin{funcdesc}{loadConfig}{schema, url\optional{, overrides}}
-  Load and return a configuration from a URL or pathname given by
-  \var{url}.  \var{url} may be a URL, absolute pathname, or relative
-  pathname.  Fragment identifiers are not supported.  \var{schema} is
-  a reference to a schema loaded by \function{loadSchema()} or
-  \function{loadSchemaFile()}.
-  The return value is a tuple containing the configuration object and
-  a composite handler that, when called with a name-to-handler
-  mapping, calls all the handlers for the configuration.
-
-  The optional \var{overrides} argument represents information derived
-  from command-line arguments.  If given, it must be either a sequence
-  of value specifiers, or \code{None}.  A \dfn{value specifier} is a
-  string of the form \code{\var{optionpath}=\var{value}}.  The
-  \var{optionpath} specifies the ``full path'' to the configuration
-  setting: it can contain a sequence of names, separated by
-  \character{/} characters. Each name before the last names a section
-  from the configuration file, and the last name corresponds to a key
-  within the section identified by the leading section names.  If
-  \var{optionpath} contains only one name, it identifies a key in the
-  top-level schema.  \var{value} is a string that will be treated
-  just like a value in the configuration file.
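-
-  For illustration, an override of a hypothetical top-level
-  \code{attempts} key (the schema and file names here are
-  assumptions) might be passed like this:
-
-\begin{verbatim}
-config, handlers = ZConfig.loadConfig(
-    schema, "app.conf", ["attempts=10"])
-\end{verbatim}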
-\end{funcdesc}
-
-\begin{funcdesc}{loadConfigFile}{schema, file\optional{,
-                                 url\optional{, overrides}}}
-  Load and return a configuration from an opened file object.  If
-  \var{url} is omitted, one will be computed based on the
-  \member{name} attribute of \var{file}, if it exists.  If no URL can
-  be determined, all \keyword{\%include} statements in the
-  configuration must use absolute URLs.  \var{schema} is a reference
-  to a schema loaded by \function{loadSchema()} or
-  \function{loadSchemaFile()}.
-  The return value is a tuple containing the configuration object and
-  a composite handler that, when called with a name-to-handler
-  mapping, calls all the handlers for the configuration.
-  The \var{overrides} argument is the same as for the
-  \function{loadConfig()} function.
-\end{funcdesc}
-
-\begin{funcdesc}{loadSchema}{url}
-  Load a schema definition from the URL \var{url}.
-  \var{url} may be a URL, absolute pathname, or relative pathname.
-  Fragment identifiers are not supported.
-  The resulting
-  schema object can be passed to \function{loadConfig()} or
-  \function{loadConfigFile()}.  The schema object may be used as many
-  times as needed.
-\end{funcdesc}
-
-\begin{funcdesc}{loadSchemaFile}{file\optional{, url}}
-  Load a schema definition from the open file object \var{file}.  If
-  \var{url} is given and not \code{None}, it should be the URL of the
-  resource represented by \var{file}.  If \var{url} is omitted or
-  \code{None}, a URL may be computed from the \member{name} attribute
-  of \var{file}, if present.  The resulting schema object can
-  be passed to \function{loadConfig()} or \function{loadConfigFile()}.
-  The schema object may be used as many times as needed.
-\end{funcdesc}
-
-The following exceptions are defined by this package:
-
-\begin{excdesc}{ConfigurationError}
-  Base class for exceptions specific to the \module{ZConfig} package.
-  All instances provide a \member{message} attribute that describes
-  the specific error, and a \member{url} attribute that gives the URL
-  of the resource the error was located in, or \constant{None}.
-\end{excdesc}
-
-\begin{excdesc}{ConfigurationSyntaxError}
-  Exception raised when a configuration source does not conform to the
-  allowed syntax.  In addition to the \member{message} and
-  \member{url} attributes, exceptions of this type offer the
-  \member{lineno} attribute, which provides the line number at which
-  the error was detected.
-\end{excdesc}
-
-\begin{excdesc}{DataConversionError}
-  Raised when a data type conversion fails with
-  \exception{ValueError}.  This exception is a subclass of both
-  \exception{ConfigurationError} and \exception{ValueError}.  The
-  \function{str()} of the exception provides the explanation from the
-  original \exception{ValueError}, and the line number and URL of the
-  value which provoked the error.  The following additional attributes
-  are provided:
-
-  \begin{tableii}{l|l}{member}{Attribute}{Value}
-    \lineii{colno}
-           {column number at which the value starts, or \code{None}}
-    \lineii{exception}
-           {the original \exception{ValueError} instance}
-    \lineii{lineno}
-           {line number on which the value starts}
-    \lineii{message}
-           {\function{str()} returned by the original \exception{ValueError}}
-    \lineii{value}
-           {original value passed to the conversion function}
-    \lineii{url}
-           {URL of the resource providing the value text}
-  \end{tableii}
-\end{excdesc}
-
-\begin{excdesc}{SchemaError}
-  Raised when a schema contains an error.  This exception type
-  provides the attributes \member{url}, \member{lineno}, and
-  \member{colno}, which provide the source URL, the line number, and
-  the column number at which the error was detected.  These attributes
-  may be \code{None} in some cases.
-\end{excdesc}
-
-\begin{excdesc}{SchemaResourceError}
-  Raised when there's an error locating a resource required by the
-  schema.  This is derived from \exception{SchemaError}.  Instances of
-  this exception class add the attributes \member{filename},
-  \member{package}, and \member{path}, which hold the filename
-  searched for within the package being loaded, the name of the
-  package, and the \code{__path__} attribute of the package itself (or
-  \constant{None} if it isn't a package or could not be imported).
-\end{excdesc}
-
-\begin{excdesc}{SubstitutionReplacementError}
-  Raised when the source text contains references to names which are
-  not defined in \var{mapping}.  The attributes \member{source} and
-  \member{name} provide the complete source text and the name
-  (converted to lower case) for which no replacement is defined.
-\end{excdesc}
-
-\begin{excdesc}{SubstitutionSyntaxError}
-  Raised when the source text contains syntactical errors.
-\end{excdesc}
-
-
-\subsection{Basic Usage}
-
-The simplest use of \refmodule{ZConfig} is to load a configuration
-based on a schema stored in a file.  This example loads a
-configuration file specified on the command line using a schema in the
-same directory as the script:
-
-\begin{verbatim}
-import os
-import sys
-import ZConfig
-
-try:
-    myfile = __file__
-except NameError:
-    myfile = os.path.realpath(sys.argv[0])
-
-mydir = os.path.dirname(myfile)
-
-schema = ZConfig.loadSchema(os.path.join(mydir, 'schema.xml'))
-conf, handler = ZConfig.loadConfig(schema, sys.argv[1])
-\end{verbatim}
-
-If the schema file contained this schema:
-
-\begin{verbatim}
-<schema>
-  <key name='server' required='yes'/>
-  <key name='attempts' datatype='integer' default='5'/>
-</schema>
-\end{verbatim}
-
-and the file specified on the command line contained this text:
-
-\begin{verbatim}
-# sample configuration
-
-server www.example.com
-\end{verbatim}
-
-then the configuration object \code{conf} loaded above would have two
-attributes:
-
-\begin{tableii}{l|l}{member}{Attribute}{Value}
-  \lineii{server}{\code{'www.example.com'}}
-  \lineii{attempts}{\code{5}}
-\end{tableii}
-
-
-\section{\module{ZConfig.datatypes} --- Default data type registry}
-
-\declaremodule{}{ZConfig.datatypes}
-\modulesynopsis{Default implementation of a data type registry}
-
-The \module{ZConfig.datatypes} module provides the implementation of
-the default data type registry and all the standard data types
-supported by \module{ZConfig}.  A number of convenience classes are
-also provided to assist in the creation of additional data types.
-
-A \dfn{datatype registry} is an object that provides conversion
-functions for data types.  The interface for a registry is fairly
-simple.
-
-A \dfn{conversion function} is any callable object that accepts a
-single argument and returns a suitable value, or raises an exception
-if the input value is not acceptable.  \exception{ValueError} is the
-preferred exception for disallowed inputs, but any other exception
-will be properly propagated.
-
-\begin{classdesc}{Registry}{\optional{stock}}
-  Implementation of a simple type registry.  If given, \var{stock}
-  should be a mapping which defines the ``built-in'' data types for
-  the registry; if omitted or \code{None}, the standard set of data
-  types is used (see section~\ref{standard-datatypes}, ``Standard
-  \module{ZConfig} Datatypes'').
-\end{classdesc}
-
-\class{Registry} objects have the following methods:
-
-\begin{methoddesc}{get}{name}
-  Return the type conversion routine for \var{name}.  If the
-  conversion function cannot be found, an (unspecified) exception is
-  raised.  If the name is not provided in the stock set of data types
-  by this registry and has not otherwise been registered, this method
-  uses the \method{search()} method to load the conversion function.
-  This is the only method the rest of \module{ZConfig} requires.
-\end{methoddesc}
-
-\begin{methoddesc}{register}{name, conversion}
-  Register the data type name \var{name} to use the conversion
-  function \var{conversion}.  If \var{name} is already registered or
-  provided as a stock data type, \exception{ValueError} is raised
-  (this includes the case when \var{name} was found using the
-  \method{search()} method).
-\end{methoddesc}
-
-\begin{methoddesc}{search}{name}
-  This is a helper method for the default implementation of the
-  \method{get()} method.  If \var{name} is a Python dotted-name, this
-  method loads the value for the name by dynamically importing the
-  containing module and extracting the value of the name.  The name
-  must refer to a usable conversion function.
-\end{methoddesc}
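-
-As a brief sketch of extending the registry, the following registers
-a hypothetical \code{comma-list} conversion; the resulting registry
-could then be passed to \class{SchemaLoader} (described in the
-\refmodule{ZConfig.loader} section below):
-
-\begin{verbatim}
-import ZConfig.datatypes
-
-def comma_list(value):
-    # split a comma-separated string into a list of stripped items
-    return [s.strip() for s in value.split(',')]
-
-registry = ZConfig.datatypes.Registry()
-registry.register('comma-list', comma_list)
-\end{verbatim}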
-
-
-The following classes are provided to define conversion functions:
-
-\begin{classdesc}{MemoizedConversion}{conversion}
-  Simple memoization for potentially expensive conversions.  This
-  conversion helper caches each successful conversion for re-use at a
-  later time; failed conversions are not cached in any way, since it
-  is difficult to raise a meaningful exception providing information
-  about the specific failure.
-\end{classdesc}
-
-\begin{classdesc}{RangeCheckedConversion}{conversion\optional{,
-                                          min\optional{, max}}}
-  Helper that performs range checks on the result of another
-  conversion.  Values passed to instances of this conversion are
-  converted using \var{conversion} and then range checked.  \var{min}
-  and \var{max}, if given and not \code{None}, are the inclusive
-  endpoints of the allowed range.  Values returned by \var{conversion}
-  which lie outside the range described by \var{min} and \var{max}
-  cause \exception{ValueError} to be raised.
-\end{classdesc}
-
-\begin{classdesc}{RegularExpressionConversion}{regex}
-  Conversion that checks that the input matches the regular expression
-  \var{regex}.  If it matches, returns the input, otherwise raises
-  \exception{ValueError}.
-\end{classdesc}
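-
-These helpers compose.  As an illustrative combination (not one
-predefined by \module{ZConfig}), a memoized, range-checked integer
-conversion could be built like this:
-
-\begin{verbatim}
-from ZConfig.datatypes import MemoizedConversion, RangeCheckedConversion
-
-# an integer restricted to the inclusive range 0..100
-percentage = MemoizedConversion(
-    RangeCheckedConversion(int, min=0, max=100))
-\end{verbatim}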
-
-
-\section{\module{ZConfig.loader} --- Resource loading support}
-
-\declaremodule{}{ZConfig.loader}
-\modulesynopsis{Support classes for resource loading}
-
-This module provides some helper classes used by the primary APIs
-exported by the \module{ZConfig} package.  These classes may be useful
-for some applications, especially applications that want to use a
-non-default data type registry.
-
-\begin{classdesc}{Resource}{file, url\optional{, fragment}}
-  Object that allows an open file object and a URL to be bound
-  together to ease handling.  Instances have the attributes
-  \member{file}, \member{url}, and \member{fragment} which store the
-  constructor arguments.  These objects also have a \method{close()}
-  method which will call \method{close()} on \var{file}, then set the
-  \member{file} attribute to \code{None} and the \member{closed}
-  attribute to \constant{True}.
-\end{classdesc}
-
-\begin{classdesc}{BaseLoader}{}
-  Base class for loader objects.  This should not be instantiated
-  directly, as the \method{loadResource()} method must be overridden
-  for the instance to be used via the public API.
-\end{classdesc}
-
-\begin{classdesc}{ConfigLoader}{schema}
-  Loader for configuration files.  Each configuration file must
-  conform to the schema \var{schema}.  The \method{load*()} methods
-  return a tuple consisting of the configuration object and a
-  composite handler.
-\end{classdesc}
-
-\begin{classdesc}{SchemaLoader}{\optional{registry}}
-  Loader that loads schema instances.  All schema loaded by a
-  \class{SchemaLoader} will use the same data type registry.  If
-  \var{registry} is provided and not \code{None}, it will be used,
-  otherwise an instance of \class{ZConfig.datatypes.Registry} will be
-  used.
-\end{classdesc}
-
-
-\subsection{Loader Objects}
-
-Loader objects provide a general public interface, an interface which
-subclasses must implement, and some utility methods.
-
-The following methods provide the public interface:
-
-\begin{methoddesc}[loader]{loadURL}{url}
-  Open and load a resource specified by the URL \var{url}.
-  This method uses the \method{loadResource()} method to perform the
-  actual load, and returns whatever that method returns.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{loadFile}{file\optional{, url}}
-  Load from an open file object, \var{file}.  If given and not
-  \code{None}, \var{url} should be the URL of the resource represented
-  by \var{file}.  If omitted or \code{None}, the \member{name}
-  attribute of \var{file} is used to compute a \code{file:} URL, if
-  present.
-  This method uses the \method{loadResource()} method to perform the
-  actual load, and returns whatever that method returns.
-\end{methoddesc}
-
-The following method must be overridden by subclasses:
-
-\begin{methoddesc}[loader]{loadResource}{resource}
-  Subclasses of \class{BaseLoader} must implement this method to
-  actually load the resource and return the appropriate
-  application-level object.
-\end{methoddesc}
-
-The following methods can be used as utilities:
-
-\begin{methoddesc}[loader]{isPath}{s}
-  Return true if \var{s} should be considered a filesystem path rather
-  than a URL.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{normalizeURL}{url-or-path}
-  Return a URL for \var{url-or-path}.  If \var{url-or-path} refers to
-  an existing file, the corresponding \code{file:} URL is returned.
-  Otherwise \var{url-or-path} is checked for sanity: if it
-  does not have a scheme, \exception{ValueError} is raised, and if it
-  does have a fragment identifier, \exception{ConfigurationError} is
-  raised.
-  This uses \method{isPath()} to determine whether \var{url-or-path}
-  is a URL or a filesystem path.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{openResource}{url}
-  Returns a resource object that represents the URL \var{url}.  The
-  URL is opened using the \function{urllib2.urlopen()} function, and
-  the returned resource object is created using
-  \method{createResource()}.  If the URL cannot be opened,
-  \exception{ConfigurationError} is raised.
-\end{methoddesc}
-
-\begin{methoddesc}[loader]{createResource}{file, url}
-  Returns a resource object for an open file and URL, given as
-  \var{file} and \var{url}, respectively.  This may be overridden by a
-  subclass if an alternate resource implementation is desired.
-\end{methoddesc}
-
-
-\section{\module{ZConfig.cmdline} --- Command-line override support}
-
-\declaremodule{}{ZConfig.cmdline}
-\modulesynopsis{Support for command-line overrides for configuration
-                settings.}
-
-This module exports an extended version of the \class{ConfigLoader}
-class from the \refmodule{ZConfig.loader} module.  This provides
-support for overriding specific settings from the configuration file
-from the command line, without requiring the application to provide
-specific options for everything the configuration file can include.
-
-\begin{classdesc}{ExtendedConfigLoader}{schema}
-  Construct a \class{ConfigLoader} subclass that adds support for
-  command-line overrides.
-\end{classdesc}
-
-The following additional method is provided, and is the only way to
-provide position information to associate with command-line
-parameters:
-
-\begin{methoddesc}{addOption}{spec\optional{, pos}}
-  Add a single value to the list of overridden values.  The \var{spec}
-  argument is a value specifier, as described for the
-  \function{\refmodule{ZConfig}.loadConfig()} function.  A source
-  position for the specifier may be given as \var{pos}.  If \var{pos}
-  is specified and not \code{None}, it must be a sequence of three
-  values.  The first is the URL of the source (or some other
-  identifying string).  The second and third are the line number and
-  column of the setting.  This position information is used only to
-  construct a \exception{DataConversionError} when data conversion
-  fails.
-\end{methoddesc}
-
-
-\section{\module{ZConfig.substitution} --- String substitution}
-
-\declaremodule{}{ZConfig.substitution}
-\modulesynopsis{Shell-style string substitution helper.}
-
-This module provides a basic substitution facility similar to that
-found in the Bourne shell (\program{sh} on most \UNIX{} platforms).  
-
-The replacements supported by this module include:
-
-\begin{tableiii}{l|l|c}{code}{Source}{Replacement}{Notes}
-  \lineiii{\$\$}{\code{\$}}{(1)}
-  \lineiii{\$\var{name}}{The result of looking up \var{name}}{(2)}
-  \lineiii{\$\{\var{name}\}}{The result of looking up \var{name}}{}
-\end{tableiii}
-
-\noindent
-Notes:
-\begin{description}
-  \item[(1)]  This is different from the Bourne shell, which uses
-              \code{\textbackslash\$} to generate a \character{\$} in
-              the result text.  This difference avoids having as many
-              special characters in the syntax.
-
-  \item[(2)]  The character immediately following \var{name}, if
-              any, must not be a valid character in a name; use the
-              \code{\$\{\var{name}\}} form in that case.
-\end{description}
-
-In each case, \var{name} is a non-empty sequence of alphanumeric and
-underscore characters not starting with a digit.  If there is not a
-replacement for \var{name}, the exception
-\exception{SubstitutionReplacementError} is raised.
-Note that the lookup is expected to be case-insensitive; this module
-will always use a lower-case version of the name to perform the query.
-
-This module provides these functions:
-
-\begin{funcdesc}{substitute}{s, mapping}
-  Substitute values from \var{mapping} into \var{s}.  \var{mapping}
-  can be a \class{dict} or any type that supports the \method{get()}
-  method of the mapping protocol.  Replacement
-  values are copied into the result without further interpretation.
-  Raises \exception{SubstitutionSyntaxError} if there are malformed
-  constructs in \var{s}.
-\end{funcdesc}
-
-\begin{funcdesc}{isname}{s}
-  Returns \constant{True} if \var{s} is a valid name for use in
-  substitution text, otherwise returns \constant{False}.
-\end{funcdesc}
-
-
-\subsection{Examples}
-
-\begin{verbatim}
->>> from ZConfig.substitution import substitute
->>> d = {'name': 'value',
-...      'top': '$middle',
-...      'middle' : 'bottom'}
->>>
->>> substitute('$name', d)
-'value'
->>> substitute('$top', d)
-'$middle'
-\end{verbatim}
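-
-The \code{\$\$} escape and the braced form behave as described in the
-table above:
-
-\begin{verbatim}
->>> substitute('${name}', d)
-'value'
->>> substitute('cost: $$3', d)
-'cost: $3'
-\end{verbatim}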
-
-
-\appendix
-\section{Schema Document Type Definition \label{schema-dtd}}
-
-The following is the XML Document Type Definition for \module{ZConfig}
-schema:
-
-\verbatiminput{schema.dtd}
-
-\end{document}
diff --git a/branches/bug1734/src/ZConfig/info.py b/branches/bug1734/src/ZConfig/info.py
deleted file mode 100644
index a0deb174..00000000
--- a/branches/bug1734/src/ZConfig/info.py
+++ /dev/null
@@ -1,514 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Objects that can describe a ZConfig schema."""
-
-import copy
-
-import ZConfig
-
-
-class UnboundedThing:
-    __metaclass__ = type
-    __slots__ = ()
-
-    def __lt__(self, other):
-        return False
-
-    def __le__(self, other):
-        return isinstance(other, self.__class__)
-
-    def __gt__(self, other):
-        return True
-
-    def __ge__(self, other):
-        return True
-
-    def __eq__(self, other):
-        return isinstance(other, self.__class__)
-
-    def __ne__(self, other):
-        return not isinstance(other, self.__class__)
-
-    def __repr__(self):
-        return "<Unbounded>"
-
-Unbounded = UnboundedThing()
-
-
-class ValueInfo:
-    __metaclass__ = type
-    __slots__ = 'value', 'position'
-
-    def __init__(self, value, position):
-        self.value = value
-        # position is (lineno, colno, url)
-        self.position = position
-
-    def convert(self, datatype):
-        try:
-            return datatype(self.value)
-        except ValueError, e:
-            raise ZConfig.DataConversionError(e, self.value, self.position)
-
-
-class BaseInfo:
-    """Information about a single configuration key."""
-
-    description = None
-    example = None
-    metadefault = None
-
-    def __init__(self, name, datatype, minOccurs, maxOccurs, handler,
-                 attribute):
-        if maxOccurs is not None:
-            if maxOccurs < 1:
-                raise ZConfig.SchemaError(
-                    "maxOccurs must be at least 1")
-            # a key cannot be required to occur more often than it
-            # is allowed to occur:
-            if minOccurs is not None and minOccurs > maxOccurs:
-                raise ZConfig.SchemaError(
-                    "minOccurs cannot be larger than maxOccurs")
-        self.name = name
-        self.datatype = datatype
-        self.minOccurs = minOccurs
-        self.maxOccurs = maxOccurs
-        self.handler = handler
-        self.attribute = attribute
-
-    def __repr__(self):
-        clsname = self.__class__.__name__
-        return "<%s for %s>" % (clsname, `self.name`)
-
-    def isabstract(self):
-        return False
-
-    def ismulti(self):
-        return self.maxOccurs > 1
-
-    def issection(self):
-        return False
-
-
-class BaseKeyInfo(BaseInfo):
-
-    _rawdefaults = None
-
-    def __init__(self, name, datatype, minOccurs, maxOccurs, handler,
-                 attribute):
-        assert minOccurs is not None
-        BaseInfo.__init__(self, name, datatype, minOccurs, maxOccurs,
-                          handler, attribute)
-        self._finished = False
-
-    def finish(self):
-        if self._finished:
-            raise ZConfig.SchemaError(
-                "cannot finish KeyInfo more than once")
-        self._finished = True
-
-    def adddefault(self, value, position, key=None):
-        if self._finished:
-            raise ZConfig.SchemaError(
-                "cannot add default values to finished KeyInfo")
-        # Check that the name/keyed relationship is right:
-        if self.name == "+" and key is None:
-            raise ZConfig.SchemaError(
-                "default values must be keyed for name='+'")
-        elif self.name != "+" and key is not None:
-            raise ZConfig.SchemaError(
-                "unexpected key for default value")
-        self.add_valueinfo(ValueInfo(value, position), key)
-
-    def add_valueinfo(self, vi, key):
-        """Actually add a ValueInfo to this key-info object.
-
-        Whether key may be None has already been checked against the
-        name of this key, and the combination has been found
-        permissible.
-
-        Subclasses must override this method; it should not be
-        called by client code.
-        """
-        raise NotImplementedError(
-            "add_valueinfo() must be implemented by subclasses of BaseKeyInfo")
-
-    def prepare_raw_defaults(self):
-        assert self.name == "+"
-        if self._rawdefaults is None:
-            self._rawdefaults = self._default
-        self._default = {}
-
-
-class KeyInfo(BaseKeyInfo):
-
-    _default = None
-
-    def __init__(self, name, datatype, minOccurs, handler, attribute):
-        BaseKeyInfo.__init__(self, name, datatype, minOccurs, 1,
-                             handler, attribute)
-        if self.name == "+":
-            self._default = {}
-
-    def add_valueinfo(self, vi, key):
-        if self.name == "+":
-            if self._default.has_key(key):
-                # not ideal: we're presenting the unconverted
-                # version of the key
-                raise ZConfig.SchemaError(
-                    "duplicate default value for key %s" % `key`)
-            self._default[key] = vi
-        elif self._default is not None:
-            raise ZConfig.SchemaError(
-                "cannot set more than one default to key with maxOccurs == 1")
-        else:
-            self._default = vi
-
-    def computedefault(self, keytype):
-        self.prepare_raw_defaults()
-        for k, vi in self._rawdefaults.iteritems():
-            key = ValueInfo(k, vi.position).convert(keytype)
-            self.add_valueinfo(vi, key)
-
-    def getdefault(self):
-        # Use copy.copy() so callers cannot pollute our internal
-        # data; it handles both the list and the dictionary cases
-        # without special-casing either:
-        return copy.copy(self._default)
-
-
-class MultiKeyInfo(BaseKeyInfo):
-
-    def __init__(self, name, datatype, minOccurs, maxOccurs, handler,
-                 attribute):
-        BaseKeyInfo.__init__(self, name, datatype, minOccurs, maxOccurs,
-                             handler, attribute)
-        if self.name == "+":
-            self._default = {}
-        else:
-            self._default = []
-
-    def add_valueinfo(self, vi, key):
-        if self.name == "+":
-            # This is a keyed value, not a simple value:
-            if key in self._default:
-                self._default[key].append(vi)
-            else:
-                self._default[key] = [vi]
-        else:
-            self._default.append(vi)
-
-    def computedefault(self, keytype):
-        self.prepare_raw_defaults()
-        for k, vlist in self._rawdefaults.iteritems():
-            key = ValueInfo(k, vlist[0].position).convert(keytype)
-            for vi in vlist:
-                self.add_valueinfo(vi, key)
-
-    def getdefault(self):
-        return copy.copy(self._default)
-
-
-class SectionInfo(BaseInfo):
-    def __init__(self, name, sectiontype, minOccurs, maxOccurs, handler,
-                 attribute):
-        # name        - name of the section; one of '*', '+', or name1
-        # sectiontype - SectionType instance
-        # minOccurs   - minimum number of occurrences of the section
-        # maxOccurs   - maximum number of occurrences; if > 1, name
-        #               must be '*' or '+'
-        # handler     - handler name called when value(s) must take effect,
-        #               or None
-        # attribute   - name of the attribute on the SectionValue object
-        if maxOccurs > 1:
-            if name not in ('*', '+'):
-                raise ZConfig.SchemaError(
-                    "sections which can occur more than once must"
-                    " use a name of '*' or '+'")
-            if not attribute:
-                raise ZConfig.SchemaError(
-                    "sections which can occur more than once must"
-                    " specify a target attribute name")
-        if sectiontype.isabstract():
-            datatype = None
-        else:
-            datatype = sectiontype.datatype
-        BaseInfo.__init__(self, name, datatype,
-                          minOccurs, maxOccurs, handler, attribute)
-        self.sectiontype = sectiontype
-
-    def __repr__(self):
-        clsname = self.__class__.__name__
-        return "<%s for %s (%s)>" % (
-            clsname, self.sectiontype.name, `self.name`)
-
-    def issection(self):
-        return True
-
-    def allowUnnamed(self):
-        return self.name == "*"
-
-    def isAllowedName(self, name):
-        if name == "*" or name == "+":
-            return False
-        elif self.name == "+":
-            return name and True or False
-        elif self.name == "*":
-            return True
-        else:
-            return name == self.name
-
-    def getdefault(self):
-        # sections cannot have defaults
-        if self.maxOccurs > 1:
-            return []
-        else:
-            return None
-
-
-class AbstractType:
-    __metaclass__ = type
-    __slots__ = '_subtypes', 'name', 'description'
-
-    def __init__(self, name):
-        self._subtypes = {}
-        self.name = name
-        self.description = None
-
-    def addsubtype(self, type):
-        self._subtypes[type.name] = type
-
-    def getsubtype(self, name):
-        try:
-            return self._subtypes[name]
-        except KeyError:
-            raise ZConfig.SchemaError("no sectiontype %s in abstracttype %s"
-                                      % (`name`, `self.name`))
-
-    def hassubtype(self, name):
-        """Return true iff this type has 'name' as a concrete manifestation."""
-        return name in self._subtypes.keys()
-
-    def getsubtypenames(self):
-        """Return the names of all concrete types as a sorted list."""
-        L = self._subtypes.keys()
-        L.sort()
-        return L
-
-    def isabstract(self):
-        return True
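-
-    # Illustrative sketch (not part of the original module; the type
-    # names are hypothetical):
-    #
-    #     >>> at = AbstractType('storage')
-    #     >>> at.getsubtypenames()
-    #     []
-    #     >>> at.hassubtype('filestorage')
-    #     False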
-
-
-class SectionType:
-    def __init__(self, name, keytype, valuetype, datatype, registry, types):
-        # name      - name of the section, or '*' or '+'
-        # keytype   - type for the keys themselves
-        # valuetype - default type for key values
-        # datatype  - type for the section itself
-        self.name = name
-        self.datatype = datatype
-        self.keytype = keytype
-        self.valuetype = valuetype
-        self.handler = None
-        self.description = None
-        self.registry = registry
-        self._children = []    # [(key, info), ...]
-        self._attrmap = {}     # {attribute: info, ...}
-        self._keymap = {}      # {key: info, ...}
-        self._types = types
-
-    def gettype(self, name):
-        n = name.lower()
-        try:
-            return self._types[n]
-        except KeyError:
-            raise ZConfig.SchemaError("unknown type name: " + `name`)
-
-    def gettypenames(self):
-        return self._types.keys()
-
-    def __len__(self):
-        return len(self._children)
-
-    def __getitem__(self, index):
-        return self._children[index]
-
-    def _add_child(self, key, info):
-        # check naming constraints
-        assert key or info.attribute
-        if key and self._keymap.has_key(key):
-            raise ZConfig.SchemaError(
-                "child name %s already used" % key)
-        if info.attribute and self._attrmap.has_key(info.attribute):
-            raise ZConfig.SchemaError(
-                "child attribute name %s already used" % info.attribute)
-        # a-ok, add the item to the appropriate maps
-        if info.attribute:
-            self._attrmap[info.attribute] = info
-        if key:
-            self._keymap[key] = info
-        self._children.append((key, info))
-
-    def addkey(self, keyinfo):
-        self._add_child(keyinfo.name, keyinfo)
-
-    def addsection(self, name, sectinfo):
-        assert name not in ("*", "+")
-        self._add_child(name, sectinfo)
-
-    def getinfo(self, key):
-        if not key:
-            raise ZConfig.ConfigurationError(
-                "cannot match a key without a name")
-        try:
-            return self._keymap[key]
-        except KeyError:
-            raise ZConfig.ConfigurationError("no key matching " + `key`)
-
-    def getrequiredtypes(self):
-        d = {}
-        if self.name:
-            d[self.name] = 1
-        stack = [self]
-        while stack:
-            info = stack.pop()
-            for key, ci in info._children:
-                if ci.issection():
-                    t = ci.sectiontype
-                    if not d.has_key(t.name):
-                        d[t.name] = 1
-                        stack.append(t)
-        return d.keys()
-
-    def getsectioninfo(self, type, name):
-        for key, info in self._children:
-            if key:
-                if key == name:
-                    if not info.issection():
-                        raise ZConfig.ConfigurationError(
-                            "section name %s already in use for key" % key)
-                    st = info.sectiontype
-                    if st.isabstract():
-                        try:
-                            st = st.getsubtype(type)
-                        except ZConfig.ConfigurationError:
-                            raise ZConfig.ConfigurationError(
-                                "section type %s not allowed for name %s"
-                                % (`type`, `key`))
-                    if not st.name == type:
-                        raise ZConfig.ConfigurationError(
-                            "name %s must be used for a %s section"
-                            % (`name`, `st.name`))
-                    return info
-            # else must be a sectiontype or an abstracttype:
-            elif info.sectiontype.name == type:
-                if not (name or info.allowUnnamed()):
-                    raise ZConfig.ConfigurationError(
-                        `type` + " sections must be named")
-                return info
-            elif info.sectiontype.isabstract():
-                st = info.sectiontype
-                if st.name == type:
-                    raise ZConfig.ConfigurationError(
-                        "cannot define section with an abstract type")
-                try:
-                    st = st.getsubtype(type)
-                except ZConfig.ConfigurationError:
-                    # not this one; maybe a different one
-                    pass
-                else:
-                    return info
-        raise ZConfig.ConfigurationError(
-            "no matching section defined for type='%s', name='%s'" % (
-            type, name))
-
-    def isabstract(self):
-        return False
-
-
-class SchemaType(SectionType):
-    def __init__(self, keytype, valuetype, datatype, handler, url,
-                 registry):
-        SectionType.__init__(self, None, keytype, valuetype, datatype,
-                             registry, {})
-        self._components = {}
-        self.handler = handler
-        self.url = url
-
-    def addtype(self, typeinfo):
-        n = typeinfo.name
-        if self._types.has_key(n):
-            raise ZConfig.SchemaError("type name cannot be redefined: "
-                                      + `typeinfo.name`)
-        self._types[n] = typeinfo
-
-    def allowUnnamed(self):
-        return True
-
-    def isAllowedName(self, name):
-        return False
-
-    def issection(self):
-        return True
-
-    def getunusedtypes(self):
-        alltypes = self.gettypenames()
-        reqtypes = self.getrequiredtypes()
-        for n in reqtypes:
-            alltypes.remove(n)
-        if self.name and self.name in alltypes:
-            alltypes.remove(self.name)
-        return alltypes
-
-    def createSectionType(self, name, keytype, valuetype, datatype):
-        t = SectionType(name, keytype, valuetype, datatype,
-                        self.registry, self._types)
-        self.addtype(t)
-        return t
-
-    def deriveSectionType(self, base, name, keytype, valuetype, datatype):
-        if isinstance(base, SchemaType):
-            raise ZConfig.SchemaError(
-                "cannot derive sectiontype from top-level schema")
-        t = self.createSectionType(name, keytype, valuetype, datatype)
-        t._attrmap.update(base._attrmap)
-        t._keymap.update(base._keymap)
-        t._children.extend(base._children)
-        for i in range(len(t._children)):
-            key, info = t._children[i]
-            if isinstance(info, BaseKeyInfo) and info.name == "+":
-                # need to create a new info object and recompute the
-                # default mapping based on the new keytype
-                info = copy.copy(info)
-                info.computedefault(t.keytype)
-                t._children[i] = (key, info)
-        return t
-
-    def addComponent(self, name):
-        if self._components.has_key(name):
-            raise ZConfig.SchemaError("already have component %s" % name)
-        self._components[name] = name
-
-    def hasComponent(self, name):
-        return self._components.has_key(name)
-
-
-def createDerivedSchema(base):
-    new = SchemaType(base.keytype, base.valuetype, base.datatype,
-                     base.handler, base.url, base.registry)
-    new._components.update(base._components)
-    new.description = base.description
-    new._children[:] = base._children
-    new._attrmap.update(base._attrmap)
-    new._keymap.update(base._keymap)
-    new._types.update(base._types)
-    return new
diff --git a/branches/bug1734/src/ZConfig/loader.py b/branches/bug1734/src/ZConfig/loader.py
deleted file mode 100644
index f52bdacb..00000000
--- a/branches/bug1734/src/ZConfig/loader.py
+++ /dev/null
@@ -1,307 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Schema loader utility."""
-
-import os.path
-import sys
-import urllib
-import urllib2
-
-import ZConfig
-import ZConfig.cfgparser
-import ZConfig.datatypes
-import ZConfig.info
-import ZConfig.matcher
-import ZConfig.schema
-import ZConfig.url
-
-
-def loadSchema(url):
-    return SchemaLoader().loadURL(url)
-
-def loadSchemaFile(file, url=None):
-    return SchemaLoader().loadFile(file, url)
-
-def loadConfig(schema, url, overrides=()):
-    return _get_config_loader(schema, overrides).loadURL(url)
-
-def loadConfigFile(schema, file, url=None, overrides=()):
-    return _get_config_loader(schema, overrides).loadFile(file, url)
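-
-# Illustrative usage sketch (not part of the original module; the file
-# names are hypothetical):
-#
-#     import ZConfig.loader
-#     schema = ZConfig.loader.loadSchema("schema.xml")
-#     config, handler = ZConfig.loader.loadConfig(schema, "app.conf")
-#
-# loadConfig() returns the configuration object together with a
-# CompositeHandler that can later dispatch named handler callbacks.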
-
-
-def _get_config_loader(schema, overrides):
-    if overrides:
-        from ZConfig import cmdline
-        loader = cmdline.ExtendedConfigLoader(schema)
-        for opt in overrides:
-            loader.addOption(opt)
-    else:
-        loader = ConfigLoader(schema)
-    return loader
-
-
-class BaseLoader:
-    def __init__(self):
-        pass
-
-    def createResource(self, file, url):
-        return Resource(file, url)
-
-    def loadURL(self, url):
-        url = self.normalizeURL(url)
-        r = self.openResource(url)
-        try:
-            return self.loadResource(r)
-        finally:
-            r.close()
-
-    def loadFile(self, file, url=None):
-        if not url:
-            url = _url_from_file(file)
-        r = self.createResource(file, url)
-        try:
-            return self.loadResource(r)
-        finally:
-            r.close()
-
-    # utilities
-
-    def loadResource(self, resource):
-        raise NotImplementedError(
-            "BaseLoader.loadResource() must be overridden by a subclass")
-
-    def openResource(self, url):
-        # ConfigurationError exceptions raised here should be
-        # str()able to generate a message for an end user.
-        #
-        # XXX This should be replaced to use a local cache for remote
-        # resources.  The policy needs to support both re-retrieve on
-        # change and provide the cached resource when the remote
-        # resource is not accessible.
-        url = str(url)
-        try:
-            file = urllib2.urlopen(url)
-        except urllib2.URLError, e:
-            # urllib2.URLError has a particularly hostile str(), so we
-            # generally don't want to pass it along to the user.
-            self._raise_open_error(url, e.reason)
-        except (IOError, OSError), e:
-            # Python 2.1 raises a different error from Python 2.2+,
-            # so we catch both to make sure we detect the situation.
-            self._raise_open_error(url, str(e))
-        return self.createResource(file, url)
-
-    def _raise_open_error(self, url, message):
-        if url[:7].lower() == "file://":
-            what = "file"
-            ident = urllib.url2pathname(url[7:])
-        else:
-            what = "URL"
-            ident = url
-        raise ZConfig.ConfigurationError(
-            "error opening %s %s: %s" % (what, ident, message),
-            url)
-
-    def normalizeURL(self, url):
-        if self.isPath(url):
-            url = "file://" + urllib.pathname2url(os.path.abspath(url))
-        newurl, fragment = ZConfig.url.urldefrag(url)
-        if fragment:
-            raise ZConfig.ConfigurationError(
-                "fragment identifiers are not supported",
-                url)
-        return newurl
-
-    def isPath(self, s):
-        """Return True iff 's' should be handled as a filesystem path."""
-        if ":" in s:
-            # XXX This assumes that one-character scheme identifiers
-            # are always Windows drive letters; I don't know of any
-            # one-character scheme identifiers.
-            scheme, rest = urllib.splittype(s)
-            return len(scheme) == 1
-        else:
-            return True
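-
-    # Illustrative behaviour sketch (not part of the original module;
-    # the paths are hypothetical):
-    #
-    #     >>> loader = BaseLoader()
-    #     >>> loader.isPath("/etc/app.conf")
-    #     True
-    #     >>> loader.isPath("http://www.example.com/app.conf")
-    #     False
-    #     >>> loader.isPath(r"c:\app.conf")    # drive letter, not a scheme
-    #     True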
-
-
-def _url_from_file(file):
-    name = getattr(file, "name", None)
-    if name and name[0] != "<" and name[-1] != ">":
-        return "file://" + urllib.pathname2url(os.path.abspath(name))
-    else:
-        return None
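-
-# Illustrative sketch (not part of the original module; the path is
-# hypothetical): a file object opened from "/tmp/app.conf" yields
-# "file:///tmp/app.conf", while pseudo-names such as "<stdin>" yield
-# None so no URL is associated with the resource.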
-
-
-class SchemaLoader(BaseLoader):
-    def __init__(self, registry=None):
-        if registry is None:
-            registry = ZConfig.datatypes.Registry()
-        BaseLoader.__init__(self)
-        self.registry = registry
-        self._cache = {}
-
-    def loadResource(self, resource):
-        if resource.url and self._cache.has_key(resource.url):
-            schema = self._cache[resource.url]
-        else:
-            schema = ZConfig.schema.parseResource(resource, self)
-            self._cache[resource.url] = schema
-        return schema
-
-    # schema parser support API
-
-    def schemaComponentSource(self, package, file):
-        parts = package.split(".")
-        if not parts:
-            raise ZConfig.SchemaError(
-                "illegal schema component name: " + `package`)
-        if "" in parts:
-            # '' somewhere in the package spec; still illegal
-            raise ZConfig.SchemaError(
-                "illegal schema component name: " + `package`)
-        file = file or "component.xml"
-        try:
-            __import__(package)
-        except ImportError, e:
-            raise ZConfig.SchemaResourceError(
-                "could not load package %s: %s" % (package, str(e)),
-                filename=file,
-                package=package)
-        pkg = sys.modules[package]
-        if not hasattr(pkg, "__path__"):
-            raise ZConfig.SchemaResourceError(
-                "import name does not refer to a package",
-                filename=file, package=package)
-        for dir in pkg.__path__:
-            dirname = os.path.abspath(dir)
-            fn = os.path.join(dirname, file)
-            if os.path.exists(fn):
-                return "file://" + urllib.pathname2url(fn)
-        raise ZConfig.SchemaResourceError("schema component not found",
-                                          filename=file,
-                                          package=package,
-                                          path=pkg.__path__)
-
-
-class ConfigLoader(BaseLoader):
-    def __init__(self, schema):
-        if schema.isabstract():
-            raise ZConfig.SchemaError(
-                "cannot check a configuration an abstract type")
-        BaseLoader.__init__(self)
-        self.schema = schema
-        self._private_schema = False
-
-    def loadResource(self, resource):
-        sm = self.createSchemaMatcher()
-        self._parse_resource(sm, resource)
-        result = sm.finish(), CompositeHandler(sm.handlers, self.schema)
-        return result
-
-    def createSchemaMatcher(self):
-        return ZConfig.matcher.SchemaMatcher(self.schema)
-
-    # config parser support API
-
-    def startSection(self, parent, type, name):
-        t = self.schema.gettype(type)
-        if t.isabstract():
-            raise ZConfig.ConfigurationError(
-                "concrete sections cannot match abstract section types;"
-                " found abstract type " + `type`)
-        return parent.createChildMatcher(t, name)
-
-    def endSection(self, parent, type, name, matcher):
-        sectvalue = matcher.finish()
-        parent.addSection(type, name, sectvalue)
-
-    def importSchemaComponent(self, pkgname):
-        schema = self.schema
-        if not self._private_schema:
-            # replace the schema with an extended schema on the first %import
-            self._loader = SchemaLoader(self.schema.registry)
-            schema = ZConfig.info.createDerivedSchema(self.schema)
-            self._private_schema = True
-            self.schema = schema
-        url = self._loader.schemaComponentSource(pkgname, '')
-        if schema.hasComponent(url):
-            return
-        resource = self.openResource(url)
-        schema.addComponent(url)
-        try:
-            ZConfig.schema.parseComponent(resource, self._loader, schema)
-        finally:
-            resource.close()
-
-    def includeConfiguration(self, section, url, defines):
-        url = self.normalizeURL(url)
-        r = self.openResource(url)
-        try:
-            self._parse_resource(section, r, defines)
-        finally:
-            r.close()
-
-    # internal helper
-
-    def _parse_resource(self, matcher, resource, defines=None):
-        parser = ZConfig.cfgparser.ZConfigParser(resource, self, defines)
-        parser.parse(matcher)
-
-
-class CompositeHandler:
-
-    def __init__(self, handlers, schema):
-        self._handlers = handlers
-        self._convert = schema.registry.get("basic-key")
-
-    def __call__(self, handlermap):
-        d = {}
-        for name, callback in handlermap.items():
-            n = self._convert(name)
-            if d.has_key(n):
-                raise ZConfig.ConfigurationError(
-                    "handler name not unique when converted to a basic-key: "
-                    + `name`)
-            d[n] = callback
-        L = []
-        for handler, value in self._handlers:
-            if not d.has_key(handler):
-                L.append(handler)
-        if L:
-            raise ZConfig.ConfigurationError(
-                "undefined handlers: " + ", ".join(L))
-        for handler, value in self._handlers:
-            f = d[handler]
-            if f is not None:
-                f(value)
-
-    def __len__(self):
-        return len(self._handlers)
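-
-    # Illustrative usage sketch (not part of the original module; the
-    # handler names and callbacks are hypothetical):
-    #
-    #     config, handler = ZConfig.loader.loadConfig(schema, "app.conf")
-    #     handler({'server': start_server, 'logging': None})
-    #
-    # Every handler named in the loaded configuration must appear in
-    # the map; a None callback explicitly ignores that handler's value.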
-
-
-class Resource:
-    def __init__(self, file, url):
-        self.file = file
-        self.url = url
-
-    def close(self):
-        if self.file is not None:
-            self.file.close()
-            self.file = None
-            self.closed = True
-
-    def __getattr__(self, name):
-        return getattr(self.file, name)
diff --git a/branches/bug1734/src/ZConfig/matcher.py b/branches/bug1734/src/ZConfig/matcher.py
deleted file mode 100644
index 44bcb1f3..00000000
--- a/branches/bug1734/src/ZConfig/matcher.py
+++ /dev/null
@@ -1,302 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Utility that manages the binding of configuration data to a section."""
-
-import ZConfig
-
-from ZConfig.info import ValueInfo
-
-
-class BaseMatcher:
-    def __init__(self, info, type, handlers):
-        self.info = info
-        self.type = type
-        self._values = {}
-        for key, info in type:
-            if info.name == "+" and not info.issection():
-                v = {}
-            elif info.ismulti():
-                v = []
-            else:
-                v = None
-            assert info.attribute is not None
-            self._values[info.attribute] = v
-        self._sectionnames = {}
-        if handlers is None:
-            handlers = []
-        self.handlers = handlers
-
-    def __repr__(self):
-        clsname = self.__class__.__name__
-        extra = "type " + `self.type.name`
-        return "<%s for %s>" % (clsname, extra)
-
-    def addSection(self, type, name, sectvalue):
-        if name:
-            if self._sectionnames.has_key(name):
-                raise ZConfig.ConfigurationError(
-                    "section names must not be re-used within the"
-                    " same container:" + `name`)
-            self._sectionnames[name] = name
-        ci = self.type.getsectioninfo(type, name)
-        attr = ci.attribute
-        v = self._values[attr]
-        if ci.ismulti():
-            v.append(sectvalue)
-        elif v is None:
-            self._values[attr] = sectvalue
-        else:
-            raise ZConfig.ConfigurationError(
-                "too many instances of %s section" % `ci.sectiontype.name`)
-
-    def addValue(self, key, value, position):
-        try:
-            realkey = self.type.keytype(key)
-        except ValueError, e:
-            raise ZConfig.DataConversionError(e, key, position)
-        arbkey_info = None
-        for i in range(len(self.type)):
-            k, ci = self.type[i]
-            if k == realkey:
-                break
-            if ci.name == "+" and not ci.issection():
-                arbkey_info = k, ci
-        else:
-            if arbkey_info is None:
-                raise ZConfig.ConfigurationError(
-                    `key` + " is not a known key name")
-            k, ci = arbkey_info
-        if ci.issection():
-            if ci.name:
-                extra = " in %s sections" % `self.type.name`
-            else:
-                extra = ""
-            raise ZConfig.ConfigurationError(
-                "%s is not a valid key name%s" % (`key`, extra))
-
-        ismulti = ci.ismulti()
-        attr = ci.attribute
-        assert attr is not None
-        v = self._values[attr]
-        if v is None:
-            if k == '+':
-                v = {}
-            elif ismulti:
-                v = []
-            self._values[attr] = v
-        elif not ismulti:
-            if k != '+':
-                raise ZConfig.ConfigurationError(
-                    `key` + " does not support multiple values")
-        elif len(v) == ci.maxOccurs:
-            raise ZConfig.ConfigurationError(
-                "too many values for " + `name`)
-
-        value = ValueInfo(value, position)
-        if k == '+':
-            if ismulti:
-                if v.has_key(realkey):
-                    v[realkey].append(value)
-                else:
-                    v[realkey] = [value]
-            else:
-                if v.has_key(realkey):
-                    raise ZConfig.ConfigurationError(
-                        "too many values for " + `key`)
-                v[realkey] = value
-        elif ismulti:
-            v.append(value)
-        else:
-            self._values[attr] = value
-
-    def createChildMatcher(self, type, name):
-        ci = self.type.getsectioninfo(type.name, name)
-        assert not ci.isabstract()
-        if not ci.isAllowedName(name):
-            raise ZConfig.ConfigurationError(
-                "%s is not an allowed name for %s sections"
-                % (`name`, `ci.sectiontype.name`))
-        return SectionMatcher(ci, type, name, self.handlers)
-
-    def finish(self):
-        """Check the constraints of the section and convert to an application
-        object."""
-        values = self._values
-        for key, ci in self.type:
-            if key:
-                key = repr(key)
-            else:
-                key = "section type " + `ci.sectiontype.name`
-            assert ci.attribute is not None
-            attr = ci.attribute
-            v = values[attr]
-            if ci.name == '+' and not ci.issection():
-                # v is a dict
-                if ci.minOccurs > len(v):
-                    raise ZConfig.ConfigurationError(
-                        "no keys defined for the %s key/value map; at least %d"
-                        " must be specified" % (attr, ci.minOccurs))
-            if v is None and ci.minOccurs:
-                default = ci.getdefault()
-                if default is None:
-                    raise ZConfig.ConfigurationError(
-                        "no values for %s; %s required" % (key, ci.minOccurs))
-                else:
-                    v = values[attr] = default[:]
-            if ci.ismulti():
-                if not v:
-                    default = ci.getdefault()
-                    if isinstance(default, dict):
-                        v.update(default)
-                    else:
-                        v[:] = default
-                if len(v) < ci.minOccurs:
-                    raise ZConfig.ConfigurationError(
-                        "not enough values for %s; %d found, %d required"
-                        % (key, len(v), ci.minOccurs))
-            if v is None and not ci.issection():
-                if ci.ismulti():
-                    v = ci.getdefault()[:]
-                else:
-                    v = ci.getdefault()
-                values[attr] = v
-        return self.construct()
-
-    def construct(self):
-        values = self._values
-        for name, ci in self.type:
-            assert ci.attribute is not None
-            attr = ci.attribute
-            if ci.ismulti():
-                if ci.issection():
-                    v = []
-                    for s in values[attr]:
-                        if s is not None:
-                            st = s.getSectionDefinition()
-                            try:
-                                s = st.datatype(s)
-                            except ValueError, e:
-                                raise ZConfig.DataConversionError(
-                                    e, s, (-1, -1, None))
-                        v.append(s)
-                elif ci.name == '+':
-                    v = values[attr]
-                    for key, val in v.items():
-                        v[key] = [vi.convert(ci.datatype) for vi in val]
-                else:
-                    v = [vi.convert(ci.datatype) for vi in values[attr]]
-            elif ci.issection():
-                if values[attr] is not None:
-                    st = values[attr].getSectionDefinition()
-                    try:
-                        v = st.datatype(values[attr])
-                    except ValueError, e:
-                        raise ZConfig.DataConversionError(
-                            e, values[attr], (-1, -1, None))
-                else:
-                    v = None
-            elif name == '+':
-                v = values[attr]
-                if not v:
-                    for key, val in ci.getdefault().items():
-                        v[key] = val.convert(ci.datatype)
-                else:
-                    for key, val in v.items():
-                        v[key] = val.convert(ci.datatype)
-            else:
-                v = values[attr]
-                if v is not None:
-                    v = v.convert(ci.datatype)
-            values[attr] = v
-            if ci.handler is not None:
-                self.handlers.append((ci.handler, v))
-        return self.createValue()
-
-    def createValue(self):
-        return SectionValue(self._values, None, self)
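-
-    # Illustrative flow note (not part of the original module): the
-    # config parser feeds addValue()/addSection() into a matcher;
-    # finish() then fills in defaults and enforces minOccurs/maxOccurs,
-    # and construct() runs the datatype conversions before createValue()
-    # wraps the result in a SectionValue.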
-
-
-class SectionMatcher(BaseMatcher):
-    def __init__(self, info, type, name, handlers):
-        if name or info.allowUnnamed():
-            self.name = name
-        else:
-            raise ZConfig.ConfigurationError(
-                `type.name` + " sections may not be unnamed")
-        BaseMatcher.__init__(self, info, type, handlers)
-
-    def createValue(self):
-        return SectionValue(self._values, self.name, self)
-
-
-class SchemaMatcher(BaseMatcher):
-    def __init__(self, schema):
-        BaseMatcher.__init__(self, schema, schema, [])
-
-    def finish(self):
-        # Since there's no outer container to call datatype()
-        # for the schema, we convert on the way out.
-        v = BaseMatcher.finish(self)
-        v = self.type.datatype(v)
-        if self.type.handler is not None:
-            self.handlers.append((self.type.handler, v))
-        return v
-
-
-class SectionValue:
-    """Generic 'bag-of-values' object for a section.
-
-    Derived classes should always call the SectionValue constructor
-    before attempting to modify self.
-    """
-
-    def __init__(self, values, name, matcher):
-        self.__dict__.update(values)
-        self._name = name
-        self._matcher = matcher
-        self._attributes = tuple(values.keys())
-
-    def __repr__(self):
-        if self._name:
-            # probably unique for a given config file; more readable than id()
-            name = `self._name`
-        else:
-            # identify uniquely
-            name = "at %#x" % id(self)
-        clsname = self.__class__.__name__
-        return "<%s for %s %s>" % (clsname, self._matcher.type.name, name)
-
-    def __str__(self):
-        l = []
-        attrnames = [s for s in self.__dict__.keys() if s[0] != "_"]
-        attrnames.sort()
-        for k in attrnames:
-            v = getattr(self, k)
-            l.append('%-40s: %s' % (k, v))
-        return '\n'.join(l)
-
-    def getSectionName(self):
-        return self._name
-
-    def getSectionType(self):
-        return self._matcher.type.name
-
-    def getSectionDefinition(self):
-        return self._matcher.type
-
-    def getSectionMatcher(self):
-        return self._matcher
-
-    def getSectionAttributes(self):
-        return self._attributes
diff --git a/branches/bug1734/src/ZConfig/schema.py b/branches/bug1734/src/ZConfig/schema.py
deleted file mode 100644
index 783fb47c..00000000
--- a/branches/bug1734/src/ZConfig/schema.py
+++ /dev/null
@@ -1,581 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Parser for ZConfig schemas."""
-
-import os
-import xml.sax
-
-import ZConfig
-
-from ZConfig import info
-from ZConfig import url
-
-
-def parseResource(resource, loader):
-    parser = SchemaParser(loader, resource.url)
-    xml.sax.parse(resource.file, parser)
-    return parser._schema
-
-
-def parseComponent(resource, loader, schema):
-    parser = ComponentParser(loader, resource.url, schema)
-    xml.sax.parse(resource.file, parser)
-
-
-def _srepr(ob):
-    if isinstance(ob, type(u'')):
-        # drop the leading "u" from a unicode repr
-        return `ob`[1:]
-    else:
-        return `ob`
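-
-# Illustrative sketch (not part of the original module):
-#
-#     >>> _srepr('abc')
-#     "'abc'"
-#     >>> _srepr(u'abc')    # leading "u" dropped from the unicode repr
-#     "'abc'"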
-
-
-class BaseParser(xml.sax.ContentHandler):
-
-    _cdata_tags = "description", "metadefault", "example", "default"
-    _handled_tags = ("import", "abstracttype", "sectiontype",
-                     "key", "multikey", "section", "multisection")
-
-    _allowed_parents = {
-        "description": ["key", "section", "multikey", "multisection",
-                        "sectiontype", "abstracttype",
-                        "schema", "component"],
-        "example": ["key", "section", "multikey", "multisection"],
-        "metadefault": ["key", "section", "multikey", "multisection"],
-        "default": ["key", "multikey"],
-        "import": ["schema", "component"],
-        "abstracttype": ["schema", "component"],
-        "sectiontype": ["schema", "component"],
-        "key": ["schema", "sectiontype"],
-        "multikey": ["schema", "sectiontype"],
-        "section": ["schema", "sectiontype"],
-        "multisection": ["schema", "sectiontype"],
-        }
-
-    def __init__(self, loader, url):
-        self._registry = loader.registry
-        self._loader = loader
-        self._basic_key = self._registry.get("basic-key")
-        self._identifier = self._registry.get("identifier")
-        self._cdata = None
-        self._locator = None
-        self._prefixes = []
-        self._schema = None
-        self._stack = []
-        self._url = url
-        self._elem_stack = []
-
-    # SAX 2 ContentHandler methods
-
-    def setDocumentLocator(self, locator):
-        self._locator = locator
-
-    def startElement(self, name, attrs):
-        attrs = dict(attrs)
-        if self._elem_stack:
-            parent = self._elem_stack[-1]
-            if not self._allowed_parents.has_key(name):
-                self.error("Unknown tag " + name)
-            if parent not in self._allowed_parents[name]:
-                self.error("%s elements may not be nested in %s elements"
-                           % (_srepr(name), _srepr(parent)))
-        elif name != self._top_level:
-            self.error("Unknown document type " + name)
-        self._elem_stack.append(name)
-        if name == self._top_level:
-            if self._schema is not None:
-                self.error("schema element improperly nested")
-            getattr(self, "start_" + name)(attrs)
-        elif name in self._handled_tags:
-            if self._schema is None:
-                self.error(name + " element outside of schema")
-            getattr(self, "start_" + name)(attrs)
-        elif name in self._cdata_tags:
-            if self._schema is None:
-                self.error(name + " element outside of schema")
-            if self._cdata is not None:
-                self.error(name + " element improperly nested")
-            self._cdata = []
-            self._position = None
-            self._attrs = attrs
-
-    def characters(self, data):
-        if self._cdata is not None:
-            if self._position is None:
-                self._position = self.get_position()
-            self._cdata.append(data)
-        elif data.strip():
-            self.error("unexpected non-blank character data: "
-                       + `data.strip()`)
-
-    def endElement(self, name):
-        del self._elem_stack[-1]
-        if name in self._handled_tags:
-            getattr(self, "end_" + name)()
-        else:
-            data = ''.join(self._cdata).strip()
-            self._cdata = None
-            getattr(self, "characters_" + name)(data)
-
-    def endDocument(self):
-        if self._schema is None:
-            self.error("no %s found" % self._top_level)
-
-    # helper methods
-
-    def get_position(self):
-        if self._locator:
-            return (self._locator.getLineNumber(),
-                    self._locator.getColumnNumber(),
-                    (self._locator.getSystemId() or self._url))
-        else:
-            return None, None, self._url
-
-    def get_handler(self, attrs):
-        v = attrs.get("handler")
-        if v is None:
-            return v
-        else:
-            return self.basic_key(v)
-
-    def push_prefix(self, attrs):
-        name = attrs.get("prefix")
-        if name:
-            if self._prefixes:
-                convert = self._registry.get("dotted-suffix")
-            else:
-                convert = self._registry.get("dotted-name")
-            try:
-                name = convert(name)
-            except ValueError, err:
-                self.error("not a valid prefix: %s (%s)"
-                           % (_srepr(name), str(err)))
-            if name[0] == ".":
-                prefix = self._prefixes[-1] + name
-            else:
-                prefix = name
-        elif self._prefixes:
-            prefix = self._prefixes[-1]
-        else:
-            prefix = ''
-        self._prefixes.append(prefix)
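-
-    # Illustrative sketch (not part of the original module; the dotted
-    # names are hypothetical): with "zodb.storage" as the innermost
-    # prefix, a nested prefix=".sub" resolves to "zodb.storage.sub",
-    # and get_classname(".Factory") then expands to
-    # "zodb.storage.sub.Factory".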
-
-    def pop_prefix(self):
-        del self._prefixes[-1]
-
-    def get_classname(self, name):
-        name = str(name)
-        if name.startswith("."):
-            return self._prefixes[-1] + name
-        else:
-            return name
-
-    def get_datatype(self, attrs, attrkey, default, base=None):
-        if attrs.has_key(attrkey):
-            dtname = self.get_classname(attrs[attrkey])
-        else:
-            convert = getattr(base, attrkey, None)
-            if convert is not None:
-                return convert
-            dtname = default
-
-        try:
-            return self._registry.get(dtname)
-        except ValueError, e:
-            self.error(e[0])
-
-    def get_sect_typeinfo(self, attrs, base=None):
-        keytype = self.get_datatype(attrs, "keytype", "basic-key", base)
-        valuetype = self.get_datatype(attrs, "valuetype", "string")
-        datatype = self.get_datatype(attrs, "datatype", "null", base)
-        return keytype, valuetype, datatype
-
-    def get_required(self, attrs):
-        if attrs.has_key("required"):
-            v = attrs["required"]
-            if v == "yes":
-                return True
-            elif v == "no":
-                return False
-            self.error("value for 'required' must be 'yes' or 'no'")
-        else:
-            return False
-
-    def get_ordinality(self, attrs):
-        min, max = 0, info.Unbounded
-        if self.get_required(attrs):
-            min = 1
-        return min, max
-
-    def get_sectiontype(self, attrs):
-        type = attrs.get("type")
-        if not type:
-            self.error("section must specify type")
-        return self._schema.gettype(type)
-
-    def get_key_info(self, attrs, element):
-        any, name, attribute = self.get_name_info(attrs, element)
-        if any == '*':
-            self.error(element + " may not specify '*' for name")
-        if not name and any != '+':
-            self.error(element + " name may not be omitted or empty")
-        datatype = self.get_datatype(attrs, "datatype", "string")
-        handler = self.get_handler(attrs)
-        return name or any, datatype, handler, attribute
-
-    def get_name_info(self, attrs, element):
-        name = attrs.get("name")
-        if not name:
-            self.error(element + " name must be specified and non-empty")
-        aname = attrs.get("attribute")
-        if aname:
-            aname = self.identifier(aname)
-            if aname.startswith("getSection"):
-                # reserved; used for SectionValue methods to get meta-info
-                self.error("attribute names may not start with 'getSection'")
-        if name in ("*", "+"):
-            if not aname:
-                self.error(
-                    "container attribute must be specified and non-empty"
-                    " when using '*' or '+' for a section name")
-            return name, None, aname
-        else:
-            # run the keytype converter to make sure this is a valid key
-            try:
-                name = self._stack[-1].keytype(name)
-            except ValueError, e:
-                self.error("could not convert key name to keytype: " + str(e))
-            if not aname:
-                aname = self.basic_key(name)
-                aname = self.identifier(aname.replace('-', '_'))
-            return None, name, aname
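-
-    # Illustrative sketch (not part of the original module; the key
-    # name is hypothetical): a concrete <key name="max-size"/> with no
-    # explicit attribute gets the basic-key form of the name with
-    # hyphens mapped to underscores, so the section value exposes it
-    # as the attribute ``max_size``.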
-
-    # schema loading logic
-
-    def characters_default(self, data):
-        key = self._attrs.get("key")
-        self._stack[-1].adddefault(data, self._position, key)
-
-    def characters_description(self, data):
-        if self._stack[-1].description is not None:
-            self.error(
-                "at most one <description> may be used for each element")
-        self._stack[-1].description = data
-
-    def characters_example(self, data):
-        self._stack[-1].example = data
-
-    def characters_metadefault(self, data):
-        self._stack[-1].metadefault = data
-
-    def start_import(self, attrs):
-        src = attrs.get("src", "").strip()
-        pkg = attrs.get("package", "").strip()
-        file = attrs.get("file", "").strip()
-        if not (src or pkg):
-            self.error("import must specify either src or package")
-        if src and pkg:
-            self.error("import may only specify one of src or package")
-        if src:
-            if file:
-                self.error("import may not specify file and src")
-            src = url.urljoin(self._url, src)
-            src, fragment = url.urldefrag(src)
-            if fragment:
-                self.error("import src many not include"
-                           " a fragment identifier")
-            schema = self._loader.loadURL(src)
-            for n in schema.gettypenames():
-                self._schema.addtype(schema.gettype(n))
-        else:
-            if os.path.dirname(file):
-                self.error("file may not include a directory part")
-            pkg = self.get_classname(pkg)
-            src = self._loader.schemaComponentSource(pkg, file)
-            if not self._schema.hasComponent(src):
-                self._schema.addComponent(src)
-                self.loadComponent(src)
-
-    def loadComponent(self, src):
-        r = self._loader.openResource(src)
-        parser = ComponentParser(self._loader, src, self._schema)
-        try:
-            xml.sax.parse(r.file, parser)
-        finally:
-            r.close()
-
-    def end_import(self):
-        pass
-
-    def start_sectiontype(self, attrs):
-        name = attrs.get("name")
-        if not name:
-            self.error("sectiontype name must not be omitted or empty")
-        name = self.basic_key(name)
-        self.push_prefix(attrs)
-        if attrs.has_key("extends"):
-            basename = self.basic_key(attrs["extends"])
-            base = self._schema.gettype(basename)
-            if base.isabstract():
-                self.error("sectiontype cannot extend an abstract type")
-            keytype, valuetype, datatype = self.get_sect_typeinfo(attrs, base)
-            sectinfo = self._schema.deriveSectionType(
-                base, name, keytype, valuetype, datatype)
-        else:
-            keytype, valuetype, datatype = self.get_sect_typeinfo(attrs)
-            sectinfo = self._schema.createSectionType(
-                name, keytype, valuetype, datatype)
-        if attrs.has_key("implements"):
-            ifname = self.basic_key(attrs["implements"])
-            interface = self._schema.gettype(ifname)
-            if not interface.isabstract():
-                self.error(
-                    "type specified by implements is not an abstracttype")
-            interface.addsubtype(sectinfo)
-        self._stack.append(sectinfo)
-
-    def end_sectiontype(self):
-        self.pop_prefix()
-        self._stack.pop()
-
-    def start_section(self, attrs):
-        sectiontype = self.get_sectiontype(attrs)
-        handler = self.get_handler(attrs)
-        min = self.get_required(attrs) and 1 or 0
-        any, name, attribute = self.get_name_info(attrs, "section")
-        if any and not attribute:
-            self.error(
-                "attribute must be specified if section name is '*' or '+'")
-        section = info.SectionInfo(any or name, sectiontype,
-                                   min, 1, handler, attribute)
-        self._stack[-1].addsection(name, section)
-        self._stack.append(section)
-
-    def end_section(self):
-        self._stack.pop()
-
-    def start_multisection(self, attrs):
-        sectiontype = self.get_sectiontype(attrs)
-        min, max = self.get_ordinality(attrs)
-        any, name, attribute = self.get_name_info(attrs, "multisection")
-        if any not in ("*", "+"):
-            self.error("multisection must specify '*' or '+' for the name")
-        handler = self.get_handler(attrs)
-        section = info.SectionInfo(any or name, sectiontype,
-                                   min, max, handler, attribute)
-        self._stack[-1].addsection(name, section)
-        self._stack.append(section)
-
-    def end_multisection(self):
-        self._stack.pop()
-
-    def start_abstracttype(self, attrs):
-        name = attrs.get("name")
-        if not name:
-            self.error("abstracttype name must not be omitted or empty")
-        name = self.basic_key(name)
-        abstype = info.AbstractType(name)
-        self._schema.addtype(abstype)
-        self._stack.append(abstype)
-
-    def end_abstracttype(self):
-        self._stack.pop()
-
-    def start_key(self, attrs):
-        name, datatype, handler, attribute = self.get_key_info(attrs, "key")
-        min = self.get_required(attrs) and 1 or 0
-        key = info.KeyInfo(name, datatype, min, handler, attribute)
-        if attrs.has_key("default"):
-            if min:
-                self.error("required key cannot have a default value")
-            key.adddefault(str(attrs["default"]).strip(),
-                           self.get_position())
-        if name != "+":
-            key.finish()
-        self._stack[-1].addkey(key)
-        self._stack.append(key)
-
-    def end_key(self):
-        key = self._stack.pop()
-        if key.name == "+":
-            key.computedefault(self._stack[-1].keytype)
-            key.finish()
-
-    def start_multikey(self, attrs):
-        if attrs.has_key("default"):
-            self.error("default values for multikey must be given using"
-                       " 'default' elements")
-        name, datatype, handler, attribute = self.get_key_info(attrs,
-                                                               "multikey")
-        min, max = self.get_ordinality(attrs)
-        key = info.MultiKeyInfo(name, datatype, min, max, handler, attribute)
-        self._stack[-1].addkey(key)
-        self._stack.append(key)
-
-    def end_multikey(self):
-        multikey = self._stack.pop()
-        if multikey.name == "+":
-            multikey.computedefault(self._stack[-1].keytype)
-        multikey.finish()
-
-    # datatype conversion wrappers
-
-    def basic_key(self, s):
-        try:
-            return self._basic_key(s)
-        except ValueError, e:
-            self.error(e[0])
-
-    def identifier(self, s):
-        try:
-            return self._identifier(s)
-        except ValueError, e:
-            self.error(e[0])
-
-    # exception setup helpers
-
-    def initerror(self, e):
-        if self._locator is not None:
-            e.colno = self._locator.getColumnNumber()
-            e.lineno = self._locator.getLineNumber()
-            e.url = self._locator.getSystemId()
-        return e
-
-    def error(self, message):
-        raise self.initerror(ZConfig.SchemaError(message))
-
-
-class SchemaParser(BaseParser):
-
-    # needed by startElement() and endElement()
-    _handled_tags = BaseParser._handled_tags + ("schema",)
-    _top_level = "schema"
-
-    def __init__(self, loader, url, extending_parser=None):
-        BaseParser.__init__(self, loader, url)
-        self._extending_parser = extending_parser
-        self._base_keytypes = []
-        self._base_datatypes = []
-
-    def start_schema(self, attrs):
-        self.push_prefix(attrs)
-        handler = self.get_handler(attrs)
-        keytype, valuetype, datatype = self.get_sect_typeinfo(attrs)
-
-        if self._extending_parser is None:
-            # We're not being inherited, so we need to create the schema
-            self._schema = info.SchemaType(keytype, valuetype, datatype,
-                                           handler, self._url, self._registry)
-        else:
-            # Parse into the extending ("subclass") parser's schema
-            self._schema = self._extending_parser._schema
-
-        self._stack = [self._schema]
-
-        if attrs.has_key("extends"):
-            sources = attrs["extends"].split()
-            sources.reverse()
-
-            for src in sources:
-                src = url.urljoin(self._url, src)
-                src, fragment = url.urldefrag(src)
-                if fragment:
-                    self.error("schema extends many not include"
-                               " a fragment identifier")
-                self.extendSchema(src)
-
-            # Inherit keytype from bases, if unspecified and not conflicting
-            if self._base_keytypes and not attrs.has_key("keytype"):
-                keytype = self._base_keytypes[0]
-                for kt in self._base_keytypes[1:]:
-                    if kt is not keytype:
-                        self.error("base schemas have conflicting keytypes,"
-                                   " but no keytype was specified in the"
-                                   " extending schema")
-
-            # Inherit datatype from bases, if unspecified and not conflicting
-            if self._base_datatypes and not attrs.has_key("datatype"):
-                datatype = self._base_datatypes[0]
-                for dt in self._base_datatypes[1:]:
-                    if dt is not datatype:
-                        self.error("base schemas have conflicting datatypes,"
-                                   " but no datatype was specified in the"
-                                   " extending schema")
-
-        # Reset the schema types to our own, while we parse the schema body
-        self._schema.keytype = keytype
-        self._schema.valuetype = valuetype
-        self._schema.datatype = datatype
-
-        # Update base key/datatypes for the "extending" parser
-        if self._extending_parser is not None:
-            self._extending_parser._base_keytypes.append(keytype)
-            self._extending_parser._base_datatypes.append(datatype)
-
-
-    def extendSchema(self, src):
-        parser = SchemaParser(self._loader, src, self)
-        r = self._loader.openResource(src)
-        try:
-            xml.sax.parse(r.file, parser)
-        finally:
-            r.close()
-
-    def end_schema(self):
-        del self._stack[-1]
-        assert not self._stack
-        self.pop_prefix()
-        assert not self._prefixes
-
-
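
For orientation, a hedged sketch of what the "extends" handling above enables; derived.xml is a hypothetical file, while base.xml matches tests/input/base.xml later in this patch:

    import ZConfig

    # derived.xml (hypothetical):  <schema extends="base.xml"/>
    # base.xml (see tests/input/base.xml below):
    #     <schema><sectiontype name="type-X"/></schema>
    schema = ZConfig.loadSchema("derived.xml")
    schema.gettype("type-X")    # section type inherited from the base schema
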
-class ComponentParser(BaseParser):
-
-    _handled_tags = BaseParser._handled_tags + ("component",)
-    _top_level = "component"
-
-    def __init__(self, loader, url, schema):
-        BaseParser.__init__(self, loader, url)
-        self._parent = schema
-
-    def characters_description(self, data):
-        if self._stack:
-            self._stack[-1].description = data
-
-    def start_key(self, attrs):
-        self._check_not_toplevel("key")
-        BaseParser.start_key(self, attrs)
-
-    def start_multikey(self, attrs):
-        self._check_not_toplevel("multikey")
-        BaseParser.start_multikey(self, attrs)
-
-    def start_section(self, attrs):
-        self._check_not_toplevel("section")
-        BaseParser.start_section(self, attrs)
-
-    def start_multisection(self, attrs):
-        self._check_not_toplevel("multisection")
-        BaseParser.start_multisection(self, attrs)
-
-    def start_component(self, attrs):
-        self._schema = self._parent
-        self.push_prefix(attrs)
-
-    def end_component(self):
-        self.pop_prefix()
-
-    def _check_not_toplevel(self, what):
-        if not self._stack:
-            self.error("cannot define top-level %s in a schema %s"
-                       % (what, self._top_level))
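
ComponentParser handles <component> documents, which normally reach a schema through %import; this sketch mirrors test_cfgimports.py later in this patch, using the widget test library also shown below:

    from StringIO import StringIO

    import ZConfig
    from ZConfig.loader import ConfigLoader

    schema = ZConfig.loadSchemaFile(StringIO("<schema/>"))
    loader = ConfigLoader(schema)
    config, handlers = loader.loadFile(
        StringIO("%import ZConfig.tests.library.widget\n"))
    # The component's section types land on the loader's private schema copy:
    loader.schema.gettype("widget-a")
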
diff --git a/branches/bug1734/src/ZConfig/scripts/zconfig b/branches/bug1734/src/ZConfig/scripts/zconfig
deleted file mode 100755
index 29c79340..00000000
--- a/branches/bug1734/src/ZConfig/scripts/zconfig
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""zconfig: Script to check validity of a configuration file.
-
-Usage:
-
-    zconfig [options] [file...]
-
-Options:
-
-    -h
-    --help          Print this help text.
-
-    -s file
-    --schema file   Use the schema in 'file' to validate the configuration;
-                    this must be specified.
-
-Each file named on the command line is checked for syntax errors
-and schema conformance.  The schema must be specified.  If no files
-are specified and standard input is not a TTY, standard input is treated
-as a configuration file.  Specifying a schema and no configuration
-files causes the schema to be checked.
-
-"""
-
-import optparse
-import sys
-
-import ZConfig
-
-
-def main():
-    optparser = optparse.OptionParser(
-        usage="usage: %prog [-s FILE] [file ...]")
-    optparser.add_option(
-        "-s", "--schema", dest="schema",
-        help="use the schema in FILE (can be a URL)",
-        metavar="FILE")
-    options, args = optparser.parse_args()
-
-    if not options.schema:
-        print >>sys.stderr, "No schema specified, but is required."
-        usage(sys.stderr)
-        return 2
-    schema = ZConfig.loadSchema(options.schema)
-
-    if not args:
-        if sys.stdin.isatty():
-            # just checking the schema
-            return 0
-        else:
-            # stdin is a pipe
-            args = ["-"]
-
-    errors = 0
-    for fn in args:
-        try:
-            if fn == "-":
-                ZConfig.loadConfigFile(schema, sys.stdin)
-            else:
-                ZConfig.loadConfig(schema, fn)
-        except ZConfig.ConfigurationError, e:
-            print >>sys.stderr, str(e)
-            errors += 1
-
-    if errors:
-        return 1
-    else:
-        return 0
-
-
-def usage(fp):
-    print >>fp, __doc__
-
-
-if __name__ == "__main__":
-    sys.exit(main())
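
Programmatically, the script's validation loop reduces to roughly the following sketch; "myschema.xml" and "myapp.conf" are hypothetical file names:

    import sys

    import ZConfig

    schema = ZConfig.loadSchema("myschema.xml")
    try:
        ZConfig.loadConfig(schema, "myapp.conf")
    except ZConfig.ConfigurationError, e:
        print >>sys.stderr, str(e)
        sys.exit(1)
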
diff --git a/branches/bug1734/src/ZConfig/scripts/zconfig_schema2html b/branches/bug1734/src/ZConfig/scripts/zconfig_schema2html
deleted file mode 100755
index c2f9d713..00000000
--- a/branches/bug1734/src/ZConfig/scripts/zconfig_schema2html
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-__version__ = '$Revision: 1.3 $'[11:-2]
-
-import ZConfig.loader
-from ZConfig.info import *
-import sys, cgi
-
-def esc(x): return cgi.escape(str(x))
-def dt(x):
-    tn = type(x).__name__
-    if tn == 'instance':
-        return '%s %s'%(tn, x.__class__.__module__ + '.' + x.__class__.__name__)
-    elif tn == 'class':
-        return '%s %s'%(tn, x.__module__ + '.' + x.__name__)
-    else:
-        return '%s %s'%(tn, x.__name__)
-
-class explain:
-    done = []
-    def __call__(self, st):
-        if st.name in self.done:
-            return
-        self.done.append(st.name)
-
-        if st.description:
-            print st.description
-        for sub in st.getsubtypenames():
-            print '<dl>'
-            printContents(None, st.getsubtype(sub))
-            print '</dl>'
-explain = explain()
-
-def printSchema(schema):
-    print '<dl>'
-    for child in schema:
-        printContents(*child)
-    print '</dl>'
-
-def printContents(name, info):
-    if isinstance(info, SectionType):
-        print '<dt><b><i>', info.name, '</i></b> (%s)</dt>'%dt(info.datatype)
-        print '<dd>'
-        if info.description:
-            print info.description
-        print '<dl>'
-        for sub in info:
-            printContents(*sub)
-        print '</dl></dd>'
-    elif isinstance(info, SectionInfo):
-        st = info.sectiontype
-        if st.isabstract():
-            print '<dt><b><i>', st.name, '</i>', info.name, '</b></dt>'
-            print '<dd>'
-            if info.description:
-                print info.description
-            explain(st)
-            print '</dd>'
-        else:
-            print '<dt><b>', info.attribute, info.name, '</b>'
-            print '(%s)</dt>'%dt(info.datatype)
-            print '<dd><dl>'
-            for sub in info.sectiontype:
-                printContents(*sub)
-            print '</dl></dd>'
-    else:
-        print '<dt><b>',info.name, '</b>', '(%s)'%dt(info.datatype)
-        default = info.getdefault()
-        if isinstance(default, ValueInfo):
-            print '(default: %r)'%esc(default.value)
-        elif default is not None:
-            print '(default: %r)'%esc(default)
-        if info.metadefault:
-            print '(metadefault: %s)' % info.metadefault
-        print '</dt>'
-        if info.description:
-            print '<dd>',info.description,'</dd>'
-
-schema = ZConfig.loader.loadSchemaFile(sys.argv[1])
-
-print '''<html><body>
-<style>
-dl {margin: 0 0 1em 0;}
-</style>
-'''
-printSchema(schema)
-print '</body></html>'
-
-# vim: set filetype=python ts=4 sw=4 et si
-
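
Usage sketch (the script writes HTML to standard output; some-schema.xml is a hypothetical file name):

    zconfig_schema2html some-schema.xml > schema.html
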
diff --git a/branches/bug1734/src/ZConfig/substitution.py b/branches/bug1734/src/ZConfig/substitution.py
deleted file mode 100644
index 5b86c320..00000000
--- a/branches/bug1734/src/ZConfig/substitution.py
+++ /dev/null
@@ -1,86 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Substitution support for ZConfig values."""
-
-import ZConfig
-
-
-def substitute(s, mapping):
-    """Interpolate variables from `mapping` into `s`."""
-    if "$" in s:
-        result = ''
-        rest = s
-        while rest:
-            p, name, namecase, rest = _split(rest)
-            result += p
-            if name:
-                v = mapping.get(name)
-                if v is None:
-                    raise ZConfig.SubstitutionReplacementError(s, namecase)
-                result += v
-        return result
-    else:
-        return s
-
-
-def isname(s):
-    """Return True iff s is a valid substitution name."""
-    m = _name_match(s)
-    if m:
-        return m.group() == s
-    else:
-        return False
-
-
-def _split(s):
-    # Return a four tuple:  prefix, name, namecase, suffix
-    # - prefix is text that can be used literally in the result (may be '')
-    # - name is a referenced name, or None
-    # - namecase is the name with case preserved
-    # - suffix is trailing text that may contain additional references
-    #   (may be '' or None)
-    if "$" in s:
-        i = s.find("$")
-        c = s[i+1:i+2]
-        if c == "":
-            raise ZConfig.SubstitutionSyntaxError(
-                "illegal lone '$' at end of source")
-        if c == "$":
-            return s[:i+1], None, None, s[i+2:]
-        prefix = s[:i]
-        if c == "{":
-            m = _name_match(s, i + 2)
-            if not m:
-                raise ZConfig.SubstitutionSyntaxError(
-                    "'${' not followed by name")
-            name = m.group(0)
-            i = m.end() + 1
-            if not s.startswith("}", i - 1):
-                raise ZConfig.SubstitutionSyntaxError(
-                    "'${%s' not followed by '}'" % name)
-        else:
-            m = _name_match(s, i+1)
-            if not m:
-                raise ZConfig.SubstitutionSyntaxError(
-                    "'$' not followed by '$' or name")
-            name = m.group(0)
-            i = m.end()
-        return prefix, name.lower(), name, s[i:]
-    else:
-        return s, None, None, None
-
-
-import re
-_name_match = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*").match
-del re
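
A few worked examples of substitute(), following the rules implemented by _split() above (note that mapping keys are looked up lower-cased):

    from ZConfig.substitution import substitute

    substitute("Hello, $user!", {"user": "world"})   # -> 'Hello, world!'
    substitute("${user}s", {"user": "world"})        # -> 'worlds'
    substitute("cost: $$5", {})                      # -> 'cost: $5' ("$$" escapes)
    substitute("no references here", {})             # -> returned unchanged
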
diff --git a/branches/bug1734/src/ZConfig/tests/__init__.py b/branches/bug1734/src/ZConfig/tests/__init__.py
deleted file mode 100644
index db362ef8..00000000
--- a/branches/bug1734/src/ZConfig/tests/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for the configuration data structures and loader.
-
-$Id: __init__.py,v 1.2 2003/01/03 21:05:56 fdrake Exp $
-"""
diff --git a/branches/bug1734/src/ZConfig/tests/input/base-datatype1.xml b/branches/bug1734/src/ZConfig/tests/input/base-datatype1.xml
deleted file mode 100644
index d0659396..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/base-datatype1.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<schema datatype="ZConfig.tests.test_schema.MySection">
-  <sectiontype name="type-1"/>
-</schema>
-
diff --git a/branches/bug1734/src/ZConfig/tests/input/base-datatype2.xml b/branches/bug1734/src/ZConfig/tests/input/base-datatype2.xml
deleted file mode 100644
index 076b0bc8..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/base-datatype2.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<schema datatype="ZConfig.tests.test_schema.appsection">
-  <sectiontype name="type-2"/>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/base-keytype1.xml b/branches/bug1734/src/ZConfig/tests/input/base-keytype1.xml
deleted file mode 100644
index 11b89cdb..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/base-keytype1.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<schema keytype="basic-key">
-  <sectiontype name="type-1"/>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/base-keytype2.xml b/branches/bug1734/src/ZConfig/tests/input/base-keytype2.xml
deleted file mode 100644
index c5953422..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/base-keytype2.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<schema keytype="ZConfig.tests.test_schema.uppercase">
-  <sectiontype name="type-2"/>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/base.xml b/branches/bug1734/src/ZConfig/tests/input/base.xml
deleted file mode 100644
index 46824eed..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/base.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-<schema>
-  <sectiontype name="type-X"/>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/include.conf b/branches/bug1734/src/ZConfig/tests/input/include.conf
deleted file mode 100644
index 52d5b56d..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/include.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-var2 value2
-%include simple.conf
-var3 value3
-var4 $name
diff --git a/branches/bug1734/src/ZConfig/tests/input/inner.conf b/branches/bug1734/src/ZConfig/tests/input/inner.conf
deleted file mode 100644
index d3b9f978..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/inner.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-refouter $outervar
-%define innervar inner
diff --git a/branches/bug1734/src/ZConfig/tests/input/library.xml b/branches/bug1734/src/ZConfig/tests/input/library.xml
deleted file mode 100644
index 0bc4507d..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/library.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<schema>
-  <description>
-    Sample library of reusable data types.
-  </description>
-  <sectiontype name="type-a"/>
-  <sectiontype name="type-b"/>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/logger.xml b/branches/bug1734/src/ZConfig/tests/input/logger.xml
deleted file mode 100644
index 46e296aa..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/logger.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<schema>
-  <sectiontype name="loghandler">
-    <key name="constructor" datatype="constructor" required="yes"/>
-    <key name="formatter" datatype="constructor"
-         default="logging.Formatter()"/>
-  </sectiontype>
-
-  <sectiontype name="logger">
-     <key name="level" datatype="integer" default="info"/>
-     <multisection type="loghandler" attribute="handlers" name="*"/>
-  </sectiontype>
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/outer.conf b/branches/bug1734/src/ZConfig/tests/input/outer.conf
deleted file mode 100644
index 7d857460..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/outer.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-%define outervar outer
-%include inner.conf
-refinner $innervar
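
These two fixtures (inner.conf above and outer.conf here) exercise %define visibility across %include; per test_includes_with_defines later in this patch, defines share a single namespace in both directions. A hedged sketch, with the file path illustrative:

    import StringIO

    import ZConfig

    schema = ZConfig.loadSchemaFile(StringIO.StringIO(
        "<schema><key name='refinner'/><key name='refouter'/></schema>"))
    conf, handlers = ZConfig.loadConfig(schema, "outer.conf")
    conf.refouter   # 'outer' -- inner.conf resolved $outervar from outer.conf
    conf.refinner   # 'inner' -- outer.conf resolved $innervar from inner.conf
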
diff --git a/branches/bug1734/src/ZConfig/tests/input/simple.conf b/branches/bug1734/src/ZConfig/tests/input/simple.conf
deleted file mode 100644
index 6cec15e9..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/simple.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-empty
-
-var1 abc
-int-var  12
-float-var	12.02
-neg-int -2
-
-true-var-1 true
-true-var-2 on
-true-var-3 yes
-
-false-var-1 false
-false-var-2 off
-false-var-3 no
-
-list-1
-list-2 abc
-list-3 abc def ghi
-list-4 	[  what	now? 	]
-
-# These test the %define mechanism:
-
-%define dollars $$$$
-%define empty
-%define name value
-%define twowords two words
-
-getname $name
-getnametwice $name${name}
-getdollars $dollars
-getempty x${empty}y
-getwords abc $twowords def
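
For orientation, the %define lines above expand to the following values (asserted by test_define in test_config.py later in this patch):

    getname       value
    getnametwice  valuevalue
    getdollars    $$
    getempty      xy
    getwords      abc two words def
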
diff --git a/branches/bug1734/src/ZConfig/tests/input/simple.xml b/branches/bug1734/src/ZConfig/tests/input/simple.xml
deleted file mode 100644
index c0703f49..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/simple.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<schema>
-  <key name="empty"        />
-  <key name="var1"         />
-  <key name="getname"      />
-  <key name="getnametwice" />
-  <key name="getdollars"   />
-  <key name="getempty"     />
-  <key name="getwords"     />
-
-  <key name="int-var"     datatype="integer" />
-  <key name="float-var"   datatype="float"   />
-  <key name="neg-int"     datatype="integer" />
-  <key name="true-var-1"  datatype="boolean" />
-  <key name="true-var-2"  datatype="boolean" />
-  <key name="true-var-3"  datatype="boolean" />
-  <key name="false-var-1" datatype="boolean" />
-  <key name="false-var-2" datatype="boolean" />
-  <key name="false-var-3" datatype="boolean" />
-
-  <key name="list-1" datatype="string-list" />
-  <key name="list-2" datatype="string-list" />
-  <key name="list-3" datatype="string-list" />
-  <key name="list-4" datatype="string-list" />
-
-  <!-- added by include.conf -->
-  <key name="var2" />
-  <key name="var3" />
-  <key name="var4" />
-</schema>
diff --git a/branches/bug1734/src/ZConfig/tests/input/simplesections.conf b/branches/bug1734/src/ZConfig/tests/input/simplesections.conf
deleted file mode 100644
index d00023f7..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/simplesections.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-var foo
-var-0 foo-0
-
-<section name>
-  var bar
-  var-one  splat
-</section>
-
-var-1 foo-1
-
-<section delegate>
-  var spam
-  var-two  stuff
-</section>
-
-var-2 foo-2
-
-<section another>
-  var  quack!
-  var-three  yet
-</section>
-
-var-3 foo-3
-
-# An anonymous empty section:
-<section />
-
-var-4 foo-4
-
-# A fairly trivial section:
-<trivial>
-  var triv
-</trivial>
-
-var-5 foo-5
-
-# A minimal section:
-<minimal/>
-
-var-6 foo-6
diff --git a/branches/bug1734/src/ZConfig/tests/input/simplesections.xml b/branches/bug1734/src/ZConfig/tests/input/simplesections.xml
deleted file mode 100644
index 2638539c..00000000
--- a/branches/bug1734/src/ZConfig/tests/input/simplesections.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-<schema>
-  <sectiontype name="section">
-    <key name="var" />
-    <key name="var-one" />
-    <key name="var-two" />
-    <key name="var-three" />
-  </sectiontype>
-  <sectiontype name="minimal" />
-  <sectiontype name="trivial">
-    <key name="var" />
-  </sectiontype>
-
-  <multisection type="section" name="*" attribute="sections" />
-  <section type="minimal" name="*" attribute="minimal" />
-  <section type="trivial" name="*" attribute="trivial" />
-
-  <key name="var" />
-  <key name="var-0" />
-  <key name="var-1" />
-  <key name="var-2" />
-  <key name="var-3" />
-  <key name="var-4" />
-  <key name="var-5" />
-  <key name="var-6" />
-</schema>
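
A hedged sketch of loading this schema/config pair, with CONFIG_BASE as defined in tests/support.py below:

    import ZConfig
    from ZConfig.tests.support import CONFIG_BASE

    schema = ZConfig.loadSchema(CONFIG_BASE + "simplesections.xml")
    conf, handlers = ZConfig.loadConfig(schema, CONFIG_BASE + "simplesections.conf")
    [s.getSectionName() for s in conf.sections]
    # expected: ['name', 'delegate', 'another', None];
    # the anonymous <section /> should report None for its name
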
diff --git a/branches/bug1734/src/ZConfig/tests/library/README.txt b/branches/bug1734/src/ZConfig/tests/library/README.txt
deleted file mode 100644
index bec9c57f..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/README.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This is a sample library of configuration schema components.  This is
-used for testing.
diff --git a/branches/bug1734/src/ZConfig/tests/library/__init__.py b/branches/bug1734/src/ZConfig/tests/library/__init__.py
deleted file mode 100644
index 91b1aa49..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Make this a package.
diff --git a/branches/bug1734/src/ZConfig/tests/library/thing/__init__.py b/branches/bug1734/src/ZConfig/tests/library/thing/__init__.py
deleted file mode 100644
index 0a93d1ce..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/thing/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Example of a package that extends its __path__.
-
-$Id: __init__.py,v 1.2 2003/10/03 17:11:33 fdrake Exp $
-"""
-
-import os
-
-here = os.path.dirname(__file__)
-__path__.append(os.path.join(here, "extras"))
diff --git a/branches/bug1734/src/ZConfig/tests/library/thing/component.xml b/branches/bug1734/src/ZConfig/tests/library/thing/component.xml
deleted file mode 100644
index e130e757..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/thing/component.xml
+++ /dev/null
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="us-ascii"?>
-<component>
-  <abstracttype name='thing'/>
-  <sectiontype name='thing-a' implements='thing'>
-    <key name='thing-a-key' default='thing-a-default'/>
-  </sectiontype>
-  <sectiontype name='thing-b' implements='thing'>
-    <key name='thing-b-key' default='thing-b-default'/>
-  </sectiontype>
-</component>
diff --git a/branches/bug1734/src/ZConfig/tests/library/thing/extras/extras.xml b/branches/bug1734/src/ZConfig/tests/library/thing/extras/extras.xml
deleted file mode 100644
index 001b4649..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/thing/extras/extras.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<component>
-  <sectiontype name='extra-thing'>
-    <key name='some-key'/>
-  </sectiontype>
-</component>
diff --git a/branches/bug1734/src/ZConfig/tests/library/widget/__init__.py b/branches/bug1734/src/ZConfig/tests/library/widget/__init__.py
deleted file mode 100644
index 91b1aa49..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/widget/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Make this a package.
diff --git a/branches/bug1734/src/ZConfig/tests/library/widget/component.xml b/branches/bug1734/src/ZConfig/tests/library/widget/component.xml
deleted file mode 100644
index d74706c8..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/widget/component.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<component>
-  <abstracttype name='widget'/>
-  <sectiontype name='widget-a' implements='widget'>
-    <key name='widget-a-key' default='widget-a-default'/>
-  </sectiontype>
-  <sectiontype name='widget-b' implements='widget' extends='widget-a'/>
-</component>
diff --git a/branches/bug1734/src/ZConfig/tests/library/widget/extra.xml b/branches/bug1734/src/ZConfig/tests/library/widget/extra.xml
deleted file mode 100644
index 5a2fe3f4..00000000
--- a/branches/bug1734/src/ZConfig/tests/library/widget/extra.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<component>
-  <sectiontype name='extra-type'>
-    <key name='some-key'/>
-  </sectiontype>
-</component>
diff --git a/branches/bug1734/src/ZConfig/tests/runtests.bat b/branches/bug1734/src/ZConfig/tests/runtests.bat
deleted file mode 100755
index 98979b67..00000000
--- a/branches/bug1734/src/ZConfig/tests/runtests.bat
+++ /dev/null
@@ -1,12 +0,0 @@
-@echo off
-
-rem  Simple script to run the tests on Windows.
-
-rem  The paths to different versions of Python need to be
-rem  edited for the system this is being run on; comment
-rem  out lines that aren't needed or wanted.
-
-\Python213\python runtests.py
-\Python221\python runtests.py
-\Python222\python runtests.py
-\Python230\python runtests.py
diff --git a/branches/bug1734/src/ZConfig/tests/runtests.py b/branches/bug1734/src/ZConfig/tests/runtests.py
deleted file mode 100755
index 31d3bf83..00000000
--- a/branches/bug1734/src/ZConfig/tests/runtests.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#! /usr/bin/env python
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Script to run all the regression tests for the ZConfig package."""
-
-import os
-import sys
-import unittest
-
-if __name__ == "__main__":
-    __file__ = sys.argv[0]
-
-TESTDIR = os.path.dirname(os.path.abspath(__file__))
-
-PKGDIR = os.path.dirname(TESTDIR) # the ZConfig package directory
-TOPDIR = os.path.dirname(PKGDIR)
-
-COMPONENTS = os.path.join(PKGDIR, "components")
-
-TESTDIRS = {
-    "ZConfig.tests": TESTDIR,
-    "ZConfig.components.basic.tests": os.path.join(COMPONENTS,
-                                                   "basic", "tests"),
-    "ZConfig.components.logger.tests": os.path.join(COMPONENTS,
-                                                    "logger", "tests"),
-    }
-
-
-def load_tests(pkgname, name):
-    name = "%s.%s" % (pkgname, name)
-    __import__(name)
-    mod = sys.modules[name]
-    return mod.test_suite()
-
-def test_suite():
-    L = []
-    for pkgname, path in TESTDIRS.items():
-        for fn in os.listdir(path):
-            name, ext = os.path.splitext(fn)
-            if name[:4] == "test" and ext == ".py":
-                L.append(load_tests(pkgname, name))
-    if len(L) == 1:
-        return L[0]
-    else:
-        suite = unittest.TestSuite()
-        for t in L:
-            suite.addTest(t)
-        return suite
-
-if __name__ == "__main__":
-    if TOPDIR not in sys.path:
-        sys.path.insert(0, TOPDIR)
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZConfig/tests/support.py b/branches/bug1734/src/ZConfig/tests/support.py
deleted file mode 100644
index 155f9047..00000000
--- a/branches/bug1734/src/ZConfig/tests/support.py
+++ /dev/null
@@ -1,72 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Support code shared among the tests."""
-
-import os
-import StringIO
-import unittest
-import urllib
-
-import ZConfig
-
-from ZConfig.loader import ConfigLoader
-from ZConfig.url import urljoin
-
-
-try:
-    __file__
-except NameError:
-    import sys
-    __file__ = sys.argv[0]
-
-d = os.path.abspath(os.path.join(os.path.dirname(__file__), "input"))
-CONFIG_BASE = "file://%s/" % urllib.pathname2url(d)
-
-
-class TestBase(unittest.TestCase):
-    """Utility methods which can be used with the schema support."""
-
-    def load_both(self, schema_url, conf_url):
-        schema = self.load_schema(schema_url)
-        conf = self.load_config(schema, conf_url)
-        return schema, conf
-
-    def load_schema(self, relurl):
-        self.url = urljoin(CONFIG_BASE, relurl)
-        self.schema = ZConfig.loadSchema(self.url)
-        self.assert_(self.schema.issection())
-        return self.schema
-
-    def load_schema_text(self, text, url=None):
-        sio = StringIO.StringIO(text)
-        self.schema = ZConfig.loadSchemaFile(sio, url)
-        return self.schema
-
-    def load_config(self, schema, conf_url, num_handlers=0):
-        conf_url = urljoin(CONFIG_BASE, conf_url)
-        loader = self.create_config_loader(schema)
-        self.conf, self.handlers = loader.loadURL(conf_url)
-        self.assertEqual(len(self.handlers), num_handlers)
-        return self.conf
-
-    def load_config_text(self, schema, text, num_handlers=0, url=None):
-        sio = StringIO.StringIO(text)
-        loader = self.create_config_loader(schema)
-        self.conf, self.handlers = loader.loadFile(sio, url)
-        self.assertEqual(len(self.handlers), num_handlers)
-        return self.conf
-
-    def create_config_loader(self, schema):
-        return ConfigLoader(schema)
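
The test modules below all follow the same pattern; a minimal, hypothetical suite using these helpers:

    import unittest

    from ZConfig.tests.support import TestBase

    class ExampleTest(TestBase):
        def test_roundtrip(self):
            schema = self.load_schema_text("<schema><key name='k'/></schema>")
            conf = self.load_config_text(schema, "k v")
            self.assertEqual(conf.k, "v")
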
diff --git a/branches/bug1734/src/ZConfig/tests/test_cfgimports.py b/branches/bug1734/src/ZConfig/tests/test_cfgimports.py
deleted file mode 100644
index a3c58829..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_cfgimports.py
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the %import mechanism.
-
-$Id: test_cfgimports.py,v 1.1 2003/10/03 20:01:57 fdrake Exp $
-"""
-
-import unittest
-
-from StringIO import StringIO
-
-import ZConfig
-import ZConfig.tests.support
-
-
-class TestImportFromConfiguration(ZConfig.tests.support.TestBase):
-
-    def test_simple_import(self):
-        schema = self.load_schema_text("<schema/>")
-        loader = self.create_config_loader(schema)
-        config, _ = loader.loadFile(
-            StringIO("%import ZConfig.tests.library.widget\n"))
-        # make sure we now have a "private" schema object; the only
-        # way to get it is from the loader itself
-        self.assert_(schema is not loader.schema)
-        # make sure component types are only found on the private schema:
-        loader.schema.gettype("widget-b")
-        self.assertRaises(ZConfig.SchemaError, schema.gettype, "widget-b")
-
-    def test_repeated_import(self):
-        schema = self.load_schema_text("<schema/>")
-        loader = self.create_config_loader(schema)
-        config, _ = loader.loadFile(
-            StringIO("%import ZConfig.tests.library.widget\n"
-                     "%import ZConfig.tests.library.widget\n"))
-
-    def test_missing_import(self):
-        schema = self.load_schema_text("<schema/>")
-        loader = self.create_config_loader(schema)
-        self.assertRaises(ZConfig.SchemaError, loader.loadFile,
-                          StringIO("%import ZConfig.tests.missing\n"))
-
-
-def test_suite():
-    return unittest.makeSuite(TestImportFromConfiguration)
diff --git a/branches/bug1734/src/ZConfig/tests/test_cmdline.py b/branches/bug1734/src/ZConfig/tests/test_cmdline.py
deleted file mode 100644
index 0b1b2774..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_cmdline.py
+++ /dev/null
@@ -1,180 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Tests of the command-line integration."""
-
-import unittest
-
-import ZConfig
-
-from ZConfig.cmdline import ExtendedConfigLoader
-from ZConfig.tests.support import TestBase
-
-
-class CommandLineTest(TestBase):
-
-    def create_config_loader(self, schema):
-        loader = ExtendedConfigLoader(schema)
-        for item in self.clopts:
-            loader.addOption(*item)
-        return loader
-
-    def test_loading(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='st'>
-                <key name='innerkey'/>
-              </sectiontype>
-              <key name='mykey'/>
-              <section name='*' type='st' attribute='sect'/>
-            </schema>
-            """)
-        self.clopts = [("mykey=splat!", None),
-                       ("section/innerkey=spoogey", None)]
-        bag = self.create_config_loader(schema).cook()
-        # Test a variety of queries on the OptionBag:
-        self.assert_(bag.has_key("mykey"))
-        self.assert_(not bag.has_key("another"))
-        self.assertEqual(bag.get_section_info("st", None), None)
-        self.assertEqual(bag.get_section_info("st", "missing-sect"), None)
-        # Consume everything in the OptionBag:
-        L = bag.get_key("mykey")
-        s, pos = L[0]
-        self.assertEqual(len(L), 1)
-        self.assertEqual(s, "splat!")
-        bag2 = bag.get_section_info("st", "section")
-        self.assert_(bag2.has_key("innerkey"))
-        self.assert_(not bag2.has_key("another"))
-        L = bag2.get_key("innerkey")
-        s, pos = L[0]
-        self.assertEqual(len(L), 1)
-        self.assertEqual(s, "spoogey")
-        # "Finish" to make sure everything has been consumed:
-        bag2.finish()
-        bag.finish()
-
-    def test_named_sections(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <abstracttype name='at'/>
-              <sectiontype name='st1' implements='at'>
-                <key name='k1'/>
-              </sectiontype>
-              <sectiontype name='st2' implements='at'>
-                <key name='k2'/>
-              </sectiontype>
-              <section name='foo' type='at'/>
-              <section name='bar' type='st2'/>
-            </schema>
-            """)
-        self.clopts = [("foo/k1=v1", None), ("bar/k2=v2", ("someurl", 2, 3))]
-        bag = self.create_config_loader(schema).cook()
-        foo = bag.get_section_info("st2", "foo")
-        bar = bag.get_section_info("st2", "bar")
-        bag.finish()
-        self.assertEqual(bar.get_key("k2"), [("v2", ("someurl", 2, 3))])
-        bar.finish()
-        # Ignore foo for now; it's not really important *when* it fails.
-
-    simple_schema = None
-
-    def get_simple_schema(self):
-        if self.simple_schema is None:
-            self.__class__.simple_schema = self.load_schema_text("""\
-                <schema>
-                  <key name='k0'/>
-                  <key name='k1'/>
-                  <key name='k2' datatype='integer'/>
-                  <key name='k3' datatype='integer' default='19'/>
-                </schema>
-                """)
-        return self.simple_schema
-
-    def test_reading_config(self):
-        self.clopts = [("k1=stringvalue", None), ("k2=12", None)]
-        schema = self.get_simple_schema()
-        conf = self.load_config_text(schema, """\
-            k0 stuff
-            k1 replaced-stuff
-            k2 42
-            """)
-        self.assertEqual(conf.k0, "stuff")
-        self.assertEqual(conf.k1, "stringvalue")
-        self.assertEqual(conf.k2, 12)
-        self.assertEqual(conf.k3, 19)
-
-    def test_unknown_key(self):
-        self.clopts = [("foo=bar", None)]
-        schema = self.get_simple_schema()
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_too_many_keys(self):
-        self.clopts = [("k1=v1", None), ("k1=v2", None)]
-        schema = self.get_simple_schema()
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_bad_datatype(self):
-        self.clopts = [("k2=42.0", None)]
-        schema = self.get_simple_schema()
-        self.assertRaises(ZConfig.DataConversionError,
-                          self.load_config_text, schema, "")
-
-    def test_without_clopts(self):
-        self.clopts = []
-        schema = self.get_simple_schema()
-        conf = self.load_config_text(schema, "k3 42")
-        self.assertEqual(conf.k0, None)
-        self.assertEqual(conf.k1, None)
-        self.assertEqual(conf.k2, None)
-        self.assertEqual(conf.k3, 42)
-
-    def test_section_contents(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='st'>
-                <key name='k1'/>
-                <key name='k2' default='3' datatype='integer'/>
-                <multikey name='k3'>
-                  <default>k3-v1</default>
-                  <default>k3-v2</default>
-                  <default>k3-v3</default>
-                </multikey>
-              </sectiontype>
-              <section name='s1' type='st'/>
-              <section name='s2' type='st'/>
-            </schema>
-            """)
-        self.clopts = [("s1/k1=foo", None),
-                       ("s2/k3=value1", None),
-                       ("s2/k3=value2", None),
-                       ("s1/k2=99", None),
-                       ("s2/k3=value3", None),
-                       ("s2/k3=value4", None),
-                       ]
-        conf = self.load_config_text(schema, "<st s1/>\n<st s2/>")
-        self.assertEqual(conf.s1.k1, "foo")
-        self.assertEqual(conf.s1.k2, 99)
-        self.assertEqual(conf.s1.k3, ["k3-v1", "k3-v2", "k3-v3"])
-        self.assertEqual(conf.s2.k1, None)
-        self.assertEqual(conf.s2.k2, 3)
-        self.assertEqual(conf.s2.k3, ["value1", "value2", "value3", "value4"])
-
-
-def test_suite():
-    return unittest.makeSuite(CommandLineTest)
-
-if __name__ == "__main__":
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZConfig/tests/test_config.py b/branches/bug1734/src/ZConfig/tests/test_config.py
deleted file mode 100644
index db922938..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_config.py
+++ /dev/null
@@ -1,182 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the configuration data structures and loader."""
-
-import os
-import StringIO
-import tempfile
-import unittest
-
-import ZConfig
-
-from ZConfig.tests.support import CONFIG_BASE
-
-
-class ConfigurationTestCase(unittest.TestCase):
-
-    schema = None
-
-    def get_schema(self):
-        if self.schema is None:
-            ConfigurationTestCase.schema = ZConfig.loadSchema(
-                CONFIG_BASE + "simple.xml")
-        return self.schema
-
-    def load(self, relurl, context=None):
-        url = CONFIG_BASE + relurl
-        self.conf, self.handlers = ZConfig.loadConfig(self.get_schema(), url)
-        conf = self.conf
-        #self.assertEqual(conf.url, url)
-        self.assert_(conf.getSectionName() is None)
-        self.assert_(conf.getSectionType() is None)
-        #self.assert_(conf.delegate is None)
-        return conf
-
-    def loadtext(self, text):
-        sio = StringIO.StringIO(text)
-        return self.loadfile(sio)
-
-    def loadfile(self, file):
-        schema = self.get_schema()
-        self.conf, self.handlers = ZConfig.loadConfigFile(schema, file)
-        return self.conf
-
-    def check_simple_gets(self, conf):
-        self.assertEqual(conf.empty, '')
-        self.assertEqual(conf.int_var, 12)
-        self.assertEqual(conf.neg_int, -2)
-        self.assertEqual(conf.float_var, 12.02)
-        self.assertEqual(conf.var1, 'abc')
-        self.assert_(conf.true_var_1)
-        self.assert_(conf.true_var_2)
-        self.assert_(conf.true_var_3)
-        self.assert_(not conf.false_var_1)
-        self.assert_(not conf.false_var_2)
-        self.assert_(not conf.false_var_3)
-        self.assertEqual(conf.list_1, [])
-        self.assertEqual(conf.list_2, ['abc'])
-        self.assertEqual(conf.list_3, ['abc', 'def', 'ghi'])
-        self.assertEqual(conf.list_4, ['[', 'what', 'now?', ']'])
-
-    def test_simple_gets(self):
-        conf = self.load("simple.conf")
-        self.check_simple_gets(conf)
-
-    def test_type_errors(self):
-        Error = ZConfig.DataConversionError
-        raises = self.assertRaises
-        raises(Error, self.loadtext, "int-var true")
-        raises(Error, self.loadtext, "float-var true")
-        raises(Error, self.loadtext, "neg-int false")
-        raises(Error, self.loadtext, "true-var-1 0")
-        raises(Error, self.loadtext, "true-var-1 1")
-        raises(Error, self.loadtext, "true-var-1 -1")
-
-    def test_simple_sections(self):
-        self.schema = ZConfig.loadSchema(CONFIG_BASE + "simplesections.xml")
-        conf = self.load("simplesections.conf")
-        self.assertEqual(conf.var, "foo")
-        # check each interleaved position between sections
-        for c in "0123456":
-            self.assertEqual(getattr(conf, "var_" +c), "foo-" + c)
-        sect = [sect for sect in conf.sections
-                if sect.getSectionName() == "name"][0]
-        self.assertEqual(sect.var, "bar")
-        self.assertEqual(sect.var_one, "splat")
-        self.assert_(sect.var_three is None)
-        sect = [sect for sect in conf.sections
-                if sect.getSectionName() == "delegate"][0]
-        self.assertEqual(sect.var, "spam")
-        self.assertEqual(sect.var_two, "stuff")
-        self.assert_(sect.var_three is None)
-
-    def test_include(self):
-        conf = self.load("include.conf")
-        self.assertEqual(conf.var1, "abc")
-        self.assertEqual(conf.var2, "value2")
-        self.assertEqual(conf.var3, "value3")
-        self.assertEqual(conf.var4, "value")
-
-    def test_includes_with_defines(self):
-        self.schema = ZConfig.loadSchemaFile(StringIO.StringIO("""\
-            <schema>
-              <key name='refinner' />
-              <key name='refouter' />
-            </schema>
-            """))
-        conf = self.load("outer.conf")
-        self.assertEqual(conf.refinner, "inner")
-        self.assertEqual(conf.refouter, "outer")
-
-    def test_define(self):
-        conf = self.load("simple.conf")
-        self.assertEqual(conf.getname, "value")
-        self.assertEqual(conf.getnametwice, "valuevalue")
-        self.assertEqual(conf.getdollars, "$$")
-        self.assertEqual(conf.getempty, "xy")
-        self.assertEqual(conf.getwords, "abc two words def")
-
-    def test_define_errors(self):
-        self.assertRaises(ZConfig.ConfigurationSyntaxError,
-                          self.loadtext, "%define\n")
-        self.assertRaises(ZConfig.ConfigurationSyntaxError,
-                          self.loadtext, "%define abc-def\n")
-        self.assertRaises(ZConfig.ConfigurationSyntaxError,
-                          self.loadtext, "%define a value\n%define a value\n")
-
-    def test_fragment_ident_disallowed(self):
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load, "simplesections.conf#another")
-
-    def test_load_from_fileobj(self):
-        sio = StringIO.StringIO("%define name value\n"
-                                "getname x $name y \n")
-        cf = self.loadfile(sio)
-        self.assertEqual(cf.getname, "x value y")
-
-    def test_load_from_abspath(self):
-        fn = self.write_tempfile()
-        try:
-            self.check_load_from_path(fn)
-        finally:
-            os.unlink(fn)
-
-    def test_load_from_relpath(self):
-        fn = self.write_tempfile()
-        dir, name = os.path.split(fn)
-        pwd = os.getcwd()
-        try:
-            os.chdir(dir)
-            self.check_load_from_path(name)
-        finally:
-            os.chdir(pwd)
-            os.unlink(fn)
-
-    def write_tempfile(self):
-        fn = tempfile.mktemp()
-        fp = open(fn, "w")
-        fp.write("var1 value\n")
-        fp.close()
-        return fn
-
-    def check_load_from_path(self, path):
-        schema = self.get_schema()
-        ZConfig.loadConfig(schema, path)
-
-
-def test_suite():
-    return unittest.makeSuite(ConfigurationTestCase)
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZConfig/tests/test_cookbook.py b/branches/bug1734/src/ZConfig/tests/test_cookbook.py
deleted file mode 100644
index d895fc34..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_cookbook.py
+++ /dev/null
@@ -1,71 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of examples from the online cookbook, so we don't break them
-down the road.  Unless we really mean to.
-
-The ZConfig Cookbook is available online at:
-
-    http://dev.zope.org/Zope3/ZConfig
-
-"""
-
-import unittest
-
-from ZConfig.tests.support import TestBase
-
-
-def basic_key_mapping_password_to_passwd(key):
-    # Lower-case the key since that's what basic-key does:
-    key = key.lower()
-    # Now map password to passwd:
-    if key == "password":
-        key = "passwd"
-    return key
-
-def user_info_conversion(section):
-    return section
-
-
-class CookbookTestCase(TestBase):
-
-    def test_rewriting_key_names(self):
-        schema = self.load_schema_text("""
-            <schema prefix='%s'>
-              <sectiontype name='userinfo' datatype='.user_info_conversion'
-                           keytype='.basic_key_mapping_password_to_passwd'>
-                <key name='userid' datatype='integer'/>
-                <key name='username' datatype='identifier'/>
-                <key name='password'/>
-              </sectiontype>
-              <section type='userinfo' name='*' attribute='userinfo'/>
-            </schema>
-            """ % __name__)
-        config = self.load_config_text(schema, """\
-            <userinfo>
-              USERID 42
-              USERNAME foouser
-              PASSWORD yeah-right
-            </userinfo>
-            """)
-        self.assertEqual(config.userinfo.userid, 42)
-        self.assertEqual(config.userinfo.username, "foouser")
-        self.assertEqual(config.userinfo.passwd, "yeah-right")
-        self.assert_(not hasattr(config.userinfo, "password"))
-
-
-def test_suite():
-    return unittest.makeSuite(CookbookTestCase)
-
-if __name__ == "__main__":
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZConfig/tests/test_datatypes.py b/branches/bug1734/src/ZConfig/tests/test_datatypes.py
deleted file mode 100644
index 55656023..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_datatypes.py
+++ /dev/null
@@ -1,393 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of standard ZConfig datatypes."""
-
-import os
-import sys
-import shutil
-import socket
-import datetime
-import tempfile
-import unittest
-
-import ZConfig.datatypes
-
-try:
-    here = __file__
-except NameError:
-    here = sys.argv[0]
-
-here = os.path.abspath(here)
-
-try:
-    unicode
-except NameError:
-    have_unicode = False
-else:
-    have_unicode = True
-
-
-class DatatypeTestCase(unittest.TestCase):
-    types = ZConfig.datatypes.Registry()
-
-    def test_datatype_basickey(self):
-        convert = self.types.get("basic-key")
-        eq = self.assertEqual
-        raises = self.assertRaises
-
-        eq(convert("abc"), "abc")
-        eq(convert("ABC_DEF.123"), "abc_def.123")
-        eq(convert("Abc-Def-456"), "abc-def-456")
-        eq(convert("Abc.Def"), "abc.def")
-
-        raises(ValueError, convert, "_abc")
-        raises(ValueError, convert, "-abc")
-        raises(ValueError, convert, "123")
-        raises(ValueError, convert, "")
-
-    def test_datatype_boolean(self):
-        convert = self.types.get("boolean")
-        check = self.assert_
-        raises = self.assertRaises
-
-        check(convert("on"))
-        check(convert("true"))
-        check(convert("yes"))
-        check(not convert("off"))
-        check(not convert("false"))
-        check(not convert("no"))
-        raises(ValueError, convert, '0')
-        raises(ValueError, convert, '1')
-        raises(ValueError, convert, '')
-        raises(ValueError, convert, 'junk')
-
-    def test_datatype_float(self):
-        convert = self.types.get("float")
-        eq = self.assertEqual
-        raises = self.assertRaises
-
-        eq(convert("1"), 1.0)
-        self.assert_(type(convert(1)) is type(1.0))
-        eq(convert("1.1"), 1.1)
-        eq(convert("50.50"), 50.50)
-        eq(convert("-50.50"), -50.50)
-        eq(convert(0), 0.0)
-        eq(convert("0"), 0.0)
-        eq(convert("-0"), 0.0)
-        eq(convert("0.0"), 0.0)
-
-        raises(ValueError, convert, "junk")
-        raises(ValueError, convert, "0x234.1.9")
-        raises(ValueError, convert, "0.9-")
-
-        # These are not portable representations; make sure they are
-        # disallowed everywhere for consistency.
-        raises(ValueError, convert, "inf")
-        raises(ValueError, convert, "-inf")
-        raises(ValueError, convert, "nan")
-
-        if have_unicode:
-            raises(ValueError, convert, unicode("inf"))
-            raises(ValueError, convert, unicode("-inf"))
-            raises(ValueError, convert, unicode("nan"))
-
-    def test_datatype_identifier(self):
-        convert = self.types.get("identifier")
-        raises = self.assertRaises
-        self.check_names(convert)
-        self.check_never_namelike(convert)
-        raises(ValueError, convert, ".abc")
-
-    def check_names(self, convert):
-        eq = self.assert_ascii_equal
-        eq(convert, "AbcDef")
-        eq(convert, "a________")
-        eq(convert, "abc_def")
-        eq(convert, "int123")
-        eq(convert, "_abc")
-        eq(convert, "_123")
-        eq(convert, "__dict__")
-
-    def assert_ascii_equal(self, convert, value):
-        v = convert(value)
-        self.assertEqual(v, value)
-        self.assert_(isinstance(v, str))
-        if have_unicode:
-            unicode_value = unicode(value)
-            v = convert(unicode_value)
-            self.assertEqual(v, value)
-            self.assert_(isinstance(v, str))
-
-    def check_never_namelike(self, convert):
-        raises = self.assertRaises
-        raises(ValueError, convert, "2345")
-        raises(ValueError, convert, "23.45")
-        raises(ValueError, convert, ".45")
-        raises(ValueError, convert, "23.")
-        raises(ValueError, convert, "abc.")
-        raises(ValueError, convert, "-abc")
-        raises(ValueError, convert, "-123")
-        raises(ValueError, convert, "abc-")
-        raises(ValueError, convert, "123-")
-        raises(ValueError, convert, "-")
-        raises(ValueError, convert, ".")
-        raises(ValueError, convert, "&%$*()")
-        raises(ValueError, convert, "")
-
-    def test_datatype_dotted_name(self):
-        convert = self.types.get("dotted-name")
-        raises = self.assertRaises
-        self.check_names(convert)
-        self.check_dotted_names(convert)
-        self.check_never_namelike(convert)
-        raises(ValueError, convert, "abc.")
-        raises(ValueError, convert, ".abc.")
-        raises(ValueError, convert, "abc.def.")
-        raises(ValueError, convert, ".abc.def.")
-        raises(ValueError, convert, ".abc.def")
-
-    def test_datatype_dotted_suffix(self):
-        convert = self.types.get("dotted-suffix")
-        eq = self.assert_ascii_equal
-        raises = self.assertRaises
-        self.check_names(convert)
-        self.check_dotted_names(convert)
-        self.check_never_namelike(convert)
-        eq(convert, ".a")
-        eq(convert, ".a.b")
-        eq(convert, ".a.b.c.d.e.f.g.h.i.j.k.l.m.n.o")
-        raises(ValueError, convert, "abc.")
-        raises(ValueError, convert, ".abc.")
-        raises(ValueError, convert, "abc.def.")
-        raises(ValueError, convert, ".abc.def.")
-
-    def check_dotted_names(self, convert):
-        eq = self.assert_ascii_equal
-        eq(convert, "abc.def")
-        eq(convert, "abc.def.ghi")
-        eq(convert, "a.d.g.g.g.g.g.g.g")
-
-    def test_datatype_inet_address(self):
-        convert = self.types.get("inet-address")
-        eq = self.assertEqual
-        defhost = ZConfig.datatypes.DEFAULT_HOST
-        eq(convert("Host.Example.Com:80"), ("host.example.com", 80))
-        eq(convert(":80"),                 (defhost, 80))
-        eq(convert("80"),                  (defhost, 80))
-        eq(convert("host.EXAMPLE.com"),    ("host.example.com", None))
-        self.assertRaises(ValueError, convert, "40 # foo")
-
-    def test_datatype_integer(self):
-        convert = self.types.get("integer")
-        eq = self.assertEqual
-        raises = self.assertRaises
-
-        eq(convert('-100'), -100)
-        eq(convert('-1'), -1)
-        eq(convert('-0'), 0)
-        eq(convert('0'), 0)
-        eq(convert('1'), 1)
-        eq(convert('100'), 100)
-        eq(convert('65535'), 65535)
-        eq(convert('65536'), 65536)
-
-        big = sys.maxint + 1L  # Python 2.1 needs the L suffix here
-        s = str(big)           # s won't have the suffix
-        eq(convert(s), big)
-        eq(convert("-" + s), -big)
-
-        raises(ValueError, convert, 'abc')
-        raises(ValueError, convert, '-0xabc')
-        raises(ValueError, convert, '')
-        raises(ValueError, convert, '123 456')
-        raises(ValueError, convert, '123-')
-
-    def test_datatype_locale(self):
-        convert = self.types.get("locale")
-        # Python supports "C" even when the _locale module is not available
-        self.assertEqual(convert("C"), "C")
-        self.assertRaises(ValueError, convert, "locale-does-not-exist")
-
-    def test_datatype_port(self):
-        convert = self.types.get("port-number")
-        eq = self.assertEqual
-        raises = self.assertRaises
-
-        raises(ValueError, convert, '-1')
-        raises(ValueError, convert, '0')
-        eq(convert('1'), 1)
-        eq(convert('80'), 80)
-        eq(convert('1023'), 1023)
-        eq(convert('1024'), 1024)
-        eq(convert('60000'), 60000)
-        eq(convert('65535'), 0xffff)
-        raises(ValueError, convert, '65536')
-
-    def test_datatype_socket_address(self):
-        convert = self.types.get("socket-address")
-        eq = self.assertEqual
-        AF_INET = socket.AF_INET
-        defhost = ZConfig.datatypes.DEFAULT_HOST
-
-        def check(value, family, address, self=self, convert=convert):
-            a = convert(value)
-            self.assertEqual(a.family, family)
-            self.assertEqual(a.address, address)
-
-        check("Host.Example.Com:80", AF_INET, ("host.example.com", 80))
-        check(":80",                 AF_INET, (defhost, 80))
-        check("80",                  AF_INET, (defhost, 80))
-        check("host.EXAMPLE.com",    AF_INET, ("host.example.com",None))
-        a1 = convert("/tmp/var/@345.4")
-        a2 = convert("/tmp/var/@345.4:80")
-        self.assertEqual(a1.address, "/tmp/var/@345.4")
-        self.assertEqual(a2.address, "/tmp/var/@345.4:80")
-        if hasattr(socket, "AF_UNIX"):
-            self.assertEqual(a1.family, socket.AF_UNIX)
-            self.assertEqual(a2.family, socket.AF_UNIX)
-        else:
-            self.assert_(a1.family is None)
-            self.assert_(a2.family is None)
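-        # A value that looks like a filesystem path is taken as an
-        # AF_UNIX address where the platform provides that family;
-        # host:port strings parse as AF_INET.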
-
-    def test_ipaddr_or_hostname(self):
-        convert = self.types.get('ipaddr-or-hostname')
-        eq = self.assertEqual
-        raises = self.assertRaises
-        eq(convert('hostname'),          'hostname')
-        eq(convert('hostname.com'),      'hostname.com')
-        eq(convert('www.hostname.com'),  'www.hostname.com')
-        eq(convert('HOSTNAME'),          'hostname')
-        eq(convert('HOSTNAME.COM'),      'hostname.com')
-        eq(convert('WWW.HOSTNAME.COM'),  'www.hostname.com')
-        eq(convert('127.0.0.1'),         '127.0.0.1')
-        raises(ValueError, convert,  '1hostnamewithleadingnumeric')
-        raises(ValueError, convert,  '255.255')
-        raises(ValueError, convert,  '12345678')
-        raises(ValueError, convert,  '999.999.999.999')
-        raises(ValueError, convert,  'a!badhostname')
-
-    def test_existing_directory(self):
-        convert = self.types.get('existing-directory')
-        eq = self.assertEqual
-        raises = self.assertRaises
-        eq(convert('.'), '.')
-        eq(convert(os.path.dirname(here)), os.path.dirname(here))
-        raises(ValueError, convert, tempfile.mktemp())
-
-    def test_existing_file(self):
-        convert = self.types.get('existing-file')
-        eq = self.assertEqual
-        raises = self.assertRaises
-        eq(convert('.'), '.')
-        eq(convert(here), here)
-        raises(ValueError, convert, tempfile.mktemp())
-
-    def test_existing_path(self):
-        convert = self.types.get('existing-path')
-        eq = self.assertEqual
-        raises = self.assertRaises
-        eq(convert('.'), '.')
-        eq(convert(here), here)
-        eq(convert(os.path.dirname(here)), os.path.dirname(here))
-        raises(ValueError, convert, tempfile.mktemp())
-
-    def test_existing_dirpath(self):
-        convert = self.types.get('existing-dirpath')
-        eq = self.assertEqual
-        raises = self.assertRaises
-        eq(convert('.'), '.')
-        eq(convert(here), here)
-        raises(ValueError, convert, '/a/hopefully/nonexistent/path')
-        raises(ValueError, convert, here + '/bogus')
-
-    def test_byte_size(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        convert = self.types.get('byte-size')
-        eq(convert('128'), 128)
-        eq(convert('128KB'), 128*1024)
-        eq(convert('128MB'), 128*1024*1024)
-        eq(convert('128GB'), 128*1024*1024*1024L)
-        raises(ValueError, convert, '128TB')
-        eq(convert('128'), 128)
-        eq(convert('128kb'), 128*1024)
-        eq(convert('128mb'), 128*1024*1024)
-        eq(convert('128gb'), 128*1024*1024*1024L)
-        raises(ValueError, convert, '128tb')
-
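-    # Suffix multipliers exercised above, applied case-insensitively:
-    # KB = 2**10, MB = 2**20, GB = 2**30; bare numbers are bytes.
-    # An illustrative parser (an assumption, not ZConfig's actual code):
-    #
-    #   _SUFFIXES = {'kb': 2**10, 'mb': 2**20, 'gb': 2**30}
-    #
-    #   def byte_size(s):
-    #       s = s.strip().lower()
-    #       mult = _SUFFIXES.get(s[-2:])
-    #       if mult:
-    #           return int(s[:-2]) * mult
-    #       return int(s)
-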
-    def test_time_interval(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        convert = self.types.get('time-interval')
-        eq(convert('120'), 120)
-        eq(convert('120S'), 120)
-        eq(convert('120M'), 120*60)
-        eq(convert('120H'), 120*60*60)
-        eq(convert('120D'), 120*60*60*24)
-        raises(ValueError, convert, '120W')
-        eq(convert('120'), 120)
-        eq(convert('120s'), 120)
-        eq(convert('120m'), 120*60)
-        eq(convert('120h'), 120*60*60)
-        eq(convert('120d'), 120*60*60*24)
-        raises(ValueError, convert, '120w')
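-        # Suffix multipliers exercised above (case-insensitive):
-        # s=1, m=60, h=3600, d=86400; bare numbers are seconds.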
-
-    def test_timedelta(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        convert = self.types.get('timedelta')
-        eq(convert('4w'), datetime.timedelta(weeks=4))
-        eq(convert('2d'), datetime.timedelta(days=2))
-        eq(convert('7h'), datetime.timedelta(hours=7))
-        eq(convert('12m'), datetime.timedelta(minutes=12))
-        eq(convert('14s'), datetime.timedelta(seconds=14))
-        eq(convert('4w 2d 7h 12m 14s'),
-           datetime.timedelta(2, 14, minutes=12, hours=7, weeks=4))
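-        # The positional arguments above are days=2 and seconds=14
-        # (datetime.timedelta(days, seconds, ...)), so the expected value
-        # matches the '4w 2d 7h 12m 14s' string exactly.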
-
-
-class RegistryTestCase(unittest.TestCase):
-
-    def test_registry_does_not_mask_toplevel_imports(self):
-        old_sys_path = sys.path[:]
-        tmpdir = tempfile.mkdtemp(prefix="test_datatypes_")
-        fn = os.path.join(tmpdir, "datatypes.py")
-        f = open(fn, "w")
-        f.write(TEST_DATATYPE_SOURCE)
-        f.close()
-        registry = ZConfig.datatypes.Registry()
-
-        # we really want the temp area to override everything else:
-        sys.path.insert(0, tmpdir)
-        try:
-            datatype = registry.get("datatypes.my_sample_datatype")
-        finally:
-            shutil.rmtree(tmpdir)
-            sys.path[:] = old_sys_path
-        self.assertEqual(datatype, 42)
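-
-    # What the lookup above must do, roughly: split the dotted name,
-    # import the module, and fetch the attribute, without letting a module
-    # shipped alongside the registry shadow the caller's top-level module.
-    # A minimal sketch (an assumption, not ZConfig's actual code):
-    #
-    #   def resolve(name):
-    #       pos = name.rfind('.')
-    #       module, attr = name[:pos], name[pos + 1:]
-    #       mod = __import__(module, {}, {}, [attr])
-    #       return getattr(mod, attr)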
-
-TEST_DATATYPE_SOURCE = """
-# sample datatypes file
-
-my_sample_datatype = 42
-"""
-
-
-def test_suite():
-    suite = unittest.makeSuite(DatatypeTestCase)
-    suite.addTest(unittest.makeSuite(RegistryTestCase))
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZConfig/tests/test_loader.py b/branches/bug1734/src/ZConfig/tests/test_loader.py
deleted file mode 100644
index 1c2ad724..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_loader.py
+++ /dev/null
@@ -1,293 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of ZConfig.loader classes and helper functions."""
-
-import os.path
-import sys
-import tempfile
-import unittest
-import urllib2
-
-from StringIO import StringIO
-
-import ZConfig
-import ZConfig.loader
-import ZConfig.url
-
-from ZConfig.tests.support import CONFIG_BASE, TestBase
-
-
-try:
-    myfile = __file__
-except NameError:
-    myfile = sys.argv[0]
-
-myfile = os.path.abspath(myfile)
-LIBRARY_DIR = os.path.join(os.path.dirname(myfile), "library")
-
-
-class LoaderTestCase(TestBase):
-
-    def test_schema_caching(self):
-        loader = ZConfig.loader.SchemaLoader()
-        url = ZConfig.url.urljoin(CONFIG_BASE, "simple.xml")
-        schema1 = loader.loadURL(url)
-        schema2 = loader.loadURL(url)
-        self.assert_(schema1 is schema2)
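-        # loadURL caches on the resolved URL, so both loads must return
-        # the identical schema object, not merely an equal one.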
-
-    def test_simple_import_with_cache(self):
-        loader = ZConfig.loader.SchemaLoader()
-        url1 = ZConfig.url.urljoin(CONFIG_BASE, "library.xml")
-        schema1 = loader.loadURL(url1)
-        sio = StringIO("<schema>"
-                       "  <import src='library.xml'/>"
-                       "  <section type='type-a' name='section'/>"
-                       "</schema>")
-        url2 = ZConfig.url.urljoin(CONFIG_BASE, "stringio")
-        schema2 = loader.loadFile(sio, url2)
-        self.assert_(schema1.gettype("type-a") is schema2.gettype("type-a"))
-
-    def test_simple_import_using_prefix(self):
-        self.load_schema_text("""\
-            <schema prefix='ZConfig.tests.library'>
-              <import package='.thing'/>
-            </schema>
-            """)
-
-    def test_import_errors(self):
-        # must specify exactly one of package or src
-        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
-                          StringIO("<schema><import/></schema>"))
-        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
-                          StringIO("<schema>"
-                                   "  <import src='library.xml'"
-                                   "          package='ZConfig'/>"
-                                   "</schema>"))
-        # cannot specify both src and file
-        self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
-                          StringIO("<schema>"
-                                   "  <import src='library.xml'"
-                                   "          file='other.xml'/>"
-                                   "</schema>"))
-        # cannot specify module as package
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.test_loader'/>"
-                       "</schema>")
-        try:
-            ZConfig.loadSchemaFile(sio)
-        except ZConfig.SchemaResourceError, e:
-            self.assertEqual(e.filename, "component.xml")
-            self.assertEqual(e.package, "ZConfig.tests.test_loader")
-            self.assert_(e.path is None)
-            # make sure the str() doesn't raise an unexpected exception
-            str(e)
-        else:
-            self.fail("expected SchemaResourceError")
-
-    def test_import_from_package(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget'/>"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        self.assert_(schema.gettype("widget-a") is not None)
-
-    def test_import_from_package_with_file(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget'"
-                       "          file='extra.xml' />"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        self.assert_(schema.gettype("extra-type") is not None)
-
-    def test_import_from_package_extra_directory(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.thing'"
-                       "          file='extras.xml' />"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        self.assert_(schema.gettype("extra-thing") is not None)
-
-    def test_import_from_package_with_missing_file(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget'"
-                       "          file='notthere.xml' />"
-                       "</schema>")
-        try:
-            loader.loadFile(sio)
-        except ZConfig.SchemaResourceError, e:
-            self.assertEqual(e.filename, "notthere.xml")
-            self.assertEqual(e.package, "ZConfig.tests.library.widget")
-            self.assert_(e.path)
-            # make sure the str() doesn't raise an unexpected exception
-            str(e)
-        else:
-            self.fail("expected SchemaResourceError")
-
-    def test_import_from_package_with_directory_file(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget'"
-                       "          file='really/notthere.xml' />"
-                       "</schema>")
-        self.assertRaises(ZConfig.SchemaError, loader.loadFile, sio)
-
-    def test_import_two_components_one_package(self):
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget' />"
-                       "  <import package='ZConfig.tests.library.widget'"
-                       "          file='extra.xml' />"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        schema.gettype("widget-a")
-        schema.gettype("extra-type")
-
-    def test_import_component_twice_1(self):
-        # Make sure we can import a component twice from a schema.
-        # This is most likely to occur when the component is imported
-        # from each of two other components, or from the top-level
-        # schema and a component.
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget' />"
-                       "  <import package='ZConfig.tests.library.widget' />"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        schema.gettype("widget-a")
-
-    def test_import_component_twice_2(self):
-        # Make sure we can import a component from a config file even
-        # if it has already been imported from the schema.
-        loader = ZConfig.loader.SchemaLoader()
-        sio = StringIO("<schema>"
-                       "  <import package='ZConfig.tests.library.widget' />"
-                       "</schema>")
-        schema = loader.loadFile(sio)
-        loader = ZConfig.loader.ConfigLoader(schema)
-        sio = StringIO("%import ZConfig.tests.library.widget")
-        loader.loadFile(sio)
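-        # Schemas import components with <import package=.../>; config
-        # files use the %import directive.  Both routes must tolerate a
-        # component that is already loaded.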
-
-    def test_urlsplit_urlunsplit(self):
-        # Extracted from Python's test.test_urlparse module:
-        for url, parsed, split in [
-            ('http://www.python.org',
-             ('http', 'www.python.org', '', '', '', ''),
-             ('http', 'www.python.org', '', '', '')),
-            ('http://www.python.org#abc',
-             ('http', 'www.python.org', '', '', '', 'abc'),
-             ('http', 'www.python.org', '', '', 'abc')),
-            ('http://www.python.org/#abc',
-             ('http', 'www.python.org', '/', '', '', 'abc'),
-             ('http', 'www.python.org', '/', '', 'abc')),
-            ("http://a/b/c/d;p?q#f",
-             ('http', 'a', '/b/c/d', 'p', 'q', 'f'),
-             ('http', 'a', '/b/c/d;p', 'q', 'f')),
-            ('file:///tmp/junk.txt',
-             ('file', '', '/tmp/junk.txt', '', '', ''),
-             ('file', '', '/tmp/junk.txt', '', '')),
-            ]:
-            result = ZConfig.url.urlsplit(url)
-            self.assertEqual(result, split)
-            result2 = ZConfig.url.urlunsplit(result)
-            self.assertEqual(result2, url)
-
-    def test_file_url_normalization(self):
-        self.assertEqual(
-            ZConfig.url.urlnormalize("file:/abc/def"),
-            "file:///abc/def")
-        self.assertEqual(
-            ZConfig.url.urlunsplit(("file", "", "/abc/def", "", "")),
-            "file:///abc/def")
-        self.assertEqual(
-            ZConfig.url.urljoin("file:/abc/", "def"),
-            "file:///abc/def")
-        self.assertEqual(
-            ZConfig.url.urldefrag("file:/abc/def#frag"),
-            ("file:///abc/def", "frag"))
-
-    def test_isPath(self):
-        assert_ = self.assert_
-        isPath = ZConfig.loader.BaseLoader().isPath
-        assert_(isPath("abc"))
-        assert_(isPath("abc/def"))
-        assert_(isPath("/abc"))
-        assert_(isPath("/abc/def"))
-        assert_(isPath(r"\abc"))
-        assert_(isPath(r"\abc\def"))
-        assert_(isPath(r"c:\abc\def"))
-        assert_(not isPath("http://www.example.com/"))
-        assert_(not isPath("http://www.example.com/sample.conf"))
-        assert_(not isPath("file:///etc/zope/zope.conf"))
-        assert_(not isPath("file:///c|/foo/bar.conf"))
-
-
-class TestNonExistentResources(unittest.TestCase):
-
-    # XXX Not sure if this is the best approach for these.  These
-    # tests make sure that the error reported by ZConfig for missing
-    # resources is handled in a consistent way.  Since ZConfig uses
-    # urllib2.urlopen() for opening all resources, what we do is
-    # replace that function with one that always raises an exception.
-    # Since urllib2.urlopen() can raise either IOError or OSError
-    # (depending on the version of Python), we run the test for each
-    # exception.  urllib2.urlopen() is restored after running the
-    # test.
-
-    def setUp(self):
-        self.urllib2_urlopen = urllib2.urlopen
-        urllib2.urlopen = self.fake_urlopen
-
-    def tearDown(self):
-        urllib2.urlopen = self.urllib2_urlopen
-
-    def fake_urlopen(self, url):
-        raise self.error()
-
-    def test_nonexistent_file_ioerror(self):
-        self.error = IOError
-        self.check_nonexistent_file()
-
-    def test_nonexistent_file_oserror(self):
-        self.error = OSError
-        self.check_nonexistent_file()
-
-    def check_nonexistent_file(self):
-        fn = tempfile.mktemp()
-        schema = ZConfig.loadSchemaFile(StringIO("<schema/>"))
-        self.assertRaises(ZConfig.ConfigurationError,
-                          ZConfig.loadSchema, fn)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          ZConfig.loadConfig, schema, fn)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          ZConfig.loadConfigFile, schema,
-                          StringIO("%include " + fn))
-        self.assertRaises(ZConfig.ConfigurationError,
-                          ZConfig.loadSchema,
-                          "http://www.zope.org/no-such-document/")
-        self.assertRaises(ZConfig.ConfigurationError,
-                          ZConfig.loadConfig, schema,
-                          "http://www.zope.org/no-such-document/")
-
-
-def test_suite():
-    suite = unittest.makeSuite(LoaderTestCase)
-    suite.addTest(unittest.makeSuite(TestNonExistentResources))
-    return suite
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZConfig/tests/test_schema.py b/branches/bug1734/src/ZConfig/tests/test_schema.py
deleted file mode 100644
index 6fe0e414..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_schema.py
+++ /dev/null
@@ -1,1037 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of ZConfig schemas."""
-
-import unittest
-
-import ZConfig
-
-from ZConfig.tests.support import TestBase, CONFIG_BASE
-
-
-def uppercase(value):
-    return str(value).upper()
-
-def appsection(value):
-    return MySection(value)
-
-def get_foo(section):
-    return section.foo
-
-class MySection:
-    def __init__(self, value):
-        self.conf = value
-
-
-def get_section_attributes(section):
-    L = list(section.getSectionAttributes())
-    L.sort()
-    return L
-
-
-class SchemaTestCase(TestBase):
-    """Tests of the basic schema support itself."""
-
-    def test_minimal_schema(self):
-        schema = self.load_schema_text("<schema/>")
-        self.assertEqual(len(schema), 0)
-        self.assertRaises(IndexError,
-                          lambda schema=schema: schema[0])
-        self.assertRaises(ZConfig.ConfigurationError,
-                          schema.getinfo, "foo")
-
-    def test_simple(self):
-        schema, conf = self.load_both("simple.xml", "simple.conf")
-        self._verifySimpleConf(conf)
-
-    def _verifySimpleConf(self, conf):
-        eq = self.assertEqual
-        eq(conf.var1, 'abc')
-        eq(conf.int_var, 12)
-        eq(conf.float_var, 12.02)
-        eq(conf.neg_int, -2)
-
-        check = self.assert_
-        check(conf.true_var_1)
-        check(conf.true_var_2)
-        check(conf.true_var_3)
-        check(not conf.false_var_1)
-        check(not conf.false_var_2)
-        check(not conf.false_var_3)
-
-    def test_app_datatype(self):
-        dtname = __name__ + ".uppercase"
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='a' datatype='%s'/>
-              <key name='b' datatype='%s' default='abc'/>
-              <multikey name='c' datatype='%s'>
-                <default>abc</default>
-                <default>abc</default>
-                </multikey>
-              <multikey name='d' datatype='%s'>
-                <default>not</default>
-                <default>lower</default>
-                <default>case</default>
-                </multikey>
-            </schema>
-            """ % (dtname, dtname, dtname, dtname))
-        conf = self.load_config_text(schema, """\
-                                     a qwerty
-                                     c upp
-                                     c er
-                                     c case
-                                     """)
-        eq = self.assertEqual
-        eq(conf.a, 'QWERTY')
-        eq(conf.b, 'ABC')
-        eq(conf.c, ['UPP', 'ER', 'CASE'])
-        eq(conf.d, ['NOT', 'LOWER', 'CASE'])
-        eq(get_section_attributes(conf),
-           ["a", "b", "c", "d"])
-
-    def test_app_sectiontype(self):
-        schema = self.load_schema_text("""\
-            <schema datatype='.appsection' prefix='%s'>
-              <sectiontype name='foo' datatype='.MySection'>
-                <key name='sample' datatype='integer' default='345'/>
-                </sectiontype>
-              <section name='sect' type='foo' />
-            </schema>
-            """ % __name__)
-        conf = self.load_config_text(schema, """\
-                                     <foo sect>
-                                       sample 42
-                                     </foo>
-                                     """)
-        self.assert_(isinstance(conf, MySection))
-        o1 = conf.conf.sect
-        self.assert_(isinstance(o1, MySection))
-        self.assertEqual(o1.conf.sample, 42)
-
-    def test_empty_sections(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='section'/>
-              <section type='section' name='s1'/>
-              <section type='section' name='s2'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <section s1>
-                                     </section>
-                                     <section s2/>
-                                     """)
-        self.assert_(conf.s1 is not None)
-        self.assert_(conf.s2 is not None)
-        self.assertEqual(get_section_attributes(conf),
-                         ["s1", "s2"])
-
-    def test_deeply_nested_sections(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='type1'>
-                <key name='key' default='type1-value'/>
-              </sectiontype>
-              <sectiontype name='type2'>
-                <key name='key' default='type2-value'/>
-                <section name='sect' type='type1'/>
-              </sectiontype>
-              <sectiontype name='type3'>
-                <key name='key' default='type3-value'/>
-                <section name='sect' type='type2'/>
-              </sectiontype>
-              <section name='sect' type='type3'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <type3 sect>
-                                       key sect3-value
-                                       <type2 sect>
-                                         key sect2-value
-                                         <type1 sect/>
-                                       </type2>
-                                     </type3>
-                                     """)
-        eq = self.assertEqual
-        eq(conf.sect.sect.sect.key, "type1-value")
-        eq(conf.sect.sect.key, "sect2-value")
-        eq(conf.sect.key, "sect3-value")
-        eq(get_section_attributes(conf),
-           ["sect"])
-        eq(get_section_attributes(conf.sect),
-           ["key", "sect"])
-        eq(get_section_attributes(conf.sect.sect),
-           ["key", "sect"])
-        eq(get_section_attributes(conf.sect.sect.sect),
-           ["key"])
-
-    def test_multivalued_keys(self):
-        schema = self.load_schema_text("""\
-            <schema handler='def'>
-              <multikey name='a' handler='ABC' />
-              <multikey name='b' datatype='integer'>
-                <default>1</default>
-                <default>2</default>
-              </multikey>
-              <multikey name='c' datatype='integer'>
-                <default>3</default>
-                <default>4</default>
-                <default>5</default>
-              </multikey>
-              <multikey name='d' />
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     a foo
-                                     a bar
-                                     c 41
-                                     c 42
-                                     c 43
-                                     """, num_handlers=2)
-        L = []
-        self.handlers({'abc': L.append,
-                       'DEF': L.append})
-        self.assertEqual(L, [['foo', 'bar'], conf])
-        L = []
-        self.handlers({'abc': None,
-                       'DEF': L.append})
-        self.assertEqual(L, [conf])
-        self.assertEqual(conf.a, ['foo', 'bar'])
-        self.assertEqual(conf.b, [1, 2])
-        self.assertEqual(conf.c, [41, 42, 43])
-        self.assertEqual(conf.d, [])
-        self.assertEqual(get_section_attributes(conf),
-                         ["a", "b", "c", "d"])
-
-    def test_multikey_required(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <multikey name='k' required='yes'/>
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_multisection_required(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='s'/>
-              <multisection name='*' attribute='s' type='s' required='yes'/>
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_key_required_but_missing(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='k' required='yes'/>
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_section_required_but_missing(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='k'/>
-              <section name='k' type='k' required='yes'/>
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "")
-
-    def test_key_default_element(self):
-        self.assertRaises(
-            ZConfig.SchemaError, self.load_schema_text, """\
-            <schema>
-              <key name='name'>
-                <default>text</default>
-              </key>
-            </schema>
-            """)
-
-    def test_bad_handler_maps(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='a' handler='abc'/>
-              <key name='b' handler='def'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     a foo
-                                     b bar
-                                     """, num_handlers=2)
-        self.assertEqual(get_section_attributes(conf),
-                         ["a", "b"])
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.handlers, {'abc': id, 'ABC': id, 'def': id})
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.handlers, {})
-
-    def test_handler_ordering(self):
-        schema = self.load_schema_text("""\
-            <schema handler='c'>
-              <sectiontype name='inner'>
-              </sectiontype>
-              <sectiontype name='outer'>
-                <section type='inner' name='sect-inner' handler='a'/>
-              </sectiontype>
-              <section type='outer' name='sect-outer' handler='b'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <outer sect-outer>
-                                       <inner sect-inner/>
-                                     </outer>
-                                     """, num_handlers=3)
-        L = []
-        self.handlers({'a': L.append,
-                       'b': L.append,
-                       'c': L.append})
-        outer = conf.sect_outer
-        inner = outer.sect_inner
-        self.assertEqual(L, [inner, outer, conf])
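-        # Handlers fire bottom-up: the innermost section's handler runs
-        # first and the top-level schema handler runs last.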
-
-    def test_duplicate_section_names(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'/>
-              <sectiontype name='nesting'>
-                <section name='a' type='sect'/>
-              </sectiontype>
-              <section name='a' type='nesting'/>
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError, self.load_config_text,
-                          schema, """\
-                          <sect a/>
-                          <sect a/>
-                          """)
-        conf = self.load_config_text(schema, """\
-                                     <nesting a>
-                                       <sect a/>
-                                     </nesting>
-                                     """)
-
-    def test_disallowed_duplicate_attribute(self):
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <key name='a'/>
-                            <key name='b' attribute='a'/>
-                          </schema>
-                          """)
-
-    def test_unknown_datatype_name(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, "<schema datatype='foobar'/>")
-
-    def test_load_abstracttype(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <abstracttype name='group'>
-                <description>This is an abstract section type.</description>
-              </abstracttype>
-              <sectiontype name='t1' implements='group'>
-                <key name='k1' default='default1'/>
-              </sectiontype>
-              <sectiontype name='t2' implements='group'>
-                <key name='k2' default='default2'/>
-              </sectiontype>
-              <multisection name='*' type='group' attribute='g'/>
-            </schema>
-            """)
-        # check the types that get defined
-        t = schema.gettype("group")
-        self.assert_(t.isabstract())
-        t1 = schema.gettype("t1")
-        self.assert_(not t1.isabstract())
-        self.assert_(t.getsubtype("t1") is t1)
-        t2 = schema.gettype("t2")
-        self.assert_(not t2.isabstract())
-        self.assert_(t.getsubtype("t2") is t2)
-        self.assertRaises(ZConfig.ConfigurationError, t.getsubtype, "group")
-        self.assert_(t1 is not t2)
-        # try loading a config that relies on this schema
-        conf = self.load_config_text(schema, """\
-                                     <t1/>
-                                     <t1>
-                                       k1 value1
-                                     </t1>
-                                     <t2/>
-                                     <t2>
-                                       k2 value2
-                                     </t2>
-                                     """)
-        eq = self.assertEqual
-        eq(get_section_attributes(conf), ["g"])
-        eq(len(conf.g), 4)
-        eq(conf.g[0].k1, "default1")
-        eq(conf.g[1].k1, "value1")
-        eq(conf.g[2].k2, "default2")
-        eq(conf.g[3].k2, "value2")
-
-        # white box:
-        self.assert_(conf.g[0].getSectionDefinition() is t1)
-        self.assert_(conf.g[1].getSectionDefinition() is t1)
-        self.assert_(conf.g[2].getSectionDefinition() is t2)
-        self.assert_(conf.g[3].getSectionDefinition() is t2)
-
-    def test_abstracttype_extension(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <abstracttype name='group'/>
-              <sectiontype name='extra' implements='group'/>
-              <section name='thing' type='group'/>
-            </schema>
-            """)
-        abstype = schema.gettype("group")
-        self.assert_(schema.gettype("extra") is abstype.getsubtype("extra"))
-
-        # make sure we can use the extension in a config:
-        conf = self.load_config_text(schema, "<extra thing/>")
-        self.assertEqual(conf.thing.getSectionType(), "extra")
-        self.assertEqual(get_section_attributes(conf), ["thing"])
-        self.assertEqual(get_section_attributes(conf.thing), [])
-
-    def test_abstracttype_extension_errors(self):
-        # specifying a non-existent abstracttype
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <sectiontype name='s' implements='group'/>
-                          </schema>
-                          """)
-        # specifying something that isn't an abstracttype
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <sectiontype name='t1'/>
-                            <sectiontype name='t2' implements='t1'/>
-                          </schema>
-                          """)
-
-    def test_arbitrary_key(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='+' required='yes' attribute='keymap'
-                   datatype='integer'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "some-key 42")
-        self.assertEqual(conf.keymap, {'some-key': 42})
-        self.assertEqual(get_section_attributes(conf), ["keymap"])
-
-    def test_arbitrary_multikey_required(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <multikey name='+' required='yes' attribute='keymap'
-                        datatype='integer'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     some-key 42
-                                     some-key 43
-                                     """)
-        self.assertEqual(conf.keymap, {'some-key': [42, 43]})
-
-    def test_arbitrary_multikey_optional(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'>
-                <multikey name='+' attribute='keymap'/>
-              </sectiontype>
-              <section name='+' type='sect' attribute='stuff'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <sect foo>
-                                       some-key 42
-                                       some-key 43
-                                     </sect>
-                                     """)
-        self.assertEqual(conf.stuff.keymap, {'some-key': ['42', '43']})
-        self.assertEqual(get_section_attributes(conf), ["stuff"])
-
-    def test_arbitrary_multikey_optional_empty(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'>
-                <multikey name='+' attribute='keymap'/>
-              </sectiontype>
-              <section name='+' type='sect' attribute='stuff'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "<sect foo/>")
-        self.assertEqual(conf.stuff.keymap, {})
-
-    def test_arbitrary_multikey_with_defaults(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <multikey name='+' attribute='keymap'>
-                <default key='a'>value-a1</default>
-                <default key='a'>value-a2</default>
-                <default key='b'>value-b</default>
-              </multikey>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "")
-        self.assertEqual(conf.keymap, {'a': ['value-a1', 'value-a2'],
-                                       'b': ['value-b']})
-
-    def test_arbitrary_multikey_with_unkeyed_default(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-                          <schema>
-                            <multikey name='+' attribute='keymap'>
-                              <default>value-a1</default>
-                            </multikey>
-                          </schema>
-                          """)
-
-    def test_arbitrary_key_with_defaults(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='+' attribute='keymap'>
-                <default key='a'>value-a</default>
-                <default key='b'>value-b</default>
-              </key>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "")
-        self.assertEqual(conf.keymap, {'a': 'value-a', 'b': 'value-b'})
-
-    def test_arbitrary_key_with_unkeyed_default(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-                          <schema>
-                            <key name='+' attribute='keymap'>
-                              <default>value-a1</default>
-                            </key>
-                          </schema>
-                          """)
-
-    def test_arbitrary_keys_with_others(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='k1' default='v1'/>
-              <key name='k2' default='2' datatype='integer'/>
-              <key name='+' required='yes' attribute='keymap'
-                   datatype='integer'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     some-key 42
-                                     k2 3
-                                     """)
-        self.assertEqual(conf.k1, 'v1')
-        self.assertEqual(conf.k2, 3)
-        self.assertEqual(conf.keymap, {'some-key': 42})
-        self.assertEqual(get_section_attributes(conf),
-                         ["k1", "k2", "keymap"])
-
-    def test_arbitrary_key_missing(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <key name='+' required='yes' attribute='keymap' />
-            </schema>
-            """)
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "# empty config file")
-
-    def test_arbitrary_key_bad_schema(self):
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <key name='+' attribute='attr1'/>
-                            <key name='+' attribute='attr2'/>
-                          </schema>
-                          """)
-
-    def test_getrequiredtypes(self):
-        schema = self.load_schema("library.xml")
-        self.assertEqual(schema.getrequiredtypes(), [])
-
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='used'/>
-              <sectiontype name='unused'/>
-              <section type='used' name='a'/>
-            </schema>
-            """)
-        L = schema.getrequiredtypes()
-        L.sort()
-        self.assertEqual(L, ["used"])
-
-    def test_getunusedtypes(self):
-        schema = self.load_schema("library.xml")
-        L = schema.getunusedtypes()
-        L.sort()
-        self.assertEqual(L, ["type-a", "type-b"])
-
-        schema = self.load_schema_text("""\
-            <schema type='top'>
-              <sectiontype name='used'/>
-              <sectiontype name='unused'/>
-              <section type='used' name='a'/>
-            </schema>
-            """)
-        self.assertEqual(schema.getunusedtypes(), ["unused"])
-
-    def test_section_value_mutation(self):
-        schema, conf = self.load_both("simple.xml", "simple.conf")
-        orig = conf.empty
-        new = []
-        conf.empty = new
-        self.assert_(conf.empty is new)
-
-    def test_simple_anonymous_section(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'>
-                <key name='key' default='value'/>
-              </sectiontype>
-              <section name='*' type='sect' attribute='attr'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "<sect/>")
-        self.assertEqual(conf.attr.key, "value")
-
-    def test_simple_anynamed_section(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'>
-                <key name='key' default='value'/>
-              </sectiontype>
-              <section name='+' type='sect' attribute='attr'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "<sect name/>")
-        self.assertEqual(conf.attr.key, "value")
-        self.assertEqual(conf.attr.getSectionName(), "name")
-
-        # if we omit the name, it's an error
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "<sect/>")
-
-    def test_nested_abstract_sectiontype(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <abstracttype name='abstract'/>
-              <sectiontype name='t1' implements='abstract'/>
-              <sectiontype name='t2' implements='abstract'>
-                <section type='abstract' name='s1'/>
-              </sectiontype>
-              <section type='abstract' name='*' attribute='s2'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <t2>
-                                       <t1 s1/>
-                                     </t2>
-                                     """)
-
-    def test_reserved_attribute_prefix(self):
-        template = """\
-            <schema>
-              <sectiontype name='s'/>
-                %s
-            </schema>
-            """
-        def check(thing, self=self, template=template):
-            text = template % thing
-            self.assertRaises(ZConfig.SchemaError,
-                              self.load_schema_text, text)
-
-        check("<key name='a' attribute='getSection'/>")
-        check("<key name='a' attribute='getSectionThing'/>")
-        check("<multikey name='a' attribute='getSection'/>")
-        check("<multikey name='a' attribute='getSectionThing'/>")
-        check("<section type='s' name='*' attribute='getSection'/>")
-        check("<section type='s' name='*' attribute='getSectionThing'/>")
-        check("<multisection type='s' name='*' attribute='getSection'/>")
-        check("<multisection type='s' name='*' attribute='getSectionThing'/>")
-
-    def test_sectiontype_as_schema(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='s'>
-                <key name='skey' default='skey-default'/>
-              </sectiontype>
-              <sectiontype name='t'>
-                <key name='tkey' default='tkey-default'/>
-                <section name='*' type='s' attribute='section'/>
-              </sectiontype>
-            </schema>
-            """)
-        t = schema.gettype("t")
-        conf = self.load_config_text(t, "<s/>")
-        self.assertEqual(conf.tkey, "tkey-default")
-        self.assertEqual(conf.section.skey, "skey-default")
-        self.assertEqual(get_section_attributes(conf), ["section", "tkey"])
-        self.assertEqual(get_section_attributes(conf.section), ["skey"])
-
-    def test_datatype_conversion_error(self):
-        schema_url = "file:///tmp/fake-url-1.xml"
-        config_url = "file:///tmp/fake-url-2.xml"
-        schema = self.load_schema_text("""\
-             <schema>
-               <key name='key' default='bogus' datatype='integer'/>
-             </schema>
-             """, url=schema_url)
-        e = self.get_data_conversion_error(
-            schema, "", config_url)
-        self.assertEqual(e.url, schema_url)
-        self.assertEqual(e.lineno, 2)
-
-        e = self.get_data_conversion_error(schema, """\
-                                           # comment
-
-                                           key splat
-                                           """, config_url)
-        self.assertEqual(e.url, config_url)
-        self.assertEqual(e.lineno, 3)
-
-    def get_data_conversion_error(self, schema, src, url):
-        try:
-            self.load_config_text(schema, src, url=url)
-        except ZConfig.DataConversionError, e:
-            return e
-        else:
-            self.fail("expected ZConfig.DataConversionError")
-
-    def test_numeric_section_name(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'/>
-              <multisection name='*' type='sect' attribute='things'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "<sect 1 />")
-        self.assertEqual(len(conf.things), 1)
-
-    def test_sectiontype_extension(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='t1'>
-                <key name='k1'/>
-              </sectiontype>
-              <sectiontype name='t2' extends='t1'>
-                <key name='k2'/>
-              </sectiontype>
-              <section name='s' type='t2'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-                                     <t2 s>
-                                       k1 k1-value
-                                       k2 k2-value
-                                     </t2>
-                                     """)
-        eq = self.assertEqual
-        eq(conf.s.k1, "k1-value")
-        eq(conf.s.k2, "k2-value")
-        eq(get_section_attributes(conf), ["s"])
-        eq(get_section_attributes(conf.s), ["k1", "k2"])
-
-    def test_sectiontype_extension_errors(self):
-        # cannot override key from base
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <sectiontype name='t1'>
-                              <key name='k1'/>
-                            </sectiontype>
-                            <sectiontype name='t2' extends='t1'>
-                              <key name='k1'/>
-                            </sectiontype>
-                          </schema>
-                          """)
-        # cannot extend a nonexistent section type
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <sectiontype name='t2' extends='t1'/>
-                          </schema>
-                          """)
-        # cannot extend abstract type
-        self.assertRaises(ZConfig.SchemaError, self.load_schema_text, """\
-                          <schema>
-                            <abstracttype name='t1'/>
-                            <sectiontype name='t2' extends='t1'/>
-                          </schema>
-                          """)
-
-    def test_sectiontype_derived_keytype(self):
-        # make sure that a derived section type inherits the keytype
-        # of its base
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect' keytype='identifier'/>
-              <sectiontype name='derived' extends='sect'>
-                <key name='foo' attribute='foo'/>
-                <key name='Foo' attribute='Foo'/>
-              </sectiontype>
-              <section name='foo' type='derived'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-            <derived foo>
-              foo bar
-              Foo BAR
-            </derived>
-            """)
-        self.assertEqual(conf.foo.foo, "bar")
-        self.assertEqual(conf.foo.Foo, "BAR")
-        self.assertEqual(get_section_attributes(conf.foo), ["Foo", "foo"])
-
-    def test_sectiontype_override_keytype(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='base' keytype='identifier' >
-                <key name='+' attribute='map' />
-              </sectiontype>
-              <sectiontype name='derived' keytype='ipaddr-or-hostname'
-                           extends='base' />
-              <section name='*' type='base' attribute='base' />
-              <section name='*' type='derived' attribute='derived' />
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-            <base>
-              ident1 foo
-              Ident2 bar
-            </base>
-            <derived>
-              EXAMPLE.COM foo
-            </derived>
-            """)
-        L = conf.base.map.items()
-        L.sort()
-        self.assertEqual(L, [("Ident2", "bar"), ("ident1", "foo")])
-        L = conf.derived.map.items()
-        L.sort()
-        self.assertEqual(L, [("example.com", "foo")])
-        self.assertEqual(get_section_attributes(conf), ["base", "derived"])
-
-    def test_keytype_applies_to_default_key(self):
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='sect'>
-                <key name='+' attribute='mapping'>
-                  <default key='foo'>42</default>
-                  <default key='BAR'>24</default>
-                </key>
-              </sectiontype>
-              <section type='sect' name='*' attribute='sect'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, "<sect/>")
-        items = conf.sect.mapping.items()
-        items.sort()
-        self.assertEqual(items, [("bar", "24"), ("foo", "42")])
-
-    def test_duplicate_default_key_checked_in_schema(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-            <schema>
-              <sectiontype name='sect'>
-                <key name='+' attribute='mapping'>
-                  <default key='foo'>42</default>
-                  <default key='Foo'>24</default>
-                </key>
-              </sectiontype>
-              <section type='sect' name='*' attribute='sect'/>
-            </schema>
-            """)
-
-    def test_default_keys_rechecked_clash_in_derived_sectiontype(self):
-        # If the default values associated with a <key name="+"> can't
-        # be supported by a new keytype for a derived sectiontype, an
-        # error should be indicated.
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-            <schema>
-              <sectiontype name='base' keytype='identifier'>
-                <key name='+' attribute='mapping'>
-                  <default key='foo'>42</default>
-                  <default key='Foo'>42</default>
-                </key>
-              </sectiontype>
-              <sectiontype name='sect' keytype='basic-key'
-                           extends='base'>
-                <!-- should cry foul here -->
-              </sectiontype>
-              <section type='sect' name='*' attribute='sect'/>
-            </schema>
-            """)
-
-    def test_default_keys_rechecked_dont_clash_in_derived_sectiontype(self):
-        # If the default values associated with a <multikey name="+"> can
-        # be merged under the new keytype of a derived sectiontype, no
-        # error should be raised; keys that collide simply pool their
-        # values.
-        schema = self.load_schema_text("""\
-            <schema>
-              <sectiontype name='base' keytype='identifier'>
-                <multikey name='+' attribute='mapping'>
-                  <default key='foo'>42</default>
-                  <default key='Foo'>42</default>
-                </multikey>
-              </sectiontype>
-              <sectiontype name='sect' keytype='basic-key'
-                           extends='base'>
-                <!-- should cry foul here -->
-              </sectiontype>
-              <section type='base' name='*' attribute='base'/>
-              <section type='sect' name='*' attribute='sect'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-            <base/>
-            <sect/>
-            """)
-        base = conf.base.mapping.items()
-        base.sort()
-        self.assertEqual(base, [("Foo", ["42"]), ("foo", ["42"])])
-        sect = conf.sect.mapping.items()
-        sect.sort()
-        self.assertEqual(sect, [("foo", ["42", "42"])])
-
-    def test_sectiontype_inherited_datatype(self):
-        schema = self.load_schema_text("""\
-            <schema prefix='ZConfig.tests.test_schema'>
-              <sectiontype name='base' datatype='.get_foo'>
-                <key name="foo"/>
-              </sectiontype>
-              <sectiontype name='derived' extends='base'/>
-              <section name='*' type='derived' attribute='splat'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema, """\
-            <derived>
-              foo bar
-            </derived>
-            """)
-        self.assertEqual(conf.splat, "bar")
-
-    def test_schema_keytype(self):
-        schema = self.load_schema_text("""\
-            <schema keytype='ipaddr-or-hostname'>
-              <key name='+' attribute='table' datatype='ipaddr-or-hostname'/>
-            </schema>
-            """)
-        conf = self.load_config_text(schema,
-                                     "host.example.com 127.0.0.1\n"
-                                     "www.example.org 127.0.0.2\n")
-        table = conf.table
-        self.assertEqual(len(table), 2)
-        L = table.items()
-        L.sort()
-        self.assertEqual(L, [("host.example.com", "127.0.0.1"),
-                             ("www.example.org", "127.0.0.2")])
-
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "abc.  127.0.0.1")
-
-    def test_keytype_identifier(self):
-        schema = self.load_schema_text("""\
-           <schema keytype='identifier'>
-             <key name='foo' attribute='foo'/>
-             <key name='Foo' attribute='Foo'/>
-           </schema>
-           """)
-        conf = self.load_config_text(schema,
-                                     "Foo Foo-value\n"
-                                     "foo foo-value\n")
-        self.assertEqual(conf.foo, "foo-value")
-        self.assertEqual(conf.Foo, "Foo-value")
-        self.assertEqual(get_section_attributes(conf), ["Foo", "foo"])
-        # key mis-match based on case:
-        self.assertRaises(ZConfig.ConfigurationError,
-                          self.load_config_text, schema, "FOO frob\n")
-        # attribute names conflict, since the keytype isn't used to
-        # generate attribute names
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-                          <schema keytype='identifier'>
-                            <key name='foo'/>
-                            <key name='Foo'/>
-                          </schema>
-                          """)
-
-    def test_datatype_casesensitivity(self):
-        self.load_schema_text("<schema datatype='NULL'/>")
-
-    def test_simple_extends(self):
-        schema = self.load_schema_text("""\
-           <schema extends='%s/simple.xml %s/library.xml'>
-             <section name='A' type='type-a' />
-           </schema>
-           """ % (CONFIG_BASE, CONFIG_BASE))
-        self._verifySimpleConf(self.load_config(schema, "simple.conf"))
-
-    def test_extends_fragment_failure(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text,
-            "<schema extends='%s/library.xml#foo'/>" % CONFIG_BASE)
-
-    def test_multi_extends_implicit_OK(self):
-        self.load_schema_text("""\
-           <schema extends='%s/base.xml %s/library.xml'>
-             <section name='A' type='type-a' />
-             <section name='X' type='type-X' />
-           </schema>
-           """ % (CONFIG_BASE, CONFIG_BASE))
-
-    def test_multi_extends_explicit_datatype_OK(self):
-        self.load_schema_text("""\
-           <schema extends='%s/base-datatype1.xml %s/base-datatype2.xml'
-                   datatype='null'>
-             <section name='One' type='type-1' />
-             <section name='Two' type='type-2' />
-           </schema>
-           """ % (CONFIG_BASE, CONFIG_BASE))
-
-    def test_multi_extends_explicit_keytype_OK(self):
-        self.load_schema_text("""\
-           <schema extends='%s/base-keytype1.xml %s/base-keytype2.xml'
-                   keytype='%s.uppercase'>
-             <section name='One' type='type-1' />
-             <section name='Two' type='type-2' />
-           </schema>
-           """ % (CONFIG_BASE, CONFIG_BASE, __name__))
-
-    def test_multi_extends_datatype_conflict(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-            <schema extends='%s/base-datatype1.xml %s/base-datatype2.xml'/>
-            """ % (CONFIG_BASE, CONFIG_BASE))
-
-    def test_multi_extends_keytype_conflict(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-            <schema extends='%s/base-keytype1.xml %s/base-keytype2.xml'/>
-            """ % (CONFIG_BASE, CONFIG_BASE))
-
-    def test_multiple_descriptions_is_error(self):
-        self.assertRaises(ZConfig.SchemaError,
-                          self.load_schema_text, """\
-            <schema>
-              <description>  foo  </description>
-              <description>  bar  </description>
-            </schema>
-            """)
-
-
-def test_suite():
-    return unittest.makeSuite(SchemaTestCase)
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
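The tests above drive schema loading end to end; the same public API can
be exercised standalone (a minimal sketch, assuming the ZConfig 2.x
loadSchemaFile/loadConfigFile entry points this suite builds on):

    from StringIO import StringIO
    import ZConfig

    schema = ZConfig.loadSchemaFile(StringIO(
        "<schema><key name='port' datatype='integer' default='8080'/>"
        "</schema>"))
    conf, handlers = ZConfig.loadConfigFile(schema, StringIO("port 9090\n"))
    assert conf.port == 9090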
diff --git a/branches/bug1734/src/ZConfig/tests/test_subst.py b/branches/bug1734/src/ZConfig/tests/test_subst.py
deleted file mode 100644
index 1daf3a0a..00000000
--- a/branches/bug1734/src/ZConfig/tests/test_subst.py
+++ /dev/null
@@ -1,97 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the string interpolation module."""
-
-# This is needed to support Python 2.1.
-from __future__ import nested_scopes
-
-import unittest
-
-from ZConfig import SubstitutionReplacementError, SubstitutionSyntaxError
-from ZConfig.substitution import isname, substitute
-
-
-class SubstitutionTestCase(unittest.TestCase):
-    def test_simple_names(self):
-        d = {"name": "value",
-             "name1": "abc",
-             "name_": "def",
-             "_123": "ghi"}
-        def check(s, v):
-            self.assertEqual(substitute(s, d), v)
-        check("$name", "value")
-        check(" $name ", " value ")
-        check("${name}", "value")
-        check(" ${name} ", " value ")
-        check("$name$name", "valuevalue")
-        check("$name1$name", "abcvalue")
-        check("$name_$name", "defvalue")
-        check("$_123$name", "ghivalue")
-        check("$name $name", "value value")
-        check("$name1 $name", "abc value")
-        check("$name_ $name", "def value")
-        check("$_123 $name", "ghi value")
-        check("splat", "splat")
-        check("$$", "$")
-        check("$$$name$$", "$value$")
-
-    def test_undefined_names(self):
-        d = {"name": "value"}
-        self.assertRaises(SubstitutionReplacementError,
-                          substitute, "$splat", d)
-        self.assertRaises(SubstitutionReplacementError,
-                          substitute, "$splat1", d)
-        self.assertRaises(SubstitutionReplacementError,
-                          substitute, "$splat_", d)
-
-    def test_syntax_errors(self):
-        d = {"name": "${next"}
-        def check(s):
-            self.assertRaises(SubstitutionSyntaxError,
-                              substitute, s, d)
-        check("${")
-        check("${name")
-        check("${1name}")
-        check("${ name}")
-
-    def test_edge_cases(self):
-        # It's debatable what should happen for these cases, so we'll
-        # follow the lead of the Bourne shell here.
-        def check(s):
-            self.assertRaises(SubstitutionSyntaxError,
-                              substitute, s, {})
-        check("$1")
-        check("$")
-        check("$ stuff")
-
-    def test_non_nesting(self):
-        d = {"name": "$value"}
-        self.assertEqual(substitute("$name", d), "$value")
-
-    def test_isname(self):
-        self.assert_(isname("abc"))
-        self.assert_(isname("abc_def"))
-        self.assert_(isname("_abc"))
-        self.assert_(isname("abc_"))
-        self.assert_(not isname("abc-def"))
-        self.assert_(not isname("-def"))
-        self.assert_(not isname("abc-"))
-        self.assert_(not isname(""))
-
-
-def test_suite():
-    return unittest.makeSuite(SubstitutionTestCase)
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZConfig/url.py b/branches/bug1734/src/ZConfig/url.py
deleted file mode 100644
index fd4e99fd..00000000
--- a/branches/bug1734/src/ZConfig/url.py
+++ /dev/null
@@ -1,67 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""urlparse-like helpers that normalize file: URLs.
-
-ZConfig and urllib2 expect file: URLs to consistently use the '//'
-hostpart separator; the functions here enforce this constraint.
-"""
-
-import urlparse as _urlparse
-
-try:
-    from urlparse import urlsplit
-except ImportError:
-    def urlsplit(url):
-        # Check for the fragment here, since Python 2.1.3 didn't get
-        # it right for things like "http://www.python.org#frag".
-        if '#' in url:
-            url, fragment = url.split('#', 1)
-        else:
-            fragment = ''
-        parts = list(_urlparse.urlparse(url))
-        parts[-1] = fragment
-        param = parts.pop(3)
-        if param:
-            parts[2] += ";" + param
-        return tuple(parts)
-
-
-def urlnormalize(url):
-    lc = url.lower()
-    if lc.startswith("file:/") and not lc.startswith("file:///"):
-        url = "file://" + url[5:]
-    return url
-
-
-def urlunsplit(parts):
-    parts = list(parts)
-    parts.insert(3, '')
-    url = _urlparse.urlunparse(tuple(parts))
-    if (parts[0] == "file"
-        and url.startswith("file:/")
-        and not url.startswith("file:///")):
-        url = "file://" + url[5:]
-    return url
-
-
-def urldefrag(url):
-    url, fragment = _urlparse.urldefrag(url)
-    return urlnormalize(url), fragment
-
-
-def urljoin(base, relurl):
-    url = _urlparse.urljoin(base, relurl)
-    if url.startswith("file:/") and not url.startswith("file:///"):
-        url = "file://" + url[5:]
-    return url
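A quick sketch of the normalization contract (paths are placeholders):

    from ZConfig.url import urljoin, urlnormalize

    assert urlnormalize("file:/tmp/app.conf") == "file:///tmp/app.conf"
    assert urlnormalize("http://example.com/a") == "http://example.com/a"
    # urljoin applies the same file: fixup to its result:
    assert urljoin("file:///etc/", "app.conf") == "file:///etc/app.conf"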
diff --git a/branches/bug1734/src/ZEO/ClientStorage.py b/branches/bug1734/src/ZEO/ClientStorage.py
deleted file mode 100644
index 154fd045..00000000
--- a/branches/bug1734/src/ZEO/ClientStorage.py
+++ /dev/null
@@ -1,1141 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""The ClientStorage class and the exceptions that it may raise.
-
-Public contents of this module:
-
-ClientStorage -- the main class, implementing the Storage API
-"""
-
-import cPickle
-import os
-import socket
-import tempfile
-import threading
-import time
-import types
-import logging
-
-from ZEO import ServerStub
-from ZEO.cache import ClientCache
-from ZEO.TransactionBuffer import TransactionBuffer
-from ZEO.Exceptions import ClientStorageError, ClientDisconnected, AuthError
-from ZEO.auth import get_module
-from ZEO.zrpc.client import ConnectionManager
-
-from ZODB import POSException
-from ZODB.loglevels import BLATHER
-from persistent.TimeStamp import TimeStamp
-
-logger = logging.getLogger('ZEO.ClientStorage')
-_pid = str(os.getpid())
-
-def log2(msg, level=logging.INFO, subsys=_pid, exc_info=False):
-    message = "(%s) %s" % (subsys, msg)
-    logger.log(level, message, exc_info=exc_info)
-
-try:
-    from ZODB.ConflictResolution import ResolvedSerial
-except ImportError:
-    ResolvedSerial = 'rs'
-
-def tid2time(tid):
-    return str(TimeStamp(tid))
-
-def get_timestamp(prev_ts=None):
-    """Internal helper to return a unique TimeStamp instance.
-
-    If the optional argument is not None, it must be a TimeStamp; the
-    return value is then guaranteed to be at least 1 microsecond later
-    than the argument.
-    """
-    t = time.time()
-    t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
-    if prev_ts is not None:
-        t = t.laterThan(prev_ts)
-    return t
-
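# Illustrative sketch (not part of the original file): threading the
# previous stamp back through get_timestamp() yields strictly increasing,
# unique timestamps, which the commit machinery below relies on.
def _demo_monotonic_stamps(n=3):
    prev, out = None, []
    for _ in range(n):
        prev = get_timestamp(prev)   # at least 1 microsecond after prev
        out.append(str(prev))
    assert out == sorted(set(out))   # strictly increasing, no repeats
    return out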
-class DisconnectedServerStub:
-    """Internal helper class used as a faux RPC stub when disconnected.
-
-    This raises ClientDisconnected on all attribute accesses.
-
-    This is a singleton class -- there should be only one instance,
-    the global disconnected_stub, so it can be tested by identity.
-    """
-
-    def __getattr__(self, attr):
-        raise ClientDisconnected()
-
-# Singleton instance of DisconnectedServerStub
-disconnected_stub = DisconnectedServerStub()
-
-MB = 1024**2
-
-class ClientStorage(object):
-
-    """A Storage class that is a network client to a remote storage.
-
-    This is a faithful implementation of the Storage API.
-
-    This class is thread-safe; transactions are serialized in
-    tpc_begin().
-    """
-
-    # Classes we instantiate.  A subclass might override.
-
-    TransactionBufferClass = TransactionBuffer
-    ClientCacheClass = ClientCache
-    ConnectionManagerClass = ConnectionManager
-    StorageServerStubClass = ServerStub.StorageServer
-
-    def __init__(self, addr, storage='1', cache_size=20 * MB,
-                 name='', client=None, debug=0, var=None,
-                 min_disconnect_poll=5, max_disconnect_poll=300,
-                 wait_for_server_on_startup=None, # deprecated alias for wait
-                 wait=None, wait_timeout=None,
-                 read_only=0, read_only_fallback=0,
-                 username='', password='', realm=None):
-        """ClientStorage constructor.
-
-        This is typically invoked from a custom_zodb.py file.
-
-        All arguments except addr should be keyword arguments.
-        Arguments:
-
-        addr -- The server address(es).  This is either a list of
-            addresses or a single address.  Each address can be a
-            (hostname, port) tuple to signify a TCP/IP connection or
-            a pathname string to signify a Unix domain socket
-            connection.  A hostname may be a DNS name or a dotted IP
-            address.  Required.
-
-        storage -- The storage name, defaulting to '1'.  The name must
-            match one of the storage names supported by the server(s)
-            specified by the addr argument.  The storage name is
-            displayed in the Zope control panel.
-
-        cache_size -- The disk cache size, defaulting to 20 megabytes.
-            This is passed to the ClientCache constructor.
-
-        name -- The storage name, defaulting to ''.  If this is false,
-            str(addr) is used as the storage name.
-
-        client -- A name used to construct persistent cache filenames.
-            Defaults to None, in which case the cache is not persistent.
-            See ClientCache for more info.
-
-        debug -- Ignored.  This is present only for backwards
-            compatibility with ZEO 1.
-
-        var -- When client is not None, this specifies the directory
-            where the persistent cache files are created.  It defaults
-            to None, in which case the current directory is used.
-
-        min_disconnect_poll -- The minimum delay, in seconds, between
-            attempts to connect to the server.  Defaults to 5 seconds.
-
-        max_disconnect_poll -- The maximum delay, in seconds, between
-            attempts to connect to the server.  Defaults to 300 seconds.
-
-        wait_for_server_on_startup -- A backwards compatible alias for
-            the wait argument.
-
-        wait -- A flag indicating whether to wait until a connection
-            with a server is made, defaulting to true.
-
-        wait_timeout -- Maximum time to wait for a connection before
-            giving up.  Only meaningful if wait is True.
-
-        read_only -- A flag indicating whether this should be a
-            read-only storage, defaulting to false (i.e. writing is
-            allowed by default).
-
-        read_only_fallback -- A flag indicating whether a read-only
-            remote storage should be acceptable as a fallback when no
-            writable storages are available.  Defaults to false.  At
-            most one of read_only and read_only_fallback should be
-            true.
-
-        username -- string with username to be used when authenticating.
-            The username and password only need to be provided if you
-            are connecting to an authenticated server storage.
-
-        password -- string with plaintext password to be used
-            when authenticating.
-
-        Note that the authentication protocol is defined by the server
-        and is detected by the ClientStorage upon connecting (see
-        testConnection() and doAuth() for details).
-        """
-
-        log2("%s (pid=%d) created %s/%s for storage: %r" %
-             (self.__class__.__name__,
-              os.getpid(),
-              read_only and "RO" or "RW",
-              read_only_fallback and "fallback" or "normal",
-              storage))
-
-        if debug:
-            log2("ClientStorage(): debug argument is no longer used")
-
-        # wait defaults to True, but wait_for_server_on_startup overrides
-        # if not None
-        if wait_for_server_on_startup is not None:
-            if wait is not None and wait != wait_for_server_on_startup:
-                log2("ClientStorage(): conflicting values for wait and "
-                     "wait_for_server_on_startup; wait prevails",
-                     level=logging.WARNING)
-            else:
-                log2("ClientStorage(): wait_for_server_on_startup "
-                     "is deprecated; please use wait instead")
-                wait = wait_for_server_on_startup
-        elif wait is None:
-            wait = 1
-
-        self._addr = addr # For tests
-
-        # A ZEO client can run in disconnected mode, using data from
-        # its cache, or in connected mode.  Several instance variables
-        # are related to whether the client is connected.
-
-        # _server: All method calls are invoked through the server
-        #    stub.  When not connected, this is set to disconnected_stub,
-        #    an object that raises ClientDisconnected errors.
-
-        # _ready: A threading Event that is set only if _server
-        #    is set to a real stub.
-
-        # _connection: The current zrpc connection or None.
-
-        # _connection is set as soon as a connection is established,
-        # but _server is set only after cache verification has finished
-        # and clients can safely use the server.  _pending_server holds
-        # a server stub while it is being verified.
-
-        self._server = disconnected_stub
-        self._connection = None
-        self._pending_server = None
-        self._ready = threading.Event()
-
-        # _is_read_only stores the constructor argument
-        self._is_read_only = read_only
-        # _conn_is_read_only stores the status of the current connection
-        self._conn_is_read_only = 0
-        self._storage = storage
-        self._read_only_fallback = read_only_fallback
-        self._username = username
-        self._password = password
-        self._realm = realm
-
-        # Flag tracking disconnections in the middle of a transaction.  This
-        # is reset in tpc_begin() and set in notifyDisconnected().
-        self._midtxn_disconnect = 0
-
-        # _server_addr is used by sortKey()
-        self._server_addr = None
-        self._tfile = None
-        self._pickler = None
-
-        self._info = {'length': 0, 'size': 0, 'name': 'ZEO Client',
-                      'supportsUndo':0, 'supportsVersions': 0,
-                      'supportsTransactionalUndo': 0}
-
-        self._tbuf = self.TransactionBufferClass()
-        self._db = None
-        self._ltid = None # the last committed transaction
-
-        # _serials: stores (oid, serialno) as returned by server
-        # _seriald: _check_serials() moves from _serials to _seriald,
-        #           which maps oid to serialno
-
-        # TODO:  If serial number matches transaction id, then there is
-        # no need to have all this extra infrastructure for handling
-        # serial numbers.  The vote call can just return the tid.
-        # If there is a conflict error, we can't have a special method
-        # called just to propagate the error.
-        self._serials = []
-        self._seriald = {}
-
-        self.__name__ = name or str(addr) # Standard convention for storages
-
-        # A ClientStorage only allows one thread to commit at a time.
-        # Mutual exclusion is achieved using _tpc_cond, which
-        # protects _transaction.  A thread that wants to assign to
-        # self._transaction must acquire _tpc_cond first.  A thread
-        # that decides it's done with a transaction (whether via success
-        # or failure) must set _transaction to None and do
-        # _tpc_cond.notify() before releasing _tpc_cond.
-        self._tpc_cond = threading.Condition()
-        self._transaction = None
-
-        # Prevent multiple new_oid calls from going out.  The _oids
-        # variable should only be modified while holding the
-        # _oid_lock.
-        self._oid_lock = threading.Lock()
-        self._oids = [] # Object ids retrieved from new_oids()
-
-        # load() and tpc_finish() must be serialized to guarantee
-        # that cache modifications from each occur atomically.
-        # It also prevents multiple load calls occurring simultaneously,
-        # which simplifies the cache logic.
-        self._load_lock = threading.Lock()
-        # _load_oid and _load_status are protected by _lock
-        self._load_oid = None
-        self._load_status = None
-
-        # Can't read data in one thread while writing data
-        # (tpc_finish) in another thread.  In general, the lock
-        # must prevent access to the cache while _update_cache
-        # is executing.
-        self._lock = threading.Lock()
-
-        # Decide whether to use non-temporary files
-        if client is not None:
-            dir = var or os.getcwd()
-            cache_path = os.path.join(dir, "%s-%s.zec" % (client, storage))
-        else:
-            cache_path = None
-        self._cache = self.ClientCacheClass(cache_path, size=cache_size)
-        # TODO:  maybe there's a better time to open the cache?  Unclear.
-        self._cache.open()
-
-        self._rpc_mgr = self.ConnectionManagerClass(addr, self,
-                                                    tmin=min_disconnect_poll,
-                                                    tmax=max_disconnect_poll)
-
-        if wait:
-            self._wait(wait_timeout)
-        else:
-            # attempt_connect() will make an attempt that doesn't block
-            # "too long," for a very vague notion of too long.  If that
-            # doesn't succeed, call connect() to start a thread.
-            if not self._rpc_mgr.attempt_connect():
-                self._rpc_mgr.connect()
-
-    def _wait(self, timeout=None):
-        if timeout is not None:
-            deadline = time.time() + timeout
-            log2("Setting deadline to %f" % deadline, level=BLATHER)
-        else:
-            deadline = None
-        # Wait for a connection to be established.
-        self._rpc_mgr.connect(sync=1)
-        # When a synchronous connect() call returns, there is
-        # a valid _connection object but cache validation may
-        # still be going on.  This code must wait until validation
-        # finishes, but if the connection isn't a zrpc async
-        # connection it also needs to poll for input.
-        if self._connection.is_async():
-            while 1:
-                self._ready.wait(30)
-                if self._ready.isSet():
-                    break
-                if timeout and time.time() > deadline:
-                    log2("Timed out waiting for connection",
-                         level=logging.WARNING)
-                    break
-                log2("Waiting for cache verification to finish")
-        else:
-            self._wait_sync(deadline)
-
-    def _wait_sync(self, deadline=None):
-        # Log no more than one "waiting" message per LOG_THROTTLE seconds.
-        LOG_THROTTLE = 300 # 5 minutes
-        next_log_time = time.time()
-
-        while not self._ready.isSet():
-            now = time.time()
-            if deadline and now > deadline:
-                log2("Timed out waiting for connection", level=logging.WARNING)
-                break
-            if now >= next_log_time:
-                log2("Waiting for cache verification to finish")
-                next_log_time = now + LOG_THROTTLE
-            if self._connection is None:
-                # If the connection was closed while we were
-                # waiting for it to become ready, start over.
-                if deadline is None:
-                    timeout = None
-                else:
-                    timeout = deadline - now
-                return self._wait(timeout)
-            # No mainloop is running, so we need to call something fancy to
-            # handle asyncore events.
-            self._connection.pending(30)
-
-    def close(self):
-        """Storage API: finalize the storage, releasing external resources."""
-        self._tbuf.close()
-        if self._cache is not None:
-            self._cache.close()
-            self._cache = None
-        if self._rpc_mgr is not None:
-            self._rpc_mgr.close()
-            self._rpc_mgr = None
-
-    def registerDB(self, db, limit):
-        """Storage API: register a database for invalidation messages.
-
-        This is called by ZODB.DB (and by some tests).
-
-        The storage isn't really ready to use until after this call.
-        """
-        self._db = db
-
-    def is_connected(self):
-        """Return whether the storage is currently connected to a server."""
-        # This function is used by clients, so we only report that a
-        # connection exists when the connection is ready to use.
-        return self._ready.isSet()
-
-    def sync(self):
-        """Handle any pending invalidation messages.
-
-        This is called by the sync method in ZODB.Connection.
-        """
-        # If there is no connection, return immediately.  Technically,
-        # there are no pending invalidations so they are all handled.
-        # There doesn't seem to be much benefit to raising an exception.
-
-        cn = self._connection
-        if cn is not None:
-            cn.pending()
-
-    def doAuth(self, protocol, stub):
-        if not (self._username and self._password):
-            raise AuthError, "empty username or password"
-
-        module = get_module(protocol)
-        if not module:
-            log2("%s: no such an auth protocol: %s" %
-                 (self.__class__.__name__, protocol), level=logging.WARNING)
-            return
-
-        storage_class, client, db_class = module
-
-        if not client:
-            log2("%s: %s isn't a valid protocol, must have a Client class" %
-                 (self.__class__.__name__, protocol), level=logging.WARNING)
-            raise AuthError, "invalid protocol"
-
-        c = client(stub)
-
-        # Initiate authentication, returns boolean specifying whether OK
-        return c.start(self._username, self._realm, self._password)
-
-    def testConnection(self, conn):
-        """Internal: test the given connection.
-
-        This returns:   1 if the connection is an optimal match,
-                        0 if it is a suboptimal but acceptable match.
-        It can also raise DisconnectedError or ReadOnlyError.
-
-        This is called by ZEO.zrpc.ConnectionManager to decide which
-        connection to use in case there are multiple, and some are
-        read-only and others are read-write.
-
-        This works by calling register() on the server.  In read-only
-        mode, register() is called with the read_only flag set.  In
-        writable mode and in read-only fallback mode, register() is
-        called with the read_only flag cleared.  In read-only fallback
-        mode only, if the register() call raises ReadOnlyError, it is
-        retried with the read-only flag set, and if this succeeds,
-        this is deemed a suboptimal match.  In all other cases, a
-        succeeding register() call is deemed an optimal match, and any
-        exception raised by register() is passed through.
-        """
-        log2("Testing connection %r" % conn)
-        # TODO:  Should we check the protocol version here?
-        self._conn_is_read_only = 0
-        stub = self.StorageServerStubClass(conn)
-
-        auth = stub.getAuthProtocol()
-        log2("Server authentication protocol %r" % auth)
-        if auth:
-            skey = self.doAuth(auth, stub)
-            if skey:
-                log2("Client authentication successful")
-                conn.setSessionKey(skey)
-            else:
-                log2("Authentication failed")
-                raise AuthError, "Authentication failed"
-
-        try:
-            stub.register(str(self._storage), self._is_read_only)
-            return 1
-        except POSException.ReadOnlyError:
-            if not self._read_only_fallback:
-                raise
-            log2("Got ReadOnlyError; trying again with read_only=1")
-            stub.register(str(self._storage), read_only=1)
-            self._conn_is_read_only = 1
-            return 0
-
-    def notifyConnected(self, conn):
-        """Internal: start using the given connection.
-
-        This is called by ConnectionManager after it has decided which
-        connection should be used.
-        """
-        if self._cache is None:
-            # the storage was closed, but the connect thread called
-            # this method before it was stopped.
-            return
-
-        # TODO:  report whether we get a read-only connection.
-        if self._connection is not None:
-            reconnect = 1
-        else:
-            reconnect = 0
-        self.set_server_addr(conn.get_addr())
-
-        # If we are upgrading from a read-only fallback connection,
-        # we must close the old connection to prevent it from being
-        # used while the cache is verified against the new connection.
-        if self._connection is not None:
-            self._connection.close()
-        self._connection = conn
-
-        if reconnect:
-            log2("Reconnected to storage: %s" % self._server_addr)
-        else:
-            log2("Connected to storage: %s" % self._server_addr)
-
-        stub = self.StorageServerStubClass(conn)
-        self._oids = []
-        self._info.update(stub.get_info())
-        self.verify_cache(stub)
-        if not conn.is_async():
-            log2("Waiting for cache verification to finish")
-            self._wait_sync()
-        self._handle_extensions()
-
-    def _handle_extensions(self):
-        for name in self.getExtensionMethods().keys():
-            if not hasattr(self, name):
-                setattr(self, name, self._server.extensionMethod(name))
-
-    def set_server_addr(self, addr):
-        # Normalize server address and convert to string
-        if isinstance(addr, types.StringType):
-            self._server_addr = addr
-        else:
-            assert isinstance(addr, types.TupleType)
-            # If the server is on a remote host, we need to guarantee
-            # that all clients use the same name for the server.  If
-            # they don't, the sortKey() may be different for each client.
-            # The best solution seems to be the official name reported
-            # by gethostbyaddr().
-            host = addr[0]
-            try:
-                canonical, aliases, addrs = socket.gethostbyaddr(host)
-            except socket.error, err:
-                log2("Error resolving host: %s (%s)" % (host, err),
-                     level=BLATHER)
-                canonical = host
-            self._server_addr = str((canonical, addr[1]))
-
-    def sortKey(self):
-        # If the client isn't connected to anything, it can't have a
-        # valid sortKey().  Raise an error to stop the transaction early.
-        if self._server_addr is None:
-            raise ClientDisconnected
-        else:
-            return '%s:%s' % (self._storage, self._server_addr)
-
-    def verify_cache(self, server):
-        """Internal routine called to verify the cache.
-
-        The return value (indicating which path we took) is used by
-        the test suite.
-        """
-
-        # If verify_cache() finishes the cache verification process,
-        # it should set self._server.  If it goes through full cache
-        # verification, then endVerify() should set self._server.
-
-        last_inval_tid = self._cache.getLastTid()
-        if last_inval_tid is not None:
-            ltid = server.lastTransaction()
-            if ltid == last_inval_tid:
-                log2("No verification necessary (last_inval_tid up-to-date)")
-                self._server = server
-                self._ready.set()
-                return "no verification"
-
-            # log some hints about last transaction
-            log2("last inval tid: %r %s\n"
-                 % (last_inval_tid, tid2time(last_inval_tid)))
-            log2("last transaction: %r %s" %
-                 (ltid, ltid and tid2time(ltid)))
-
-            pair = server.getInvalidations(last_inval_tid)
-            if pair is not None:
-                log2("Recovering %d invalidations" % len(pair[1]))
-                self.invalidateTransaction(*pair)
-                self._server = server
-                self._ready.set()
-                return "quick verification"
-
-        log2("Verifying cache")
-        # set up a tempfile to hold zeoVerify results
-        self._tfile = tempfile.TemporaryFile(suffix=".inv")
-        self._pickler = cPickle.Pickler(self._tfile, 1)
-        self._pickler.fast = 1 # Don't use the memo
-
-        # TODO:  should batch these operations for efficiency; would need
-        # to acquire lock ...
-        for oid, tid, version in self._cache.contents():
-            server.verify(oid, version, tid)
-        self._pending_server = server
-        server.endZeoVerify()
-        return "full verification"
-
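# Illustrative sketch (not part of the original file): the verification
# log written above is a stream of (oid, version) pickles terminated by a
# (None, None) marker; InvalidationLogIterator at the bottom of this
# module replays it as (oid, version, None) triples.
def _demo_verify_log(records=(('oid1', ''), ('oid2', ''))):
    f = tempfile.TemporaryFile()
    p = cPickle.Pickler(f, 1)
    p.fast = 1                      # match verify_cache(): no memo
    for rec in records:
        p.dump(rec)
    p.dump((None, None))            # end-of-data marker (see endVerify)
    f.seek(0)
    return list(InvalidationLogIterator(f))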
-    ### Is there a race condition between notifyConnected and
-    ### notifyDisconnected? In particular, what if we get
-    ### notifyDisconnected in the middle of notifyConnected?
-    ### The danger is that we'll proceed as if we were connected
-    ### without worrying if we were, but this would happen anyway if
-    ### notifyDisconnected had to get the instance lock.  There's
-    ### nothing to gain by getting the instance lock.
-
-    def notifyDisconnected(self):
-        """Internal: notify that the server connection was terminated.
-
-        This is called by ConnectionManager when the connection is
-        closed or when certain problems with the connection occur.
-        """
-        log2("Disconnected from storage: %s" % repr(self._server_addr))
-        self._connection = None
-        self._ready.clear()
-        self._server = disconnected_stub
-        self._midtxn_disconnect = 1
-
-    def __len__(self):
-        """Return the size of the storage."""
-        # TODO:  Is this method used?
-        return self._info['length']
-
-    def getName(self):
-        """Storage API: return the storage name as a string.
-
-        The return value consists of two parts: the name as determined
-        by the name and addr arguments to the ClientStorage
-        constructor, and the string 'connected' or 'disconnected' in
-        parentheses indicating whether the storage is (currently)
-        connected.
-        """
-        return "%s (%s)" % (
-            self.__name__,
-            self.is_connected() and "connected" or "disconnected")
-
-    def getSize(self):
-        """Storage API: an approximate size of the database, in bytes."""
-        return self._info['size']
-
-    def getExtensionMethods(self):
-        """getExtensionMethods
-
-        This returns a dictionary whose keys are names of extra methods
-        provided by this storage. Storage proxies (such as ZEO) should
-        call this method to determine the extra methods that they need
-        to proxy in addition to the standard storage methods.
-        Dictionary values should be None; this will be a handy place
-        for extra marshalling information, should we need it.
-        """
-        return self._info.get('extensionMethods', {})
-
-    def supportsUndo(self):
-        """Storage API: return whether we support undo."""
-        return self._info['supportsUndo']
-
-    def supportsVersions(self):
-        """Storage API: return whether we support versions."""
-        return self._info['supportsVersions']
-
-    def supportsTransactionalUndo(self):
-        """Storage API: return whether we support transactional undo."""
-        return self._info['supportsTransactionalUndo']
-
-    def isReadOnly(self):
-        """Storage API: return whether we are in read-only mode."""
-        if self._is_read_only:
-            return 1
-        else:
-            # If the client is configured for a read-write connection
-            # but has a read-only fallback connection, _conn_is_read_only
-            # will be True.
-            return self._conn_is_read_only
-
-    def _check_trans(self, trans):
-        """Internal helper to check a transaction argument for sanity."""
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if self._transaction is not trans:
-            raise POSException.StorageTransactionError(self._transaction,
-                                                       trans)
-
-    def abortVersion(self, version, txn):
-        """Storage API: clear any changes made by the given version."""
-        self._check_trans(txn)
-        tid, oids = self._server.abortVersion(version, id(txn))
-        # When a version aborts, invalidate the version and
-        # non-version data.  The non-version data should still be
-        # valid, but older versions of ZODB will change the
-        # non-version serialno on an abort version.  With those
-        # versions of ZODB, you'd get a conflict error if you tried to
-        # commit a transaction with the cached data.
-
-        # If we could guarantee that ZODB gave the right answer,
-        # we could just invalidate the version data.
-        for oid in oids:
-            self._tbuf.invalidate(oid, '')
-        return tid, oids
-
-    def commitVersion(self, source, destination, txn):
-        """Storage API: commit the source version in the destination."""
-        self._check_trans(txn)
-        tid, oids = self._server.commitVersion(source, destination, id(txn))
-        if destination:
-            # just invalidate our version data
-            for oid in oids:
-                self._tbuf.invalidate(oid, source)
-        else:
-            # destination is "", so invalidate version and non-version
-            for oid in oids:
-                self._tbuf.invalidate(oid, "")
-        return tid, oids
-
-    def history(self, oid, version, length=1):
-        """Storage API: return a sequence of HistoryEntry objects.
-
-        This does not support the optional filter argument defined by
-        the Storage API.
-        """
-        return self._server.history(oid, version, length)
-
-    def getSerial(self, oid):
-        """Storage API: return current serial number for oid."""
-        return self._server.getSerial(oid)
-
-    def loadSerial(self, oid, serial):
-        """Storage API: load a historical revision of an object."""
-        return self._server.loadSerial(oid, serial)
-
-    def load(self, oid, version):
-        """Storage API: return the data for a given object.
-
-        This returns the pickle data and serial number for the object
-        specified by the given object id and version, if they exist;
-        otherwise a KeyError is raised.
-        """
-        return self.loadEx(oid, version)[:2]
-
-    def loadEx(self, oid, version):
-        self._lock.acquire()    # for atomic processing of invalidations
-        try:
-            t = self._cache.load(oid, version)
-            if t:
-                return t
-        finally:
-            self._lock.release()
-
-        if self._server is None:
-            raise ClientDisconnected()
-
-        self._load_lock.acquire()
-        try:
-            self._lock.acquire()
-            try:
-                self._load_oid = oid
-                self._load_status = 1
-            finally:
-                self._lock.release()
-
-            data, tid, ver = self._server.loadEx(oid, version)
-
-            self._lock.acquire()    # for atomic processing of invalidations
-            try:
-                if self._load_status:
-                    self._cache.store(oid, ver, tid, None, data)
-                self._load_oid = None
-            finally:
-                self._lock.release()
-        finally:
-            self._load_lock.release()
-
-        return data, tid, ver
-
-    def loadBefore(self, oid, tid):
-        self._lock.acquire()
-        try:
-            t = self._cache.loadBefore(oid, tid)
-            if t is not None:
-                return t
-        finally:
-            self._lock.release()
-
-        t = self._server.loadBefore(oid, tid)
-        if t is None:
-            return None
-        data, start, end = t
-        if end is None:
-            # This method should not be used to get current data.  It
-            # doesn't use the _load_lock, so it is possible to overlap
-            # this load with an invalidation for the same object.
-
-            # If we call again, we're guaranteed to get the
-            # post-invalidation data.  But if the data is still
-            # current, we'll still get end == None.
-
-            # Maybe the best thing to do is to re-run the test with
-            # the load lock held in that case.  That hurts performance, but
-            # I don't think real application code will ever care about
-            # it.
-
-            return data, start, end
-        self._lock.acquire()
-        try:
-            self._cache.store(oid, "", start, end, data)
-        finally:
-            self._lock.release()
-
-        return data, start, end
-
-    def modifiedInVersion(self, oid):
-        """Storage API: return the version, if any, that modfied an object.
-
-        If no version modified the object, return an empty string.
-        """
-        self._lock.acquire()
-        try:
-            v = self._cache.modifiedInVersion(oid)
-            if v is not None:
-                return v
-        finally:
-            self._lock.release()
-        return self._server.modifiedInVersion(oid)
-
-    def new_oid(self):
-        """Storage API: return a new object identifier."""
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        # avoid multiple oid requests to server at the same time
-        self._oid_lock.acquire()
-        try:
-            if not self._oids:
-                self._oids = self._server.new_oids()
-                self._oids.reverse()
-            return self._oids.pop()
-        finally:
-            self._oid_lock.release()
-
-    def pack(self, t=None, referencesf=None, wait=1, days=0):
-        """Storage API: pack the storage.
-
-        Deviations from the Storage API: the referencesf argument is
-        ignored; two additional optional arguments wait and days are
-        provided:
-
-        wait -- a flag indicating whether to wait for the pack to
-            complete; defaults to true.
-
-        days -- a number of days to subtract from the pack time;
-            defaults to zero.
-        """
-        # TODO: Is it okay that read-only connections allow pack()?
-        # rf argument ignored; server will provide its own implementation
-        if t is None:
-            t = time.time()
-        t = t - (days * 86400)
-        return self._server.pack(t, wait)
-
-    def _check_serials(self):
-        """Internal helper to move data from _serials to _seriald."""
-        # serials are always going to be the same; the only
-        # question is whether an exception has been raised.
-        if self._serials:
-            l = len(self._serials)
-            r = self._serials[:l]
-            del self._serials[:l]
-            for oid, s in r:
-                if isinstance(s, Exception):
-                    raise s
-                self._seriald[oid] = s
-            return r
-
-    def store(self, oid, serial, data, version, txn):
-        """Storage API: store data for an object."""
-        self._check_trans(txn)
-        self._server.storea(oid, serial, data, version, id(txn))
-        self._tbuf.store(oid, version, data)
-        return self._check_serials()
-
-    def tpc_vote(self, txn):
-        """Storage API: vote on a transaction."""
-        if txn is not self._transaction:
-            return
-        self._server.vote(id(txn))
-        return self._check_serials()
-
-    def tpc_begin(self, txn, tid=None, status=' '):
-        """Storage API: begin a transaction."""
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        self._tpc_cond.acquire()
-        self._midtxn_disconnect = 0
-        while self._transaction is not None:
-            # It is allowable for a client to call two tpc_begins in a
-            # row with the same transaction, and the second of these
-            # must be ignored.
-            if self._transaction == txn:
-                self._tpc_cond.release()
-                return
-            self._tpc_cond.wait(30)
-        self._transaction = txn
-        self._tpc_cond.release()
-
-        try:
-            self._server.tpc_begin(id(txn), txn.user, txn.description,
-                                   txn._extension, tid, status)
-        except:
-            # Client may have disconnected during the tpc_begin().
-            if self._server is not disconnected_stub:
-                self.end_transaction()
-            raise
-
-        self._tbuf.clear()
-        self._seriald.clear()
-        del self._serials[:]
-
-    def end_transaction(self):
-        """Internal helper to end a transaction."""
-        # This is the right way to set self._transaction to None: it
-        # calls notify() on _tpc_cond in case there are waiting threads.
-        self._tpc_cond.acquire()
-        self._transaction = None
-        self._tpc_cond.notify()
-        self._tpc_cond.release()
-
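# Illustrative sketch (not part of the original file) of the
# one-committer-at-a-time handoff that tpc_begin()/end_transaction()
# implement with _tpc_cond: a condition variable rather than a plain
# lock, so a repeated tpc_begin() for the same transaction can be
# detected while waiting.
class _CommitGate:
    def __init__(self):
        self._cond = threading.Condition()
        self._txn = None
    def enter(self, txn):
        self._cond.acquire()
        try:
            while self._txn is not None and self._txn is not txn:
                self._cond.wait(30)   # block until the committer is done
            self._txn = txn           # this thread is the committer now
        finally:
            self._cond.release()
    def leave(self):
        self._cond.acquire()
        try:
            self._txn = None
            self._cond.notify()       # wake one waiting committer
        finally:
            self._cond.release()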
-    def lastTransaction(self):
-        return self._cache.getLastTid()
-
-    def tpc_abort(self, txn):
-        """Storage API: abort a transaction."""
-        if txn is not self._transaction:
-            return
-        try:
-            # Caution:  Are there any exceptions that should prevent an
-            # abort from occurring?  It seems wrong to swallow them
-            # all, yet you want to be sure that other abort logic is
-            # executed regardless.
-            try:
-                self._server.tpc_abort(id(txn))
-            except ClientDisconnected:
-                log2("ClientDisconnected in tpc_abort() ignored",
-                     level=BLATHER)
-        finally:
-            self._tbuf.clear()
-            self._seriald.clear()
-            del self._serials[:]
-            self.end_transaction()
-
-    def tpc_finish(self, txn, f=None):
-        """Storage API: finish a transaction."""
-        if txn is not self._transaction:
-            return
-        self._load_lock.acquire()
-        try:
-            if self._midtxn_disconnect:
-                raise ClientDisconnected(
-                       'Calling tpc_finish() on a disconnected transaction')
-
-            # The calls to tpc_finish() and _update_cache() should
-            # never run concurrently with another thread, because the
-            # tpc_cond condition variable prevents more than one
-            # thread from calling tpc_finish() at a time.
-            tid = self._server.tpc_finish(id(txn))
-            self._lock.acquire()  # for atomic processing of invalidations
-            try:
-                self._update_cache(tid)
-                if f is not None:
-                    f(tid)
-            finally:
-                self._lock.release()
-
-            r = self._check_serials()
-            assert r is None or len(r) == 0, "unhandled serialnos: %s" % r
-        finally:
-            self._load_lock.release()
-            self.end_transaction()
-
-    def _update_cache(self, tid):
-        """Internal helper to handle objects modified by a transaction.
-
-        This iterates over the objects in the transaction buffer and
-        updates or invalidates the cache.
-        """
-        # Must be called with _lock already acquired.
-
-        # Not sure why _update_cache() would be called on a closed storage.
-        if self._cache is None:
-            return
-
-        for oid, version, data in self._tbuf:
-            self._cache.invalidate(oid, version, tid)
-            # If data is None, we just invalidate.
-            if data is not None:
-                s = self._seriald[oid]
-                if s != ResolvedSerial:
-                    assert s == tid, (s, tid)
-                    self._cache.store(oid, version, s, None, data)
-        self._tbuf.clear()
-
-    def undo(self, trans_id, txn):
-        """Storage API: undo a transaction.
-
-        This is executed in a transactional context.  It has no effect
-        until the transaction is committed.  It can be undone itself.
-
-        Zope uses this to implement undo unless it is not supported by
-        a storage.
-        """
-        self._check_trans(txn)
-        tid, oids = self._server.undo(trans_id, id(txn))
-        for oid in oids:
-            self._tbuf.invalidate(oid, '')
-        return tid, oids
-
-    def undoInfo(self, first=0, last=-20, specification=None):
-        """Storage API: return undo information."""
-        return self._server.undoInfo(first, last, specification)
-
-    def undoLog(self, first=0, last=-20, filter=None):
-        """Storage API: return a sequence of TransactionDescription objects.
-
-        The filter argument should be None or left unspecified, since
-        it is impossible to pass the filter function to the server to
-        be executed there.  If filter is not None, an empty sequence
-        is returned.
-        """
-        if filter is not None:
-            return []
-        return self._server.undoLog(first, last)
-
-    def versionEmpty(self, version):
-        """Storage API: return whether the version has no transactions."""
-        return self._server.versionEmpty(version)
-
-    def versions(self, max=None):
-        """Storage API: return a sequence of versions in the storage."""
-        return self._server.versions(max)
-
-    # Below are methods invoked by the StorageServer
-
-    def serialnos(self, args):
-        """Server callback to pass a list of changed (oid, serial) pairs."""
-        self._serials.extend(args)
-
-    def info(self, dict):
-        """Server callback to update the info dictionary."""
-        self._info.update(dict)
-
-    def invalidateVerify(self, args):
-        """Server callback to invalidate an (oid, version) pair.
-
-        This is called as part of cache validation.
-        """
-        # Invalidation as a result of verify_cache().
-        # Queue an invalidate for the end of the verification procedure.
-        if self._pickler is None:
-            # This should never happen.  TODO:  assert it doesn't, or log
-            # if it does.
-            return
-        self._pickler.dump(args)
-
-    def _process_invalidations(self, invs):
-        # Invalidations are sent by the ZEO server as a sequence of
-        # oid, version pairs.  The DB's invalidate() method expects a
-        # dictionary of oids.
-
-        self._lock.acquire()
-        try:
-            # versions maps version names to dictionary of invalidations
-            versions = {}
-            for oid, version, tid in invs:
-                if oid == self._load_oid:
-                    self._load_status = 0
-                self._cache.invalidate(oid, version, tid)
-                versions.setdefault((version, tid), {})[oid] = tid
-
-            if self._db is not None:
-                for (version, tid), d in versions.items():
-                    self._db.invalidate(tid, d, version=version)
-        finally:
-            self._lock.release()
-
-    def endVerify(self):
-        """Server callback to signal end of cache validation."""
-        if self._pickler is None:
-            return
-        # write end-of-data marker
-        self._pickler.dump((None, None))
-        self._pickler = None
-        self._tfile.seek(0)
-        f = self._tfile
-        self._tfile = None
-        self._process_invalidations(InvalidationLogIterator(f))
-        f.close()
-
-        log2("endVerify finishing")
-        self._server = self._pending_server
-        self._ready.set()
-        self._pending_server = None
-        log2("endVerify finished")
-
-    def invalidateTransaction(self, tid, args):
-        """Invalidate objects modified by tid."""
-        self._lock.acquire()
-        try:
-            self._cache.setLastTid(tid)
-        finally:
-            self._lock.release()
-        if self._pickler is not None:
-            log2("Transactional invalidation during cache verification",
-                 level=BLATHER)
-            for t in args:
-                self._pickler.dump(t)
-            return
-        self._process_invalidations([(oid, version, tid)
-                                     for oid, version in args])
-
-    # The following are for compatibility with protocol version 2.0.0
-
-    def invalidateTrans(self, args):
-        return self.invalidateTransaction(None, args)
-
-    invalidate = invalidateVerify
-    end = endVerify
-    Invalidate = invalidateTrans
-
-def InvalidationLogIterator(fileobj):
-    unpickler = cPickle.Unpickler(fileobj)
-    while 1:
-        oid, version = unpickler.load()
-        if oid is None:
-            break
-        yield oid, version, None
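A hedged construction sketch based on the constructor docstring above (the
address, storage name, and ZODB wiring are placeholders, not values taken
from this patch):

    from ZEO.ClientStorage import ClientStorage
    from ZODB import DB

    # Block until connected to a ZEO server on localhost:8100.
    storage = ClientStorage(('localhost', 8100), storage='1', wait=1)
    db = DB(storage)
    conn = db.open()
    try:
        root = conn.root()        # ordinary ZODB usage from here on
    finally:
        conn.close()
        db.close()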
diff --git a/branches/bug1734/src/ZEO/ClientStub.py b/branches/bug1734/src/ZEO/ClientStub.py
deleted file mode 100644
index 912f743f..00000000
--- a/branches/bug1734/src/ZEO/ClientStub.py
+++ /dev/null
@@ -1,62 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""RPC stubs for interface exported by ClientStorage."""
-
-class ClientStorage:
-
-    """An RPC stub class for the interface exported by ClientStorage.
-
-    This is the interface presented by ClientStorage to the
-    StorageServer; i.e. the StorageServer calls these methods and they
-    are executed in the ClientStorage.
-
-    See the ClientStorage class for documentation on these methods.
-
-    It is currently important that all methods here are asynchronous
-    (meaning they don't have a return value and the caller doesn't
-    wait for them to complete), *and* that none of them cause any
-    calls from the client to the storage.  This is due to limitations
-    in the zrpc subpackage.
-
-    The on-the-wire names of some of the methods don't match the
-    Python method names.  That's because the on-the-wire protocol was
-    fixed for ZEO 2 and we don't want to change it.  There are some
-    aliases in ClientStorage.py to make up for this.
-    """
-
-    def __init__(self, rpc):
-        """Constructor.
-
-        The argument is a connection: an instance of the
-        zrpc.connection.Connection class.
-        """
-        self.rpc = rpc
-
-    def beginVerify(self):
-        self.rpc.callAsync('beginVerify')
-
-    def invalidateVerify(self, args):
-        self.rpc.callAsync('invalidateVerify', args)
-
-    def endVerify(self):
-        self.rpc.callAsync('endVerify')
-
-    def invalidateTransaction(self, tid, args):
-        self.rpc.callAsyncNoPoll('invalidateTransaction', tid, args)
-
-    def serialnos(self, arg):
-        self.rpc.callAsync('serialnos', arg)
-
-    def info(self, arg):
-        self.rpc.callAsync('info', arg)
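
A minimal sketch of the one-way calling convention the stub above
relies on: every method becomes a callAsync()/callAsyncNoPoll() with
the on-the-wire message name as its first argument.  FakeConnection is
hypothetical and stands in for a zrpc connection::

    class FakeConnection:
        def __init__(self):
            self.sent = []
        def callAsync(self, name, *args):
            self.sent.append((name, args))
        def callAsyncNoPoll(self, name, *args):
            self.sent.append((name, args))

    stub = ClientStorage(FakeConnection())
    stub.beginVerify()
    stub.invalidateTransaction('tid-1', [('oid-1', '')])
    print stub.rpc.sent
    # [('beginVerify', ()),
    #  ('invalidateTransaction', ('tid-1', [('oid-1', '')]))]
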
diff --git a/branches/bug1734/src/ZEO/CommitLog.py b/branches/bug1734/src/ZEO/CommitLog.py
deleted file mode 100644
index 18386689..00000000
--- a/branches/bug1734/src/ZEO/CommitLog.py
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Log a transaction's commit info during two-phase commit.
-
-A storage server allows multiple clients to commit transactions, but
-must serialize them as they actually execute at the server.  The
-concurrent commits are achieved by logging actions up until the
-tpc_vote().  At that point, the entire transaction is committed on the
-real storage.
-"""
-import cPickle
-import tempfile
-
-class CommitLog:
-
-    def __init__(self):
-        self.file = tempfile.TemporaryFile(suffix=".log")
-        self.pickler = cPickle.Pickler(self.file, 1)
-        self.pickler.fast = 1
-        self.stores = 0
-        self.read = 0
-
-    def size(self):
-        return self.file.tell()
-
-    def store(self, oid, serial, data, version):
-        self.pickler.dump((oid, serial, data, version))
-        self.stores += 1
-
-    def get_loader(self):
-        self.read = 1
-        self.file.seek(0)
-        return self.stores, cPickle.Unpickler(self.file)
-
-    def close(self):
-        if self.file:
-            self.file.close()
-            self.file = None
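
A short usage sketch for the buffer above; the replay loop mirrors what
the server does at vote time, and the argument values are illustrative::

    log = CommitLog()
    log.store('oid-1', 'serial-1', 'data-1', '')
    log.store('oid-2', 'serial-2', 'data-2', '')

    count, loader = log.get_loader()
    for i in range(count):
        oid, serial, data, version = loader.load()
        # ... hand each record to the underlying storage here ...
    log.close()
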
diff --git a/branches/bug1734/src/ZEO/DEPENDENCIES.cfg b/branches/bug1734/src/ZEO/DEPENDENCIES.cfg
deleted file mode 100644
index d5fb80c8..00000000
--- a/branches/bug1734/src/ZEO/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-BTrees
-ThreadedAsync
-ZConfig
-ZODB
-persistent
-transaction
-zdaemon
diff --git a/branches/bug1734/src/ZEO/DebugServer.py b/branches/bug1734/src/ZEO/DebugServer.py
deleted file mode 100644
index 12bf6940..00000000
--- a/branches/bug1734/src/ZEO/DebugServer.py
+++ /dev/null
@@ -1,89 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A debugging version of the server that records network activity."""
-
-import socket
-import struct
-import time
-from errno import ECONNRESET, ENOTCONN, ESHUTDOWN
-
-from ZEO.StorageServer import StorageServer, log
-from ZEO.zrpc.connection import ManagedServerConnection
-
-# a bunch of codes
-NEW_CONN = 1
-CLOSE_CONN = 2
-DATA = 3
-ERROR = 4
-
-class DebugManagedServerConnection(ManagedServerConnection):
-
-    def __init__(self, sock, addr, obj, mgr):
-        # mgr is the DebugServer instance
-        self.mgr = mgr
-        self.__super_init(sock, addr, obj)
-        record_id = mgr._record_connection(addr)
-        self._record = lambda code, data: mgr._record(record_id, code, data)
-        self.obj.notifyConnected(self)
-
-    def close(self):
-        self._record(CLOSE_CONN, "")
-        ManagedServerConnection.close(self)
-
-    # override the lowest-level of asyncore's connection
-
-    def recv(self, buffer_size):
-        try:
-            data = self.socket.recv(buffer_size)
-            if not data:
-                # a closed connection is indicated by signaling
-                # a read condition, and having recv() return 0.
-                self.handle_close()
-                return ''
-            else:
-                self._record(DATA, data)
-                return data
-        except socket.error, why:
-            # winsock sometimes throws ENOTCONN
-            self._record(ERROR, str(why))
-            if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
-                self.handle_close()
-                return ''
-            else:
-                raise socket.error, why
-
-class DebugServer(StorageServer):
-
-    # DebugZEOStorage is not defined in this module; it is assumed to
-    # be supplied by whatever code configures the debug server.
-    ZEOStorageClass = DebugZEOStorage
-    ManagedServerConnectionClass = DebugManagedServerConnection
-
-    def __init__(self, *args, **kwargs):
-        record = kwargs.pop("record")
-        StorageServer.__init__(self, *args, **kwargs)
-        self._setup_record(record)
-        self._conn_counter = 1
-
-    def _setup_record(self, path):
-        try:
-            self._recordfile = open(path, "ab")
-        except IOError, msg:
-            self._recordfile = None
-            log("failed to open recordfile %s: %s" % (path, msg))
-
-    def _record_connection(self, addr):
-        cid = self._conn_counter
-        self._conn_counter += 1
-        self._record(cid, NEW_CONN, str(addr))
-        return cid
-
-    def _record(self, conn, code, data):
-        if self._recordfile is None:
-            return
-        s = struct.pack(">iiii", conn, code, int(time.time()), len(data))
-        self._recordfile.write(s + data)
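
A sketch of a reader for the record file written by _record() above,
matching its ">iiii" header (connection id, event code, timestamp,
payload length) followed by the raw payload bytes; the file name is
illustrative::

    import struct

    f = open("zeo-debug.record", "rb")
    while 1:
        header = f.read(16)
        if len(header) < 16:
            break
        conn, code, when, size = struct.unpack(">iiii", header)
        payload = f.read(size)
        print conn, code, when, repr(payload)
    f.close()
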
diff --git a/branches/bug1734/src/ZEO/Exceptions.py b/branches/bug1734/src/ZEO/Exceptions.py
deleted file mode 100644
index cf96415d..00000000
--- a/branches/bug1734/src/ZEO/Exceptions.py
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Exceptions for ZEO."""
-
-from ZODB.POSException import StorageError
-
-class ClientStorageError(StorageError):
-    """An error occured in the ZEO Client Storage."""
-
-class UnrecognizedResult(ClientStorageError):
-    """A server call returned an unrecognized result."""
-
-class ClientDisconnected(ClientStorageError):
-    """The database storage is disconnected from the storage."""
-
-class AuthError(StorageError):
-    """The client provided invalid authentication credentials."""
diff --git a/branches/bug1734/src/ZEO/README.txt b/branches/bug1734/src/ZEO/README.txt
deleted file mode 100644
index 3b0f2963..00000000
--- a/branches/bug1734/src/ZEO/README.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-=======
-ZEO 2.0
-=======
-
-What's ZEO?
------------
-
-ZEO stands for Zope Enterprise Objects.  ZEO is an add-on for Zope
-that allows multiple processes to connect to a single ZODB storage.
-Those processes can live on different machines, but don't need to.
-ZEO 2 has many improvements over ZEO 1, and is incompatible with ZEO 1;
-if you upgrade an existing ZEO 1 installation, you must upgrade the
-server and all clients simultaneously.  If you received ZEO 2 as part of
-the ZODB 3 distribution, the ZEO 1 sources are provided in a separate
-directory (ZEO1).  Some documentation for ZEO is available in the ZODB 3
-package in the Doc subdirectory.  ZEO depends on the ZODB software; it
-can be used with the version of ZODB distributed with Zope 2.5.1 or
-later.  More information about ZEO can be found in the ZODB Wiki:
-
-    http://www.zope.org/Wikis/ZODB
-
-What's here?
-------------
-
-This list of filenames is mostly for ZEO developers::
-
- ClientCache.py          client-side cache implementation
- ClientStorage.py        client-side storage implementation
- ClientStub.py           RPC stubs for callbacks from server to client
- CommitLog.py            buffer used during two-phase commit on the server
- Exceptions.py           definitions of exceptions
- ICache.py               interface definition for the client-side cache
- ServerStub.py           RPC stubs for the server
- StorageServer.py        server-side storage implementation
- TransactionBuffer.py    buffer used for transaction data in the client
- __init__.py             near-empty file to make this directory a package
- simul.py                command-line tool to simulate cache behavior
- start.py                command-line tool to start the storage server
- stats.py                command-line tool to process client cache traces
- tests/                  unit tests and other test utilities
- util.py                 utilities used by the server startup tool
- version.txt             text file indicating the ZEO version
- zrpc/                   subpackage implementing Remote Procedure Call (RPC)
-
diff --git a/branches/bug1734/src/ZEO/SETUP.cfg b/branches/bug1734/src/ZEO/SETUP.cfg
deleted file mode 100644
index b2b9b440..00000000
--- a/branches/bug1734/src/ZEO/SETUP.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-script  runzeo.py
-script  zeoctl.py
-script  zeopasswd.py
-script  mkzeoinst.py
diff --git a/branches/bug1734/src/ZEO/ServerStub.py b/branches/bug1734/src/ZEO/ServerStub.py
deleted file mode 100644
index 00320a98..00000000
--- a/branches/bug1734/src/ZEO/ServerStub.py
+++ /dev/null
@@ -1,295 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""RPC stubs for interface exported by StorageServer."""
-
-##
-# ZEO storage server.
-# <p>
-# Remote method calls can be synchronous or asynchronous.  If the call
-# is synchronous, the client thread blocks until the call returns.  A
-# single client can only have one synchronous request outstanding.  If
-# several threads share a single client, threads other than the caller
-# will block only if they attempt to make another synchronous call.
-# An asynchronous call does not cause the client thread to block.  An
-# exception raised by an asynchronous method is logged on the server,
-# but is not returned to the client.
-
-class StorageServer:
-
-    """An RPC stub class for the interface exported by ClientStorage.
-
-    This is the interface presented by the StorageServer to the
-    ClientStorage; i.e. the ClientStorage calls these methods and they
-    are executed in the StorageServer.
-
-    See the StorageServer module for documentation on these methods.
-    """
-
-    def __init__(self, rpc):
-        """Constructor.
-
-        The argument is a connection: an instance of the
-        zrpc.connection.Connection class.
-        """
-        self.rpc = rpc
-        # Wait until we know what version the other side is using.
-        while rpc.peer_protocol_version is None:
-            rpc.pending()
-        if rpc.peer_protocol_version == 'Z200':
-            self.lastTransaction = lambda: None
-            self.getInvalidations = lambda tid: None
-            self.getAuthProtocol = lambda: None
-
-    def extensionMethod(self, name):
-        return ExtensionMethodWrapper(self.rpc, name).call
-
-    ##
-    # Register current connection with a storage and a mode.
-    # In effect, it is like an open call.
-    # @param storage_name a string naming the storage.  This argument
-    #        is primarily for backwards compatibility with servers
-    #        that supported multiple storages.
-    # @param read_only boolean
-    # @exception ValueError unknown storage_name or already registered
-    # @exception ReadOnlyError storage is read-only and a read-write
-#            connection was requested
-
-    def register(self, storage_name, read_only):
-        self.rpc.call('register', storage_name, read_only)
-
-    ##
-    # Return dictionary of meta-data about the storage.
-    # @defreturn dict
-
-    def get_info(self):
-        return self.rpc.call('get_info')
-
-    ##
-    # Check whether the server requires authentication.  Returns
-    # the name of the protocol.
-    # @defreturn string
-
-    def getAuthProtocol(self):
-        return self.rpc.call('getAuthProtocol')
-
-    ##
-    # Return id of the last committed transaction
-    # @defreturn string
-
-    def lastTransaction(self):
-        # Not in protocol version 2.0.0; see __init__()
-        return self.rpc.call('lastTransaction')
-
-    ##
-    # Return invalidations for all transactions after tid.
-    # @param tid transaction id
-    # @defreturn 2-tuple, (tid, list)
-    # @return tuple containing the last committed transaction
-    #         and a list of oids that were invalidated.  Returns
-    #         None and an empty list if the server does not have
-    #         the list of oids available.
-
-    def getInvalidations(self, tid):
-        # Not in protocol version 2.0.0; see __init__()
-        return self.rpc.call('getInvalidations', tid)
-
-    ##
-    # Check whether serial numbers s and sv are current for oid.
-    # If one or both of the serial numbers are not current, the
-    # server will make an asynchronous invalidateVerify() call.
-    # @param oid object id
-    # @param s serial number of non-version data
-    # @param sv serial number of version data or None
-    # @defreturn async
-
-    def zeoVerify(self, oid, s, sv):
-        self.rpc.callAsync('zeoVerify', oid, s, sv)
-
-    ##
-    # Check whether current serial number is valid for oid and version.
-    # If the serial number is not current, the server will make an
-    # asynchronous invalidateVerify() call.
-    # @param oid object id
-    # @param version name of version for oid
-    # @param serial client's current serial number
-    # @defreturn async
-
-    def verify(self, oid, version, serial):
-        self.rpc.callAsync('verify', oid, version, serial)
-
-    ##
-    # Signal to the server that cache verification is done.
-    # @defreturn async
-
-    def endZeoVerify(self):
-        self.rpc.callAsync('endZeoVerify')
-
-    ##
-    # Generate a new set of oids.
-    # @param n number of new oids to return
-    # @defreturn list
-    # @return list of oids
-
-    def new_oids(self, n=None):
-        if n is None:
-            return self.rpc.call('new_oids')
-        else:
-            return self.rpc.call('new_oids', n)
-
-    ##
-    # Pack the storage.
-    # @param t pack time
-    # @param wait optional, boolean.  If true, the call will not
-    #             return until the pack is complete.
-
-    def pack(self, t, wait=None):
-        if wait is None:
-            self.rpc.call('pack', t)
-        else:
-            self.rpc.call('pack', t, wait)
-
-    ##
-    # Return current data for oid.  Version data is returned if
-    # present.
-    # @param oid object id
-    # @defreturn 5-tuple
-    # @return 5-tuple, current non-version data, serial number,
-    #         version name, version data, version data serial number
-    # @exception KeyError if oid is not found
-
-    def zeoLoad(self, oid):
-        return self.rpc.call('zeoLoad', oid)
-
-    ##
-    # Return current data for oid along with the tid of the
-    # transaction that wrote the data.
-    # @param oid object id
-    # @param version string, name of version
-    # @defreturn 4-tuple
-    # @return data, serial number, transaction id, version,
-    #         where version is the name of the version the data came
-    #         from or "" for non-version data
-    # @exception KeyError if oid is not found
-
-    def loadEx(self, oid, version):
-        return self.rpc.call("loadEx", oid, version)
-
-    ##
-    # Return non-current data along with transaction ids that identify
-    # the lifetime of the specific revision.
-    # @param oid object id
-    # @param tid a transaction id that provides an upper bound on
-    #            the lifetime of the revision.  That is, loadBefore
-    #            returns the revision that was current before tid committed.
-    # @defreturn 4-tuple
-    # @return data, serial number, start transaction id, end transaction id
-
-    def loadBefore(self, oid, tid):
-        return self.rpc.call("loadBefore", oid, tid)
-
-    ##
-    # Store a new revision of oid.
-    # @param oid object id
-    # @param serial serial number that this transaction read
-    # @param data new data record for oid
-    # @param version name of version or ""
-    # @param id id of current transaction
-    # @defreturn async
-
-    def storea(self, oid, serial, data, version, id):
-        self.rpc.callAsync('storea', oid, serial, data, version, id)
-
-    ##
-    # Start two-phase commit for a transaction
-    # @param id id used by client to identify current transaction.  The
-    #        only purpose of this argument is to distinguish among multiple
-    #        threads using a single ClientStorage.
-    # @param user name of user committing transaction (can be "")
-    # @param description string containing transaction metadata (can be "")
-    # @param ext dictionary of extended metadata (?)
-    # @param tid optional explicit tid to pass to underlying storage
-    # @param status optional status character, e.g. "p" for pack
-    # @defreturn async
-
-    def tpc_begin(self, id, user, descr, ext, tid, status):
-        return self.rpc.call('tpc_begin', id, user, descr, ext, tid, status)
-
-    def vote(self, trans_id):
-        return self.rpc.call('vote', trans_id)
-
-    def tpc_finish(self, id):
-        return self.rpc.call('tpc_finish', id)
-
-    def tpc_abort(self, id):
-        self.rpc.callAsync('tpc_abort', id)
-
-    def abortVersion(self, src, id):
-        return self.rpc.call('abortVersion', src, id)
-
-    def commitVersion(self, src, dest, id):
-        return self.rpc.call('commitVersion', src, dest, id)
-
-    def history(self, oid, version, length=None):
-        if length is None:
-            return self.rpc.call('history', oid, version)
-        else:
-            return self.rpc.call('history', oid, version, length)
-
-    def load(self, oid, version):
-        return self.rpc.call('load', oid, version)
-
-    def getSerial(self, oid):
-        return self.rpc.call('getSerial', oid)
-
-    def loadSerial(self, oid, serial):
-        return self.rpc.call('loadSerial', oid, serial)
-
-    def modifiedInVersion(self, oid):
-        return self.rpc.call('modifiedInVersion', oid)
-
-    def new_oid(self, last=None):
-        if last is None:
-            return self.rpc.call('new_oid')
-        else:
-            return self.rpc.call('new_oid', last)
-
-    def store(self, oid, serial, data, version, trans):
-        return self.rpc.call('store', oid, serial, data, version, trans)
-
-    def undo(self, trans_id, trans):
-        return self.rpc.call('undo', trans_id, trans)
-
-    def undoLog(self, first, last):
-        return self.rpc.call('undoLog', first, last)
-
-    def undoInfo(self, first, last, spec):
-        return self.rpc.call('undoInfo', first, last, spec)
-
-    def versionEmpty(self, vers):
-        return self.rpc.call('versionEmpty', vers)
-
-    def versions(self, max=None):
-        if max is None:
-            return self.rpc.call('versions')
-        else:
-            return self.rpc.call('versions', max)
-
-class ExtensionMethodWrapper:
-    def __init__(self, rpc, name):
-        self.rpc = rpc
-        self.name = name
-
-    def call(self, *a, **kwa):
-        return self.rpc.call(self.name, *a, **kwa)
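
A minimal sketch of how extensionMethod() above is used: the wrapper
simply forwards any positional and keyword arguments through rpc.call()
under the registered method name.  FakeRPC is hypothetical and records
what would go over the wire::

    class FakeRPC:
        def call(self, name, *args, **kw):
            return (name, args, kw)

    wrapper = ExtensionMethodWrapper(FakeRPC(), 'do_something_extra')
    print wrapper.call(1, 2, flag=True)
    # ('do_something_extra', (1, 2), {'flag': True})
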
diff --git a/branches/bug1734/src/ZEO/StorageServer.py b/branches/bug1734/src/ZEO/StorageServer.py
deleted file mode 100644
index 0d860d04..00000000
--- a/branches/bug1734/src/ZEO/StorageServer.py
+++ /dev/null
@@ -1,1015 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""The StorageServer class and the exception that it may raise.
-
-This server acts as a front-end for one or more real storages, like
-file storage or Berkeley storage.
-
-TODO:  Need some basic access control-- a declaration of the methods
-exported for invocation by the server.
-"""
-
-import asyncore
-import cPickle
-import os
-import sys
-import threading
-import time
-import logging
-
-import transaction
-
-from ZEO import ClientStub
-from ZEO.CommitLog import CommitLog
-from ZEO.monitor import StorageStats, StatsServer
-from ZEO.zrpc.server import Dispatcher
-from ZEO.zrpc.connection import ManagedServerConnection, Delay, MTDelay
-from ZEO.zrpc.trigger import trigger
-from ZEO.Exceptions import AuthError
-
-from ZODB.ConflictResolution import ResolvedSerial
-from ZODB.POSException import StorageError, StorageTransactionError
-from ZODB.POSException import TransactionError, ReadOnlyError, ConflictError
-from ZODB.serialize import referencesf
-from ZODB.utils import u64, oid_repr
-from ZODB.loglevels import BLATHER
-
-logger = logging.getLogger('ZEO.StorageServer')
-
-# TODO:  This used to say "ZSS", which is now implied in the logger name.
-# Can this be either set to str(os.getpid()) (if that makes sense) or removed?
-_label = "" # default label used for logging.
-
-def set_label():
-    """Internal helper to reset the logging label (e.g. after fork())."""
-    global _label
-    _label = "%s" % os.getpid()
-
-def log(message, level=logging.INFO, label=None, exc_info=False):
-    """Internal helper to log a message."""
-    label = label or _label
-    if label:
-        message = "(%s) %s" % (label, message)
-    logger.log(level, message, exc_info=exc_info)
-
-class StorageServerError(StorageError):
-    """Error reported when an unpickleable exception is raised."""
-
-class ZEOStorage:
-    """Proxy to underlying storage for a single remote client."""
-
-    # Classes we instantiate.  A subclass might override.
-    ClientStorageStubClass = ClientStub.ClientStorage
-
-    # A list of extension methods.  A subclass with extra methods
-    # should override.
-    extensions = []
-
-    def __init__(self, server, read_only=0, auth_realm=None):
-        self.server = server
-        # timeout and stats will be initialized in register()
-        self.timeout = None
-        self.stats = None
-        self.connection = None
-        self.client = None
-        self.storage = None
-        self.storage_id = "uninitialized"
-        self.transaction = None
-        self.read_only = read_only
-        self.locked = 0
-        self.verifying = 0
-        self.store_failed = 0
-        self.log_label = _label
-        self.authenticated = 0
-        self.auth_realm = auth_realm
-        # The authentication protocol may define extra methods.
-        self._extensions = {}
-        for func in self.extensions:
-            self._extensions[func.func_name] = None
-
-    def finish_auth(self, authenticated):
-        if not self.auth_realm:
-            return 1
-        self.authenticated = authenticated
-        return authenticated
-
-    def set_database(self, database):
-        self.database = database
-
-    def notifyConnected(self, conn):
-        self.connection = conn # For restart_other() below
-        self.client = self.ClientStorageStubClass(conn)
-        addr = conn.addr
-        if isinstance(addr, type("")):
-            label = addr
-        else:
-            host, port = addr
-            label = str(host) + ":" + str(port)
-        self.log_label = _label + "/" + label
-
-    def notifyDisconnected(self):
-        # When this storage closes, we must ensure that it aborts
-        # any pending transaction.
-        if self.transaction is not None:
-            self.log("disconnected during transaction %s" % self.transaction)
-            self._abort()
-        else:
-            self.log("disconnected")
-        if self.stats is not None:
-            self.stats.clients -= 1
-
-    def __repr__(self):
-        tid = self.transaction and repr(self.transaction.id)
-        if self.storage:
-            stid = (self.storage._transaction and
-                    repr(self.storage._transaction.id))
-        else:
-            stid = None
-        name = self.__class__.__name__
-        return "<%s %X trans=%s s_trans=%s>" % (name, id(self), tid, stid)
-
-    def log(self, msg, level=logging.INFO, exc_info=False):
-        log(msg, level=level, label=self.log_label, exc_info=exc_info)
-
-    def setup_delegation(self):
-        """Delegate several methods to the storage"""
-        self.versionEmpty = self.storage.versionEmpty
-        self.versions = self.storage.versions
-        self.getSerial = self.storage.getSerial
-        self.history = self.storage.history
-        self.load = self.storage.load
-        self.loadSerial = self.storage.loadSerial
-        self.modifiedInVersion = self.storage.modifiedInVersion
-        try:
-            fn = self.storage.getExtensionMethods
-        except AttributeError:
-            # We must be running with a ZODB which
-            # predates adding getExtensionMethods to
-            # BaseStorage. Eventually this try/except
-            # can be removed
-            pass
-        else:
-            d = fn()
-            self._extensions.update(d)
-            for name in d.keys():
-                assert not hasattr(self, name)
-                setattr(self, name, getattr(self.storage, name))
-        self.lastTransaction = self.storage.lastTransaction
-
-    def _check_tid(self, tid, exc=None):
-        if self.read_only:
-            raise ReadOnlyError()
-        if self.transaction is None:
-            caller = sys._getframe().f_back.f_code.co_name
-            self.log("no current transaction: %s()" % caller,
-                     level=logging.WARNING)
-            if exc is not None:
-                raise exc(None, tid)
-            else:
-                return 0
-        if self.transaction.id != tid:
-            caller = sys._getframe().f_back.f_code.co_name
-            self.log("%s(%s) invalid; current transaction = %s" %
-                     (caller, repr(tid), repr(self.transaction.id)),
-                     logging.WARNING)
-            if exc is not None:
-                raise exc(self.transaction.id, tid)
-            else:
-                return 0
-        return 1
-
-    def getAuthProtocol(self):
-        """Return string specifying name of authentication module to use.
-
-        The module name should be auth_%s where %s is auth_protocol."""
-        protocol = self.server.auth_protocol
-        if not protocol or protocol == 'none':
-            return None
-        return protocol
-
-    def register(self, storage_id, read_only):
-        """Select the storage that this client will use
-
-        This method must be the first one called by the client.
-        For authenticated storages this method will be called by the client
-        immediately after authentication is finished.
-        """
-        if self.auth_realm and not self.authenticated:
-            raise AuthError, "Client was never authenticated with server!"
-
-        if self.storage is not None:
-            self.log("duplicate register() call")
-            raise ValueError, "duplicate register() call"
-        storage = self.server.storages.get(storage_id)
-        if storage is None:
-            self.log("unknown storage_id: %s" % storage_id)
-            raise ValueError, "unknown storage: %s" % storage_id
-
-        if not read_only and (self.read_only or storage.isReadOnly()):
-            raise ReadOnlyError()
-
-        self.read_only = self.read_only or read_only
-        self.storage_id = storage_id
-        self.storage = storage
-        self.setup_delegation()
-        self.timeout, self.stats = self.server.register_connection(storage_id,
-                                                                   self)
-
-    def get_info(self):
-        return {'length': len(self.storage),
-                'size': self.storage.getSize(),
-                'name': self.storage.getName(),
-                'supportsUndo': self.storage.supportsUndo(),
-                'supportsVersions': self.storage.supportsVersions(),
-                'extensionMethods': self.getExtensionMethods(),
-                }
-
-    def get_size_info(self):
-        return {'length': len(self.storage),
-                'size': self.storage.getSize(),
-                }
-
-    def getExtensionMethods(self):
-        return self._extensions
-
-    def loadEx(self, oid, version):
-        self.stats.loads += 1
-        return self.storage.loadEx(oid, version)
-
-    def loadBefore(self, oid, tid):
-        self.stats.loads += 1
-        return self.storage.loadBefore(oid, tid)
-
-    def zeoLoad(self, oid):
-        self.stats.loads += 1
-        v = self.storage.modifiedInVersion(oid)
-        if v:
-            pv, sv = self.storage.load(oid, v)
-        else:
-            pv = sv = None
-        try:
-            p, s = self.storage.load(oid, '')
-        except KeyError:
-            if sv:
-                # Created in version, no non-version data
-                p = s = None
-            else:
-                raise
-        return p, s, v, pv, sv
-
-    def getInvalidations(self, tid):
-        invtid, invlist = self.server.get_invalidations(tid)
-        if invtid is None:
-            return None
-        self.log("Return %d invalidations up to tid %s"
-                 % (len(invlist), u64(invtid)))
-        return invtid, invlist
-
-    def verify(self, oid, version, tid):
-        try:
-            t = self.storage.getTid(oid)
-        except KeyError:
-            self.client.invalidateVerify((oid, ""))
-        else:
-            if tid != t:
-                # This will invalidate non-version data when the
-                # client only has invalid version data.  Since this is
-                # an uncommon case, we avoid the cost of checking
-                # whether the serial number matches the current
-                # non-version data.
-                self.client.invalidateVerify((oid, version))
-
-    def zeoVerify(self, oid, s, sv):
-        if not self.verifying:
-            self.verifying = 1
-            self.stats.verifying_clients += 1
-        try:
-            os = self.storage.getTid(oid)
-        except KeyError:
-            self.client.invalidateVerify((oid, ''))
-            # It's not clear what we should do now.  The KeyError
-            # could be caused by an object uncreation, in which case
-            # invalidation is right.  It could be an application bug
-            # that left a dangling reference, in which case it's bad.
-        else:
-            # If the client has version data, the logic is a bit more
-            # complicated.  If the current serial number matches the
-            # client serial number, then the non-version data must
-            # also be valid.  If the current serialno is for a
-            # version, then the non-version data can't change.
-
-            # If the version serialno isn't valid, then the
-            # non-version serialno may or may not be valid.  Rather
-            # than trying to figure out whether it is valid, we just
-            # invalidate it.  Sending an invalidation for the
-            # non-version data implies invalidating the version data
-            # too, since an update to non-version data can only occur
-            # after the version is aborted or committed.
-            if sv:
-                if sv != os:
-                    self.client.invalidateVerify((oid, ''))
-            else:
-                if s != os:
-                    self.client.invalidateVerify((oid, ''))
-
-    def endZeoVerify(self):
-        if self.verifying:
-            self.stats.verifying_clients -= 1
-        self.verifying = 0
-        self.client.endVerify()
-
-    def pack(self, time, wait=1):
-        # Yes, you can pack a read-only server or storage!
-        if wait:
-            return run_in_thread(self._pack_impl, time)
-        else:
-            # If the client isn't waiting for a reply, start a thread
-            # and forget about it.
-            t = threading.Thread(target=self._pack_impl, args=(time,))
-            t.start()
-            return None
-
-    def _pack_impl(self, time):
-        self.log("pack(time=%s) started..." % repr(time))
-        self.storage.pack(time, referencesf)
-        self.log("pack(time=%s) complete" % repr(time))
-        # Broadcast new size statistics
-        self.server.invalidate(0, self.storage_id, None,
-                               (), self.get_size_info())
-
-    def new_oids(self, n=100):
-        """Return a sequence of n new oids, where n defaults to 100"""
-        if self.read_only:
-            raise ReadOnlyError()
-        if n <= 0:
-            n = 1
-        return [self.storage.new_oid() for i in range(n)]
-
-    # undoLog and undoInfo are potentially slow methods
-
-    def undoInfo(self, first, last, spec):
-        return run_in_thread(self.storage.undoInfo, first, last, spec)
-
-    def undoLog(self, first, last):
-        return run_in_thread(self.storage.undoLog, first, last)
-
-    def tpc_begin(self, id, user, description, ext, tid=None, status=" "):
-        if self.read_only:
-            raise ReadOnlyError()
-        if self.transaction is not None:
-            if self.transaction.id == id:
-                self.log("duplicate tpc_begin(%s)" % repr(id))
-                return
-            else:
-                raise StorageTransactionError("Multiple simultaneous tpc_begin"
-                                              " requests from one client.")
-
-        self.transaction = t = transaction.Transaction()
-        t.id = id
-        t.user = user
-        t.description = description
-        t._extension = ext
-
-        self.serials = []
-        self.invalidated = []
-        self.txnlog = CommitLog()
-        self.tid = tid
-        self.status = status
-        self.store_failed = 0
-        self.stats.active_txns += 1
-
-    def tpc_finish(self, id):
-        if not self._check_tid(id):
-            return
-        assert self.locked
-        self.stats.active_txns -= 1
-        self.stats.commits += 1
-        self.storage.tpc_finish(self.transaction)
-        tid = self.storage.lastTransaction()
-        if self.invalidated:
-            self.server.invalidate(self, self.storage_id, tid,
-                                   self.invalidated, self.get_size_info())
-        self._clear_transaction()
-        # Return the tid, for cache invalidation optimization
-        return tid
-
-    def tpc_abort(self, id):
-        if not self._check_tid(id):
-            return
-        self.stats.active_txns -= 1
-        self.stats.aborts += 1
-        if self.locked:
-            self.storage.tpc_abort(self.transaction)
-        self._clear_transaction()
-
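# Not part of the original file: the client-visible call sequence that
# tpc_begin()/storea()/vote()/tpc_finish()/tpc_abort() above implement,
# with illustrative argument values ('id' is the client's transaction id):
#
#     zeo_storage.tpc_begin(id, 'user', 'description', {})
#     zeo_storage.storea(oid, serial, data, '', id)  # buffered in CommitLog
#     zeo_storage.vote(id)          # acquire lock, replay stores, tpc_vote()
#     zeo_storage.tpc_finish(id)    # or zeo_storage.tpc_abort(id)
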
-    def _clear_transaction(self):
-        # Common code at end of tpc_finish() and tpc_abort()
-        self.transaction = None
-        self.txnlog.close()
-        if self.locked:
-            self.locked = 0
-            self.timeout.end(self)
-            self.stats.lock_time = None
-            self.log("Transaction released storage lock", BLATHER)
-            # _handle_waiting() can start another transaction (by
-            # restarting a waiting one) so must be done last
-            self._handle_waiting()
-
-    def _abort(self):
-        # called when a connection is closed unexpectedly
-        if not self.locked:
-            # Delete (d, zeo_storage) from the _waiting list, if found.
-            waiting = self.storage._waiting
-            for i in range(len(waiting)):
-                d, z = waiting[i]
-                if z is self:
-                    del waiting[i]
-                    self.log("Closed connection removed from waiting list."
-                             " Clients waiting: %d." % len(waiting))
-                    break
-
-        if self.transaction:
-            self.stats.active_txns -= 1
-            self.stats.aborts += 1
-            self.tpc_abort(self.transaction.id)
-
-    # The public methods of the ZEO client API do not do the real work.
-    # They defer work until after the storage lock has been acquired.
-    # Most of the real implementations are in methods beginning with
-    # an _.
-
-    def storea(self, oid, serial, data, version, id):
-        self._check_tid(id, exc=StorageTransactionError)
-        self.stats.stores += 1
-        self.txnlog.store(oid, serial, data, version)
-
-    # The following four methods return values, so they must acquire
-    # the storage lock and begin the transaction before returning.
-
-    def vote(self, id):
-        self._check_tid(id, exc=StorageTransactionError)
-        if self.locked:
-            return self._vote()
-        else:
-            return self._wait(lambda: self._vote())
-
-    def abortVersion(self, src, id):
-        self._check_tid(id, exc=StorageTransactionError)
-        if self.locked:
-            return self._abortVersion(src)
-        else:
-            return self._wait(lambda: self._abortVersion(src))
-
-    def commitVersion(self, src, dest, id):
-        self._check_tid(id, exc=StorageTransactionError)
-        if self.locked:
-            return self._commitVersion(src, dest)
-        else:
-            return self._wait(lambda: self._commitVersion(src, dest))
-
-    def undo(self, trans_id, id):
-        self._check_tid(id, exc=StorageTransactionError)
-        if self.locked:
-            return self._undo(trans_id)
-        else:
-            return self._wait(lambda: self._undo(trans_id))
-
-    def _tpc_begin(self, txn, tid, status):
-        self.locked = 1
-        self.timeout.begin(self)
-        self.stats.lock_time = time.time()
-        self.storage.tpc_begin(txn, tid, status)
-
-    def _store(self, oid, serial, data, version):
-        err = None
-        try:
-            newserial = self.storage.store(oid, serial, data, version,
-                                           self.transaction)
-        except (SystemExit, KeyboardInterrupt):
-            raise
-        except Exception, err:
-            self.store_failed = 1
-            if isinstance(err, ConflictError):
-                self.stats.conflicts += 1
-                self.log("conflict error oid=%s msg=%s" %
-                         (oid_repr(oid), str(err)), BLATHER)
-            if not isinstance(err, TransactionError):
-                # Unexpected errors are logged and passed to the client
-                self.log("store error: %s, %s" % sys.exc_info()[:2],
-                         logging.ERROR, exc_info=True)
-            # Try to pickle the exception.  If it can't be pickled,
-            # the RPC response would fail, so use something else.
-            pickler = cPickle.Pickler()
-            pickler.fast = 1
-            try:
-                pickler.dump(err, 1)
-            except:
-                msg = "Couldn't pickle storage exception: %s" % repr(err)
-                self.log(msg, logging.ERROR)
-                err = StorageServerError(msg)
-            # The exception is reported back as newserial for this oid
-            newserial = err
-        else:
-            if serial != "\0\0\0\0\0\0\0\0":
-                self.invalidated.append((oid, version))
-        if newserial == ResolvedSerial:
-            self.stats.conflicts_resolved += 1
-            self.log("conflict resolved oid=%s" % oid_repr(oid), BLATHER)
-        self.serials.append((oid, newserial))
-        return err is None
-
-    def _vote(self):
-        self.client.serialnos(self.serials)
-        # If a store call failed, then return to the client immediately.
-        # The serialnos() call will deliver an exception that will be
-        # handled by the client in its tpc_vote() method.
-        if self.store_failed:
-            return
-        return self.storage.tpc_vote(self.transaction)
-
-    def _abortVersion(self, src):
-        tid, oids = self.storage.abortVersion(src, self.transaction)
-        inv = [(oid, src) for oid in oids]
-        self.invalidated.extend(inv)
-        return tid, oids
-
-    def _commitVersion(self, src, dest):
-        tid, oids = self.storage.commitVersion(src, dest, self.transaction)
-        inv = [(oid, dest) for oid in oids]
-        self.invalidated.extend(inv)
-        if dest:
-            inv = [(oid, src) for oid in oids]
-            self.invalidated.extend(inv)
-        return tid, oids
-
-    def _undo(self, trans_id):
-        tid, oids = self.storage.undo(trans_id, self.transaction)
-        inv = [(oid, None) for oid in oids]
-        self.invalidated.extend(inv)
-        return tid, oids
-
-    # When a delayed transaction is restarted, the dance is
-    # complicated.  The restart occurs when one ZEOStorage instance
-    # finishes a transaction and finds another instance in the
-    # _waiting list.
-
-    # It might be better to have a mechanism to explicitly send
-    # the finishing transaction's reply before restarting the waiting
-    # transaction.  If the restart takes a long time, the previous
-    # client will be blocked until it finishes.
-
-    def _wait(self, thunk):
-        # Wait for the storage lock to be acquired.
-        self._thunk = thunk
-        if self.storage._transaction:
-            d = Delay()
-            self.storage._waiting.append((d, self))
-            self.log("Transaction blocked waiting for storage. "
-                     "Clients waiting: %d." % len(self.storage._waiting))
-            return d
-        else:
-            self.log("Transaction acquired storage lock.", BLATHER)
-            return self._restart()
-
-    def _restart(self, delay=None):
-        # Restart when the storage lock is available.
-        if self.txnlog.stores == 1:
-            template = "Preparing to commit transaction: %d object, %d bytes"
-        else:
-            template = "Preparing to commit transaction: %d objects, %d bytes"
-        self.log(template % (self.txnlog.stores, self.txnlog.size()),
-                 level=BLATHER)
-        self._tpc_begin(self.transaction, self.tid, self.status)
-        loads, loader = self.txnlog.get_loader()
-        for i in range(loads):
-            # load oid, serial, data, version
-            if not self._store(*loader.load()):
-                break
-        resp = self._thunk()
-        if delay is not None:
-            delay.reply(resp)
-        else:
-            return resp
-
-    def _handle_waiting(self):
-        # Restart any client waiting for the storage lock.
-        while self.storage._waiting:
-            delay, zeo_storage = self.storage._waiting.pop(0)
-            if self._restart_other(zeo_storage, delay):
-                if self.storage._waiting:
-                    n = len(self.storage._waiting)
-                    self.log("Blocked transaction restarted.  "
-                             "Clients waiting: %d" % n)
-                else:
-                    self.log("Blocked transaction restarted.")
-                return
-
-    def _restart_other(self, zeo_storage, delay):
-        # Return True if the server restarted.
-        # call the restart() method on the appropriate server.
-        try:
-            zeo_storage._restart(delay)
-        except:
-            self.log("Unexpected error handling waiting transaction",
-                     level=logging.WARNING, exc_info=True)
-            zeo_storage.connection.close()
-            return 0
-        else:
-            return 1
-
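# A sketch, not part of the original module, of the Delay-based waiting
# protocol used by _wait()/_handle_waiting() above.  ToyDelay stands in
# for ZEO.zrpc.connection.Delay; only reply() matters here.
class ToyDelay:
    def reply(self, obj):
        print "late reply:", obj

waiting = []                  # plays the role of storage._waiting

def blocked_vote():
    delay = ToyDelay()
    waiting.append(delay)     # park the caller instead of answering
    return delay              # zrpc defers the RPC response until reply()

blocked_vote()
# ... later, when the current transaction releases the storage lock:
waiting.pop(0).reply('serialnos')
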
-class StorageServer:
-
-    """The server side implementation of ZEO.
-
-    The StorageServer is the 'manager' for incoming connections.  Each
-    connection is associated with its own ZEOStorage instance (defined
-    below).  The StorageServer may handle multiple storages; each
-    ZEOStorage instance only handles a single storage.
-    """
-
-    # Classes we instantiate.  A subclass might override.
-
-    DispatcherClass = Dispatcher
-    ZEOStorageClass = ZEOStorage
-    ManagedServerConnectionClass = ManagedServerConnection
-
-    def __init__(self, addr, storages, read_only=0,
-                 invalidation_queue_size=100,
-                 transaction_timeout=None,
-                 monitor_address=None,
-                 auth_protocol=None,
-                 auth_database=None,
-                 auth_realm=None):
-        """StorageServer constructor.
-
-        This is typically invoked from the start.py script.
-
-        Arguments (the first two are required and positional):
-
-        addr -- the address at which the server should listen.  This
-            can be a tuple (host, port) to signify a TCP/IP connection
-            or a pathname string to signify a Unix domain socket
-            connection.  A hostname may be a DNS name or a dotted IP
-            address.
-
-        storages -- a dictionary giving the storage(s) to handle.  The
-            keys are the storage names, the values are the storage
-            instances, typically FileStorage or Berkeley storage
-            instances.  By convention, storage names are strings
-            representing small integers starting at '1'.
-
-        read_only -- an optional flag saying whether the server should
-            operate in read-only mode.  Defaults to false.  Note that
-            even if the server is operating in writable mode,
-            individual storages may still be read-only.  But if the
-            server is in read-only mode, no write operations are
-            allowed, even if the storages are writable.  Note that
-            pack() is considered a read-only operation.
-
-        invalidation_queue_size -- The storage server keeps a queue
-            of the objects modified by the last N transactions, where
-            N == invalidation_queue_size.  This queue is used to
-            speed client cache verification when a client disconnects
-            for a short period of time.
-
-        transaction_timeout -- The maximum amount of time to wait for
-            a transaction to commit after acquiring the storage lock.
-            If the transaction takes too long, the client connection
-            will be closed and the transaction aborted.
-
-        monitor_address -- The address at which the monitor server
-            should listen.  If specified, a monitor server is started.
-            The monitor server provides server statistics in a simple
-            text format.
-
-        auth_protocol -- The name of the authentication protocol to use.
-            Examples are "digest" and "srp".
-
-        auth_database -- The name of the password database filename.
-            It should be in a format compatible with the authentication
-            protocol used; for instance, "sha" and "srp" require different
-            formats.
-
-            Note that to implement an authentication protocol, a server
-            and client authentication mechanism must be implemented in a
-            auth_* module, which should be stored inside the "auth"
-            subdirectory. This module may also define a DatabaseClass
-            variable that should indicate what database should be used
-            by the authenticator.
-        """
-
-        self.addr = addr
-        self.storages = storages
-        set_label()
-        msg = ", ".join(
-            ["%s:%s:%s" % (name, storage.isReadOnly() and "RO" or "RW",
-                           storage.getName())
-             for name, storage in storages.items()])
-        log("%s created %s with storages: %s" %
-            (self.__class__.__name__, read_only and "RO" or "RW", msg))
-        for s in storages.values():
-            s._waiting = []
-        self.read_only = read_only
-        self.auth_protocol = auth_protocol
-        self.auth_database = auth_database
-        self.auth_realm = auth_realm
-        self.database = None
-        if auth_protocol:
-            self._setup_auth(auth_protocol)
-        # A list of at most invalidation_queue_size invalidations.
-        # The list is kept in sorted order with the most recent
-        # invalidation at the front.  The list never has more than
-        # self.invq_bound elements.
-        self.invq = []
-        self.invq_bound = invalidation_queue_size
-        self.connections = {}
-        self.dispatcher = self.DispatcherClass(addr,
-                                               factory=self.new_connection)
-        self.stats = {}
-        self.timeouts = {}
-        for name in self.storages.keys():
-            self.stats[name] = StorageStats()
-            if transaction_timeout is None:
-                # An object with no-op methods
-                timeout = StubTimeoutThread()
-            else:
-                timeout = TimeoutThread(transaction_timeout)
-                timeout.start()
-            self.timeouts[name] = timeout
-        if monitor_address:
-            self.monitor = StatsServer(monitor_address, self.stats)
-        else:
-            self.monitor = None
-
-    def _setup_auth(self, protocol):
-        # Can't be done in global scope, because of cyclic references
-        from ZEO.auth import get_module
-
-        name = self.__class__.__name__
-
-        module = get_module(protocol)
-        if not module:
-            log("%s: no such an auth protocol: %s" % (name, protocol))
-            return
-
-        storage_class, client, db_class = module
-
-        if not storage_class or not issubclass(storage_class, ZEOStorage):
-            log(("%s: %s isn't a valid protocol, must have a StorageClass" %
-                 (name, protocol)))
-            self.auth_protocol = None
-            return
-        self.ZEOStorageClass = storage_class
-
-        log("%s: using auth protocol: %s" % (name, protocol))
-
-        # We create a Database instance here for use with the authenticator
-        # modules. Having one instance allows it to be shared between multiple
-        # storages, avoiding the need to bloat each with a new authenticator
-        # Database that would contain the same info, and also avoiding any
-        # possible synchronization issues between them.
-        self.database = db_class(self.auth_database)
-        if self.database.realm != self.auth_realm:
-            raise ValueError("password database realm %r "
-                             "does not match storage realm %r"
-                             % (self.database.realm, self.auth_realm))
-
-
-    def new_connection(self, sock, addr):
-        """Internal: factory to create a new connection.
-
-        This is called by the Dispatcher class in ZEO.zrpc.server
-        whenever accept() returns a socket for a new incoming
-        connection.
-        """
-        if self.auth_protocol and self.database:
-            zstorage = self.ZEOStorageClass(self, self.read_only,
-                                            auth_realm=self.auth_realm)
-            zstorage.set_database(self.database)
-        else:
-            zstorage = self.ZEOStorageClass(self, self.read_only)
-
-        c = self.ManagedServerConnectionClass(sock, addr, zstorage, self)
-        log("new connection %s: %s" % (addr, repr(c)))
-        return c
-
-    def register_connection(self, storage_id, conn):
-        """Internal: register a connection with a particular storage.
-
-        This is called by ZEOStorage.register().
-
-        The dictionary self.connections maps each storage name to a
-        list of current connections for that storage; this information
-        is needed to handle invalidation.  This function updates this
-        dictionary.
-
-        Returns the timeout and stats objects for the appropriate storage.
-        """
-        l = self.connections.get(storage_id)
-        if l is None:
-            l = self.connections[storage_id] = []
-        l.append(conn)
-        stats = self.stats[storage_id]
-        stats.clients += 1
-        return self.timeouts[storage_id], stats
-
-    def invalidate(self, conn, storage_id, tid, invalidated=(), info=None):
-        """Internal: broadcast info and invalidations to clients.
-
-        This is called from several ZEOStorage methods.
-
-        This can do three different things:
-
-        - If the invalidated argument is non-empty, it broadcasts
-          invalidateTransaction() messages to all clients of the given
-          storage except the current client (the conn argument).
-
-        - If the invalidated argument is empty and the info argument
-          is a non-empty dictionary, it broadcasts info() messages to
-          all clients of the given storage, including the current
-          client.
-
-        - If both the invalidated argument and the info argument are
-          non-empty, it broadcasts invalidateTransaction() messages to all
-          clients except the current, and sends an info() message to
-          the current client.
-
-        """
-        if invalidated:
-            if len(self.invq) >= self.invq_bound:
-                self.invq.pop()
-            self.invq.insert(0, (tid, invalidated))
-        for p in self.connections.get(storage_id, ()):
-            if invalidated and p is not conn:
-                p.client.invalidateTransaction(tid, invalidated)
-            elif info is not None:
-                p.client.info(info)
-
-    def get_invalidations(self, tid):
-        """Return a tid and list of all objects invalidation since tid.
-
-        The tid is the most recent transaction id seen by the client.
-
-        Returns None if it is unable to provide a complete list
-        of invalidations for tid.  In this case, client should
-        do full cache verification.
-        """
-
-        if not self.invq:
-            log("invq empty")
-            return None, []
-
-        earliest_tid = self.invq[-1][0]
-        if earliest_tid > tid:
-            log("tid to old for invq %s < %s" % (u64(tid), u64(earliest_tid)))
-            return None, []
-
-        oids = {}
-        for _tid, L in self.invq:
-            if _tid <= tid:
-                break
-            for key in L:
-                oids[key] = 1
-        latest_tid = self.invq[0][0]
-        return latest_tid, oids.keys()
-
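# A standalone sketch, not part of the original module, of the queue
# walk in get_invalidations() above, using small integers for tids
# (real tids are 8-byte strings) and single-letter oids.
invq = [(30, ['c']), (20, ['b']), (10, ['a'])]    # newest first

def collect(tid):
    if not invq or invq[-1][0] > tid:
        return None, []        # queue can't cover tid: full verification
    seen = {}
    for _tid, oids in invq:
        if _tid <= tid:
            break
        for oid in oids:
            seen[oid] = 1
    return invq[0][0], seen.keys()

print collect(10)   # (30, [...]) -- everything committed after tid 10
print collect(5)    # (None, []) -- 5 is older than the queue remembers
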
-    def close_server(self):
-        """Close the dispatcher so that there are no new connections.
-
-        This is only called from the test suite, AFAICT.
-        """
-        self.dispatcher.close()
-        if self.monitor is not None:
-            self.monitor.close()
-        for storage in self.storages.values():
-            storage.close()
-        # Force the asyncore mainloop to exit by hackery, i.e. close
-        # every socket in the map.  loop() will return when the map is
-        # empty.
-        for s in asyncore.socket_map.values():
-            try:
-                s.close()
-            except:
-                pass
-
-    def close_conn(self, conn):
-        """Internal: remove the given connection from self.connections.
-
-        This is the inverse of register_connection().
-        """
-        for cl in self.connections.values():
-            if conn.obj in cl:
-                cl.remove(conn.obj)
-
-class StubTimeoutThread:
-
-    def begin(self, client):
-        pass
-
-    def end(self, client):
-        pass
-
-class TimeoutThread(threading.Thread):
-    """Monitors transaction progress and generates timeouts."""
-
-    # There is one TimeoutThread per storage, because there's one
-    # transaction lock per storage.
-
-    def __init__(self, timeout):
-        threading.Thread.__init__(self)
-        self.setDaemon(1)
-        self._timeout = timeout
-        self._client = None
-        self._deadline = None
-        self._cond = threading.Condition() # Protects _client and _deadline
-        self._trigger = trigger()
-
-    def begin(self, client):
-        # Called from the restart code in the "main" thread, whenever
-        # the storage lock is being acquired.  (Serialized by asyncore.)
-        self._cond.acquire()
-        try:
-            assert self._client is None
-            self._client = client
-            self._deadline = time.time() + self._timeout
-            self._cond.notify()
-        finally:
-            self._cond.release()
-
-    def end(self, client):
-        # Called from the "main" thread whenever the storage lock is
-        # being released.  (Serialized by asyncore.)
-        self._cond.acquire()
-        try:
-            assert self._client is not None
-            assert self._client is client
-            self._client = None
-            self._deadline = None
-        finally:
-            self._cond.release()
-
-    def run(self):
-        # Code running in the thread.
-        while 1:
-            self._cond.acquire()
-            try:
-                while self._deadline is None:
-                    self._cond.wait()
-                howlong = self._deadline - time.time()
-                if howlong <= 0:
-                    # Prevent reporting timeout more than once
-                    self._deadline = None
-                client = self._client # For the howlong <= 0 branch below
-            finally:
-                self._cond.release()
-            if howlong <= 0:
-                client.log("Transaction timeout after %s seconds" %
-                           self._timeout)
-                self._trigger.pull_trigger(lambda: client.connection.close())
-            else:
-                time.sleep(howlong)
-
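A rough usage sketch for TimeoutThread, assuming a ZEOStorage-like object; the name `zeo_client` is illustrative:

    timeout_thread = TimeoutThread(30)   # 30-second transaction timeout
    timeout_thread.start()

    timeout_thread.begin(zeo_client)     # storage lock acquired: arm the clock
    try:
        pass                             # ... two-phase commit work ...
    finally:
        timeout_thread.end(zeo_client)   # lock released: clear the deadline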
-def run_in_thread(method, *args):
-    t = SlowMethodThread(method, args)
-    t.start()
-    return t.delay
-
-class SlowMethodThread(threading.Thread):
-    """Thread to run potentially slow storage methods.
-
-    Clients can use the delay attribute to access the MTDelay object
-    used to send a zrpc response at the right time.
-    """
-
-    # Some storage methods can take a long time to complete.  If we
-    # run these methods via a standard asyncore read handler, they
-    # will block all other server activity until they complete.  To
-    # avoid blocking, we spawn a separate thread, return an MTDelay()
-    # object, and have the thread reply() when it finishes.
-
-    def __init__(self, method, args):
-        threading.Thread.__init__(self)
-        self._method = method
-        self._args = args
-        self.delay = MTDelay()
-
-    def run(self):
-        try:
-            result = self._method(*self._args)
-        except (SystemExit, KeyboardInterrupt):
-            raise
-        except Exception:
-            self.delay.error(sys.exc_info())
-        else:
-            self.delay.reply(result)
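For example, a slow server-side method such as a pack can be wrapped so the asyncore loop stays responsive; the zrpc layer sends the real reply when the thread calls delay.reply() (handler name illustrative):

    def pack(self, t):
        # Returning the MTDelay defers the zrpc response.
        return run_in_thread(self.storage.pack, t)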
diff --git a/branches/bug1734/src/ZEO/TransactionBuffer.py b/branches/bug1734/src/ZEO/TransactionBuffer.py
deleted file mode 100644
index 8031b7d9..00000000
--- a/branches/bug1734/src/ZEO/TransactionBuffer.py
+++ /dev/null
@@ -1,148 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A TransactionBuffer store transaction updates until commit or abort.
-
-A transaction may generate enough data that it is not practical to
-always hold pending updates in memory.  Instead, a TransactionBuffer
-is used to store the data until a commit or abort.
-"""
-
-# A faster implementation might store trans data in memory until it
-# reaches a certain size.
-
-import cPickle
-import tempfile
-from threading import Lock
-
-class TransactionBuffer:
-
-    # Valid call sequences:
-    #
-    #     ((store | invalidate)* __iter__ next* clear)* close
-    #
-    # get_size can be called any time
-
-    # The TransactionBuffer is used by client storage to hold update
-    # data until the tpc_finish().  It is normally used by a single
-    # thread, because only one thread can be in the two-phase commit
-    # at one time.
-
-    # It is possible, however, for one thread to close the storage
-    # while another thread is in the two-phase commit.  We must use
-    # a lock to guard against this race, because unpredictable things
-    # can happen in Python if one thread closes a file that another
-    # thread is reading.  In a debug build, an assert() can fail.
-
-    # Caution:  If an operation is performed on a closed TransactionBuffer,
-    # it has no effect and does not raise an exception.  The only time
-    # this should occur is when a ClientStorage is closed in one
-    # thread while another thread is in its tpc_finish().  It's not
-    # clear what should happen in this case.  If the tpc_finish()
-    # completes without error, the Connection using it could have
-    # inconsistent data.  This should have minimal effect, though,
-    # because the Connection is connected to a closed storage.
-
-    def __init__(self):
-        self.file = tempfile.TemporaryFile(suffix=".tbuf")
-        self.lock = Lock()
-        self.closed = 0
-        self.count = 0
-        self.size = 0
-        # It's safe to use a fast pickler because the only objects
-        # stored are builtin types -- strings or None.
-        self.pickler = cPickle.Pickler(self.file, 1)
-        self.pickler.fast = 1
-
-    def close(self):
-        self.lock.acquire()
-        try:
-            self.closed = 1
-            try:
-                self.file.close()
-            except OSError:
-                pass
-        finally:
-            self.lock.release()
-
-    def store(self, oid, version, data):
-        self.lock.acquire()
-        try:
-            self._store(oid, version, data)
-        finally:
-            self.lock.release()
-
-    def _store(self, oid, version, data):
-        """Store oid, version, data for later retrieval"""
-        if self.closed:
-            return
-        self.pickler.dump((oid, version, data))
-        self.count += 1
-        # Estimate per-record cache size
-        self.size = self.size + len(data) + 31
-        if version:
-            # Assume version data has same size as non-version data
-            self.size = self.size + len(version) + len(data) + 12
-
-    def invalidate(self, oid, version):
-        self.lock.acquire()
-        try:
-            if self.closed:
-                return
-            self.pickler.dump((oid, version, None))
-            self.count += 1
-        finally:
-            self.lock.release()
-
-    def clear(self):
-        """Mark the buffer as empty"""
-        self.lock.acquire()
-        try:
-            if self.closed:
-                return
-            self.file.seek(0)
-            self.count = 0
-            self.size = 0
-        finally:
-            self.lock.release()
-
-    def __iter__(self):
-        self.lock.acquire()
-        try:
-            if self.closed:
-                return
-            self.file.flush()
-            self.file.seek(0)
-            return TBIterator(self.file, self.count)
-        finally:
-            self.lock.release()
-
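A minimal usage sketch of the call sequence documented above (oids and data are illustrative):

    tbuf = TransactionBuffer()
    tbuf.store("oid-1", "", "pickled state")   # (oid, version, data)
    tbuf.invalidate("oid-2", "")               # recorded with data None
    for oid, version, data in tbuf:            # __iter__ rewinds the file
        pass                                   # ... forward to the server ...
    tbuf.clear()                               # ready for the next txn
    tbuf.close()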
-class TBIterator(object):
-
-    def __init__(self, f, count):
-        self.file = f
-        self.count = count
-        self.unpickler = cPickle.Unpickler(f)
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        """Return next tuple of data or None if EOF"""
-        if self.count == 0:
-            self.file.seek(0)
-            raise StopIteration
-        oid_ver_data = self.unpickler.load()
-        self.count -= 1
-        return oid_ver_data
diff --git a/branches/bug1734/src/ZEO/__init__.py b/branches/bug1734/src/ZEO/__init__.py
deleted file mode 100644
index be1cd686..00000000
--- a/branches/bug1734/src/ZEO/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""ZEO -- Zope Enterprise Objects.
-
-See the file README.txt in this directory for an overview.
-
-ZEO is now part of ZODB; ZODB's home on the web is
-
-    http://www.zope.org/Wikis/ZODB
-
-"""
-
-# The next line must use double quotes, so release.py recognizes it.
-version = "2.4a0"
diff --git a/branches/bug1734/src/ZEO/auth/__init__.py b/branches/bug1734/src/ZEO/auth/__init__.py
deleted file mode 100644
index 39dff689..00000000
--- a/branches/bug1734/src/ZEO/auth/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-_auth_modules = {}
-
-def get_module(name):
-    if name == 'sha':
-        from auth_sha import StorageClass, SHAClient, Database
-        return StorageClass, SHAClient, Database
-    elif name == 'digest':
-        from auth_digest import StorageClass, DigestClient, DigestDatabase
-        return StorageClass, DigestClient, DigestDatabase
-    else:
-        return _auth_modules.get(name)
-
-def register_module(name, storage_class, client, db):
-    if _auth_modules.has_key(name):
-        raise TypeError, "%s is already registered" % name
-    _auth_modules[name] = storage_class, client, db
diff --git a/branches/bug1734/src/ZEO/auth/auth_digest.py b/branches/bug1734/src/ZEO/auth/auth_digest.py
deleted file mode 100644
index 6474e8b5..00000000
--- a/branches/bug1734/src/ZEO/auth/auth_digest.py
+++ /dev/null
@@ -1,143 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Digest authentication for ZEO
-
-This authentication mechanism follows the design of HTTP digest
-authentication (RFC 2069).  It is a simple challenge-response protocol
-that does not send passwords in the clear, but does not offer strong
-security.  The RFC discusses many of the limitations of this kind of
-protocol.
-
-Guard the password database as if it contained plaintext passwords.
-It stores the hash of a username and password.  This does not expose
-the plaintext password, but it is sensitive nonetheless.  An attacker
-with the hash can impersonate the real user.  This is a limitation of
-the simple digest scheme.
-
-HTTP is a stateless protocol, and ZEO is a stateful protocol.  The
-security requirements are quite different as a result.  The HTTP
-protocol uses a nonce as a challenge.  The ZEO protocol requires a
-separate session key that is used for message authentication.  We
-generate a second nonce for this purpose; the hash of nonce and
-user/realm/password is used as the session key.
-
-TODO: I'm not sure if this is a sound approach; SRP would be preferred.
-"""
-
-import os
-import random
-import sha
-import struct
-import time
-
-from ZEO.auth.base import Database, Client
-from ZEO.StorageServer import ZEOStorage
-from ZEO.Exceptions import AuthError
-
-def get_random_bytes(n=8):
-    if os.path.exists("/dev/urandom"):
-        f = open("/dev/urandom")
-        s = f.read(n)
-        f.close()
-    else:
-        L = [chr(random.randint(0, 255)) for i in range(n)]
-        s = "".join(L)
-    return s
-
-def hexdigest(s):
-    return sha.new(s).hexdigest()
-
-class DigestDatabase(Database):
-    def __init__(self, filename, realm=None):
-        Database.__init__(self, filename, realm)
-
-        # Initialize a key used to build the nonce for a challenge.
-        # We need one key for the lifetime of the server, so it
-        # is convenient to store it on the database.
-        self.noncekey = get_random_bytes(8)
-
-    def _store_password(self, username, password):
-        dig = hexdigest("%s:%s:%s" % (username, self.realm, password))
-        self._users[username] = dig
-
-def session_key(h_up, nonce):
-    # The hash itself is a bit too short to be a session key.
-    # HMAC wants a 64-byte key.  We don't want to use h_up
-    # directly because it would never change over time.  Instead
-    # use the hash plus part of h_up.
-    return sha.new("%s:%s" % (h_up, nonce)).digest() + h_up[:44]
-
-class StorageClass(ZEOStorage):
-    def set_database(self, database):
-        assert isinstance(database, DigestDatabase)
-        self.database = database
-        self.noncekey = database.noncekey
-
-    def _get_time(self):
-        # Return a string representing the current time.
-        t = int(time.time())
-        return struct.pack("i", t)
-
-    def _get_nonce(self):
-        # RFC 2069 recommends a nonce of the form
-        # H(client-IP ":" time-stamp ":" private-key)
-        dig = sha.sha()
-        dig.update(str(self.connection.addr))
-        dig.update(self._get_time())
-        dig.update(self.noncekey)
-        return dig.hexdigest()
-
-    def auth_get_challenge(self):
-        """Return realm, challenge, and nonce."""
-        self._challenge = self._get_nonce()
-        self._key_nonce = self._get_nonce()
-        return self.auth_realm, self._challenge, self._key_nonce
-
-    def auth_response(self, resp):
-        # verify client response
-        user, challenge, response = resp
-
-        # Since zrpc is a stateful protocol, we just store the nonce
-        # we sent to the client.  It will need to generate a new
-        # nonce for a new connection anyway.
-        if self._challenge != challenge:
-            raise ValueError, "invalid challenge"
-
-        # lookup user in database
-        h_up = self.database.get_password(user)
-
-        # regenerate resp from user, password, and nonce
-        check = hexdigest("%s:%s" % (h_up, challenge))
-        if check == response:
-            self.connection.setSessionKey(session_key(h_up, self._key_nonce))
-        return self.finish_auth(check == response)
-
-    extensions = [auth_get_challenge, auth_response]
-
-class DigestClient(Client):
-    extensions = ["auth_get_challenge", "auth_response"]
-
-    def start(self, username, realm, password):
-        _realm, challenge, nonce = self.stub.auth_get_challenge()
-        if _realm != realm:
-            raise AuthError("expected realm %r, got realm %r"
-                            % (_realm, realm))
-        h_up = hexdigest("%s:%s:%s" % (username, realm, password))
-
-        resp_dig = hexdigest("%s:%s" % (h_up, challenge))
-        result = self.stub.auth_response((username, challenge, resp_dig))
-        if result:
-            return session_key(h_up, nonce)
-        else:
-            return None
diff --git a/branches/bug1734/src/ZEO/auth/base.py b/branches/bug1734/src/ZEO/auth/base.py
deleted file mode 100644
index 4cdf8030..00000000
--- a/branches/bug1734/src/ZEO/auth/base.py
+++ /dev/null
@@ -1,131 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Base classes for defining an authentication protocol.
-
-Database -- abstract base class for password database
-Client -- abstract base class for authentication client
-"""
-
-import os
-import sha
-
-class Client:
-    # Subclass should override to list the names of methods that
-    # will be called on the server.
-    extensions = []
-
-    def __init__(self, stub):
-        self.stub = stub
-        for m in self.extensions:
-            setattr(self.stub, m, self.stub.extensionMethod(m))
-
-def sort(L):
-    """Sort a list in-place and return it."""
-    L.sort()
-    return L
-
-class Database:
-    """Abstracts a password database.
-
-    This class is used both in the authentication process (via
-    get_password()) and by client scripts that manage the password
-    database file.
-
-    The password file is a simple, colon-separated text file mapping
-    usernames to password hashes. The hashes are SHA hex digests
-    produced from the password string.
-    """
-    realm = None
-    def __init__(self, filename, realm=None):
-        """Creates a new Database
-
-        filename: a string containing the full pathname of
-            the password database file. Must be readable by the user
-            running ZEO. Must be writeable by any client script that
-            accesses the database.
-
-        realm: the realm name (a string)
-        """
-        self._users = {}
-        self.filename = filename
-        self.load()
-        if realm:
-            if self.realm and self.realm != realm:
-                raise ValueError, ("Specified realm %r differs from database "
-                                   "realm %r" % (realm or '', self.realm))
-            else:
-                self.realm = realm
-
-    def save(self, fd=None):
-        filename = self.filename
-
-        if not fd:
-            fd = open(filename, 'w')
-        if self.realm:
-            print >> fd, "realm", self.realm
-
-        for username in sort(self._users.keys()):
-            print >> fd, "%s: %s" % (username, self._users[username])
-
-    def load(self):
-        filename = self.filename
-        if not filename:
-            return
-
-        if not os.path.exists(filename):
-            return
-
-        fd = open(filename)
-        L = fd.readlines()
-
-        if not L:
-            return
-
-        if L[0].startswith("realm "):
-            line = L.pop(0).strip()
-            self.realm = line[len("realm "):]
-
-        for line in L:
-            username, hash = line.strip().split(":", 1)
-            self._users[username] = hash.strip()
-
-    def _store_password(self, username, password):
-        self._users[username] = self.hash(password)
-
-    def get_password(self, username):
-        """Returns password hash for specified username.
-
-        Callers must check for LookupError, which is raised when a
-        non-existent user is specified."""
-        if not self._users.has_key(username):
-            raise LookupError, "No such user: %s" % username
-        return self._users[username]
-
-    def hash(self, s):
-        return sha.new(s).hexdigest()
-
-    def add_user(self, username, password):
-        if self._users.has_key(username):
-            raise LookupError, "User %s already exists" % username
-        self._store_password(username, password)
-
-    def del_user(self, username):
-        if not self._users.has_key(username):
-            raise LookupError, "No such user: %s" % username
-        del self._users[username]
-
-    def change_password(self, username, password):
-        if not self._users.has_key(username):
-            raise LookupError, "No such user: %s" % username
-        self._store_password(username, password)
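A short usage sketch for maintaining the password file (path and credentials illustrative):

    db = Database("/tmp/zeo.passwd", realm="test")
    db.add_user("alice", "secret")
    db.save()                         # writes "realm test" plus user lines
    print db.get_password("alice")    # the SHA hex digest, not the password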
diff --git a/branches/bug1734/src/ZEO/auth/hmac.py b/branches/bug1734/src/ZEO/auth/hmac.py
deleted file mode 100644
index db9b404c..00000000
--- a/branches/bug1734/src/ZEO/auth/hmac.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""HMAC (Keyed-Hashing for Message Authentication) Python module.
-
-Implements the HMAC algorithm as described by RFC 2104.
-"""
-
-def _strxor(s1, s2):
-    """Utility method. XOR the two strings s1 and s2 (must have same length).
-    """
-    return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
-
-# The size of the digests returned by HMAC depends on the underlying
-# hashing module used.
-digest_size = None
-
-class HMAC:
-    """RFC2104 HMAC class.
-
-    This supports the API for Cryptographic Hash Functions (PEP 247).
-    """
-
-    def __init__(self, key, msg = None, digestmod = None):
-        """Create a new HMAC object.
-
-        key:       key for the keyed hash object.
-        msg:       Initial input for the hash, if provided.
-        digestmod: A module supporting PEP 247. Defaults to the md5 module.
-        """
-        if digestmod is None:
-            import md5
-            digestmod = md5
-
-        self.digestmod = digestmod
-        self.outer = digestmod.new()
-        self.inner = digestmod.new()
-        self.digest_size = digestmod.digest_size
-
-        blocksize = 64
-        ipad = "\x36" * blocksize
-        opad = "\x5C" * blocksize
-
-        if len(key) > blocksize:
-            key = digestmod.new(key).digest()
-
-        key = key + chr(0) * (blocksize - len(key))
-        self.outer.update(_strxor(key, opad))
-        self.inner.update(_strxor(key, ipad))
-        if msg is not None:
-            self.update(msg)
-
-##    def clear(self):
-##        raise NotImplementedError, "clear() method not available in HMAC."
-
-    def update(self, msg):
-        """Update this hashing object with the string msg.
-        """
-        self.inner.update(msg)
-
-    def copy(self):
-        """Return a separate copy of this hashing object.
-
-        An update to this copy won't affect the original object.
-        """
-        other = HMAC("")
-        other.digestmod = self.digestmod
-        other.inner = self.inner.copy()
-        other.outer = self.outer.copy()
-        return other
-
-    def digest(self):
-        """Return the hash value of this hashing object.
-
-        This returns a string containing 8-bit data.  The object is
-        not altered in any way by this function; you can continue
-        updating the object after calling this function.
-        """
-        h = self.outer.copy()
-        h.update(self.inner.digest())
-        return h.digest()
-
-    def hexdigest(self):
-        """Like digest(), but returns a string of hexadecimal digits instead.
-        """
-        return "".join([hex(ord(x))[2:].zfill(2)
-                        for x in tuple(self.digest())])
-
-def new(key, msg = None, digestmod = None):
-    """Create a new hashing object and return it.
-
-    key: The starting key for the hash.
-    msg: if available, will immediately be hashed into the object's starting
-    state.
-
-    You can now feed arbitrary strings into the object using its update()
-    method, and can ask for the hash value at any time by calling its digest()
-    method.
-    """
-    return HMAC(key, msg, digestmod)
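A PEP 247-style usage sketch (key and message illustrative):

    from ZEO.auth import hmac
    import sha

    h = hmac.new("session-key", digestmod=sha)
    h.update("message to authenticate")
    print h.hexdigest()               # 40 hex digits for a sha-based HMAC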
diff --git a/branches/bug1734/src/ZEO/cache.py b/branches/bug1734/src/ZEO/cache.py
deleted file mode 100644
index 7170de83..00000000
--- a/branches/bug1734/src/ZEO/cache.py
+++ /dev/null
@@ -1,1071 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Disk-based client cache for ZEO.
-
-ClientCache exposes an API used by the ZEO client storage.  FileCache stores
-objects on disk using a 2-tuple of oid and tid as key.
-
-ClientCache's API is similar to a storage API with methods like load(),
-store(), and invalidate().  It manages in-memory data structures that allow
-it to map this richer API onto the simple key-based API of the lower-level
-FileCache.
-"""
-
-import bisect
-import logging
-import os
-import struct
-import tempfile
-import time
-
-from ZODB.utils import z64, u64
-
-logger = logging.getLogger("zeo.cache")
-
-##
-# A disk-based cache for ZEO clients.
-# <p>
-# This class provides an interface to a persistent, disk-based cache
-# used by ZEO clients to store copies of database records from the
-# server.
-# <p>
-# The details of the constructor are unspecified at this point.
-# <p>
-# Each entry in the cache is valid for a particular range of transaction
-# ids.  The lower bound is the transaction that wrote the data.  The
-# upper bound is the next transaction that wrote a revision of the
-# object.  If the data is current, the upper bound is stored as None;
-# the data is considered current until an invalidate() call is made.
-# <p>
-# It is an error to call store() twice with the same object without an
-# intervening invalidate() to set the upper bound on the first cache
-# entry.  <em>Perhaps it will be necessary to have a call the removes
-# something from the cache outright, without keeping a non-current
-# entry.</em>
-# <h3>Cache verification</h3>
-# <p>
-# When the client is connected to the server, it receives
-# invalidations every time an object is modified.  When the client is
-# disconnected then reconnects, it must perform cache verification to make
-# sure its cached data is synchronized with the storage's current state.
-# <p>
-# quick verification
-# full verification
-# <p>
-
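The validity-range rule above can be illustrated standalone (small integers stand in for 8-byte tids):

    revisions = [
        (8, 12),     # written by txn 8, superseded by txn 12
        (12, None),  # current: valid until an invalidate() arrives
    ]

    def before(revisions, tid):
        # Return the revision that was current just before `tid`.
        for start, end in revisions:
            if start < tid and (end is None or tid <= end):
                return start, end
        return None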
-class ClientCache:
-    """A simple in-memory cache."""
-
-    ##
-    # Do we put the constructor here?
-    # @param path path of persistent snapshot of cache state (a file path)
-    # @param size size of cache file, in bytes
-
-    # The default size of 200MB makes a lot more sense than the traditional
-    # default of 20MB.  The default here is misleading, though, since
-    # ClientStorage is the only user of ClientCache, and it always passes an
-    # explicit size of its own choosing.
-    def __init__(self, path=None, size=200*1024**2, trace=False):
-        self.path = path
-        self.size = size
-
-        if trace and path:
-            self._setup_trace()
-        else:
-            self._trace = self._notrace
-
-        # The cache stores objects in a dict mapping (oid, tid) pairs
-        # to Object() records (see below).  The tid is the transaction
-        # id that wrote the object.  An object record includes data,
-        # serialno, and end tid.  It has auxiliary data structures to
-        # compute the appropriate tid, given the oid and a transaction id
-        # representing an arbitrary point in history.
-        #
-        # The serialized form of the cache just stores the Object()
-        # records.  The in-memory form can be reconstructed from these
-        # records.
-
-        # Maps oid to current tid.  Used to compute key for objects.
-        self.current = {}
-
-        # Maps oid to list of (start_tid, end_tid) pairs in sorted order.
-        # Used to find matching key for load of non-current data.
-        self.noncurrent = {}
-
-        # Map oid to (version, tid) pair.  If there is no entry, the object
-        # is not modified in a version.
-        self.version = {}
-
-        # A FileCache instance does all the low-level work of storing
-        # and retrieving objects to/from the cache file.
-        self.fc = FileCache(size, self.path, self)
-
-    def open(self):
-        self.fc.scan(self.install)
-
-    ##
-    # Callback for FileCache.scan(), when a pre-existing file cache is
-    # used.  For each object in the file, `install()` is invoked.  `f`
-    # is the file object, positioned at the start of the serialized Object.
-    # `ent` is an Entry giving the object's key ((oid, start_tid) pair).
-    def install(self, f, ent):
-        # Called by cache storage layer to insert object.
-        o = Object.fromFile(f, ent.key, skip_data=True)
-        if o is None:
-            return
-        oid = o.key[0]
-        if o.version:
-            self.version[oid] = o.version, o.start_tid
-        elif o.end_tid is None:
-            self.current[oid] = o.start_tid
-        else:
-            assert o.start_tid < o.end_tid
-            this_span = o.start_tid, o.end_tid
-            span_list = self.noncurrent.get(oid)
-            if span_list:
-                bisect.insort_left(span_list, this_span)
-            else:
-                self.noncurrent[oid] = [this_span]
-
-    def close(self):
-        self.fc.close()
-
-    ##
-    # Set the last transaction seen by the cache.
-    # @param tid a transaction id
-    # @exception ValueError attempt to set a new tid less than the current tid
-
-    def setLastTid(self, tid):
-        self.fc.settid(tid)
-
-    ##
-    # Return the last transaction seen by the cache.
-    # @return a transaction id
-    # @defreturn string, or None if no transaction is yet known
-
-    def getLastTid(self):
-        if self.fc.tid == z64:
-            return None
-        else:
-            return self.fc.tid
-
-    ##
-    # Return the current data record for oid and version.
-    # @param oid object id
-    # @param version a version string
-    # @return (data record, serial number, tid), or None if the object is not
-    #         in the cache
-    # @defreturn 3-tuple: (string, string, string)
-
-    def load(self, oid, version=""):
-        tid = None
-        if version:
-            p = self.version.get(oid)
-            if p is None:
-                return None
-            elif p[0] == version:
-                tid = p[1]
-            # Otherwise, we know the cache has version data but not
-            # for the requested version.  Thus, we know it is safe
-            # to return the non-version data from the cache.
-        if tid is None:
-            tid = self.current.get(oid)
-        if tid is None:
-            self._trace(0x20, oid, version)
-            return None
-        o = self.fc.access((oid, tid))
-        if o is None:
-            return None
-        self._trace(0x22, oid, version, o.start_tid, o.end_tid, len(o.data))
-        return o.data, tid, o.version
-
-    ##
-    # Return a non-current revision of oid that was current before tid.
-    # @param oid object id
-    # @param tid id of transaction that wrote next revision of oid
-    # @return data record, serial number, start tid, and end tid
-    # @defreturn 4-tuple: (string, string, string, string)
-
-    def loadBefore(self, oid, tid):
-        L = self.noncurrent.get(oid)
-        if L is None:
-            self._trace(0x24, oid, tid)
-            return None
-        # A pair with None as the second element will always be less
-        # than any pair with the same first tid.
-        i = bisect.bisect_left(L, (tid, None))
-        # The least element left of tid was written before tid.  If
-        # there is no element, the cache doesn't have old enough data.
-        if i == 0:
-            self._trace(0x24, oid, tid)
-            return
-        lo, hi = L[i-1]
-        # lo should always be less than tid
-        if not lo < tid <= hi:
-            self._trace(0x24, oid, tid)
-            return None
-        o = self.fc.access((oid, lo))
-        self._trace(0x26, oid, tid)
-        return o.data, o.start_tid, o.end_tid
-
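A worked example of the bisect trick above (Python 2 ordering, where None sorts below any number or string):

    import bisect
    L = [(5, 8), (8, 12)]                # sorted (start_tid, end_tid) spans
    i = bisect.bisect_left(L, (8, None))
    lo, hi = L[i - 1]                    # (5, 8): the span written before tid 8
    assert lo < 8 <= hi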
-    ##
-    # Return the version an object is modified in, or None for an
-    # object that is not modified in a version.
-    # @param oid object id
-    # @return name of version in which the object is modified
-    # @defreturn string or None
-
-    def modifiedInVersion(self, oid):
-        p = self.version.get(oid)
-        if p is None:
-            return None
-        version, tid = p
-        return version
-
-    ##
-    # Store a new data record in the cache.
-    # @param oid object id
-    # @param version name of version that oid was modified in.  The cache
-    #                only stores current version data, so end_tid should
-    #                be None if version is not the empty string.
-    # @param start_tid the id of the transaction that wrote this revision
-    # @param end_tid the id of the transaction that created the next
-    #                revision of oid.  If end_tid is None, the data is
-    #                current.
-    # @param data the actual data
-    # @exception ValueError tried to store non-current version data
-
-    def store(self, oid, version, start_tid, end_tid, data):
-        # It's hard for the client to avoid storing the same object
-        # more than once.  One case is when the client requests
-        # version data that doesn't exist.  It checks the cache for
-        # the requested version, doesn't find it, then asks the server
-        # for that data.  The server returns the non-version data,
-        # which may already be in the cache.
-        if (oid, start_tid) in self.fc:
-            return
-        o = Object((oid, start_tid), version, data, start_tid, end_tid)
-        if version:
-            if end_tid is not None:
-                raise ValueError("cache only stores current version data")
-            if oid in self.version:
-                if self.version[oid] != (version, start_tid):
-                    raise ValueError("data already exists for version %r"
-                                     % self.version[oid][0])
-            self.version[oid] = version, start_tid
-            self._trace(0x50, oid, version, start_tid, dlen=len(data))
-        else:
-            if end_tid is None:
-                _cur_start = self.current.get(oid)
-                if _cur_start:
-                    if _cur_start != start_tid:
-                        raise ValueError(
-                            "already have current data for oid")
-                    else:
-                        return
-                self.current[oid] = start_tid
-                self._trace(0x52, oid, version, start_tid, dlen=len(data))
-            else:
-                L = self.noncurrent.setdefault(oid, [])
-                p = start_tid, end_tid
-                if p in L:
-                    return # duplicate store
-                bisect.insort_left(L, (start_tid, end_tid))
-                self._trace(0x54, oid, version, start_tid, end_tid,
-                            dlen=len(data))
-        self.fc.add(o)
-
-    ##
-    # Remove all knowledge of noncurrent revisions of oid, both in
-    # self.noncurrent and in our FileCache.  `version` and `tid` are used
-    # only for trace records.
-    def _remove_noncurrent_revisions(self, oid, version, tid):
-        noncurrent_list = self.noncurrent.get(oid)
-        if noncurrent_list:
-            # Note:  must iterate over a copy of noncurrent_list.  The
-            # FileCache remove() calls our _evicted() method, and that
-            # mutates the list.
-            for old_tid, dummy in noncurrent_list[:]:
-                # 0x1E = invalidate (hit, discarding current or non-current)
-                self._trace(0x1E, oid, version, tid)
-                self.fc.remove((oid, old_tid))
-            del self.noncurrent[oid]
-
-    ##
-    # If `tid` is None, or we have data for `oid` in a (non-empty) version,
-    # forget all knowledge of `oid`.  (`tid` can be None only for
-    # invalidations generated by startup cache verification.)  If `tid`
-# isn't None, we don't have version data for `oid`, and we do have
-# current data for `oid`, then stop believing we have current data and
-# mark the data we had as valid only up to `tid`.  In all other cases, do
-    # nothing.
-    # @param oid object id
-    # @param version name of version to invalidate.
-    # @param tid the id of the transaction that wrote a new revision of oid,
-    #        or None to forget all cached info about oid (version, current
-    #        revision, and non-current revisions)
-    def invalidate(self, oid, version, tid):
-        if tid > self.fc.tid and tid is not None:
-            self.fc.settid(tid)
-
-        remove_all_knowledge_of_oid = tid is None
-
-        if oid in self.version:
-            # Forget we know about the version data.
-            # 0x1A = invalidate (hit, version)
-            self._trace(0x1A, oid, version, tid)
-            dllversion, dlltid = self.version[oid]
-            assert not version or version == dllversion, (version, dllversion)
-            self.fc.remove((oid, dlltid))
-            assert oid not in self.version # .remove() got rid of it
-            # And continue:  we must also remove any non-version data from
-            # the cache.  Or, at least, I have such a poor understanding of
-            # versions that anything less drastic would probably be wrong.
-            remove_all_knowledge_of_oid = True
-
-        if remove_all_knowledge_of_oid:
-            self._remove_noncurrent_revisions(oid, version, tid)
-
-        # Only current, non-version data remains to be handled.
-
-        cur_tid = self.current.get(oid)
-        if not cur_tid:
-            # 0x10 == invalidate (miss)
-            self._trace(0x10, oid, version, tid)
-            return
-
-        # We had current data for oid, but no longer.
-
-        if remove_all_knowledge_of_oid:
-            # 0x1E = invalidate (hit, discarding current or non-current)
-            self._trace(0x1E, oid, version, tid)
-            self.fc.remove((oid, cur_tid))
-            assert cur_tid not in self.current  # .remove() got rid of it
-            return
-
-        # Add the data we have to the list of non-current data for oid.
-        assert tid is not None and cur_tid < tid
-        # 0x1C = invalidate (hit, saving non-current)
-        self._trace(0x1C, oid, version, tid)
-        del self.current[oid]   # because we no longer have current data
-
-        # Update the end_tid half of oid's validity range on disk.
-        # TODO: Want to fetch object without marking it as accessed.
-        o = self.fc.access((oid, cur_tid))
-        assert o is not None
-        assert o.end_tid is None  # i.e., o was current
-        if o is None:
-            # TODO:  Since we asserted o is not None above, this block
-            # should be removable; waiting on time to prove it can't happen.
-            return
-        o.end_tid = tid
-        self.fc.update(o)   # record the new end_tid on disk
-        # Add to oid's list of non-current data.
-        L = self.noncurrent.setdefault(oid, [])
-        bisect.insort_left(L, (cur_tid, tid))
-
-    ##
-    # Return the number of object revisions in the cache.
-    #
-    # Or maybe better to just return len(self.cache)?  Needs clearer use case.
-    def __len__(self):
-        n = len(self.current) + len(self.version)
-        if self.noncurrent:
-            n += sum(map(len, self.noncurrent))
-        return n
-
-    ##
-    # Generates (oid, serial, version) triples for all objects in the
-    # cache.  This generator is used by cache verification.
-
-    def contents(self):
-        # May need to materialize list instead of iterating;
-        # depends on whether the caller may change the cache.
-        for o in self.fc:
-            oid, tid = o.key
-            if oid in self.version:
-                obj = self.fc.access(o.key)
-                yield oid, tid, obj.version
-            else:
-                yield oid, tid, ""
-
-    def dump(self):
-        from ZODB.utils import oid_repr
-        print "cache size", len(self)
-        L = list(self.contents())
-        L.sort()
-        for oid, tid, version in L:
-            print oid_repr(oid), oid_repr(tid), repr(version)
-        print "dll contents"
-        L = list(self.fc)
-        L.sort(lambda x, y: cmp(x.key, y.key))
-        for x in L:
-            end_tid = x.end_tid or z64
-            print oid_repr(x.key[0]), oid_repr(x.key[1]), oid_repr(end_tid)
-        print
-
-    def _evicted(self, o):
-        # Called by the FileCache to signal that Object o has been evicted.
-        oid, tid = o.key
-        if o.end_tid is None:
-            if o.version:
-                del self.version[oid]
-            else:
-                del self.current[oid]
-        else:
-            # Although we use bisect to keep the list sorted,
-            # we never expect the list to be very long.  So the
-            # brute force approach should normally be fine.
-            L = self.noncurrent[oid]
-            L.remove((o.start_tid, o.end_tid))
-
-    def _setup_trace(self):
-        tfn = self.path + ".trace"
-        self.tracefile = None
-        try:
-            self.tracefile = open(tfn, "ab")
-            self._trace(0x00)
-        except IOError, msg:
-            self.tracefile = None
-            logger.warning("Could not write to trace file %s: %s",
-                           tfn, msg)
-
-    def _notrace(self, *arg, **kwargs):
-        pass
-
-    def _trace(self,
-               code, oid="", version="", tid="", end_tid=z64, dlen=0,
-               # The next two are just speed hacks.
-               time_time=time.time, struct_pack=struct.pack):
-        # The code argument is two hex digits; bits 0 and 7 must be zero.
-        # The first hex digit shows the operation, the second the outcome.
-        # If the second digit is in "02468" then it is a 'miss'.
-        # If it is in "ACE" then it is a 'hit'.
-        # This method has been carefully tuned to be as fast as possible.
-        # Note: when tracing is disabled, this method is hidden by a dummy.
-        if version:
-            code |= 0x80
-        encoded = (dlen + 255) & 0x7fffff00 | code
-        if tid is None:
-            tid = z64
-        if end_tid is None:
-            end_tid = z64
-        try:
-            self.tracefile.write(
-                struct_pack(">iiH8s8s",
-                            time_time(),
-                            encoded,
-                            len(oid),
-                            tid, end_tid) + oid)
-        except:
-            print `tid`, `end_tid`
-            raise
-
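A worked example of the trace-record encoding above (values illustrative):

    code, dlen = 0x52, 130              # "store current data", 130-byte record
    encoded = (dlen + 255) & 0x7fffff00 | code
    assert encoded == 256 | 0x52        # dlen rounded up to 256, code in low byte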
-##
-# An Object stores the cached data for a single object.
-# <p>
-# The cached data includes the actual object data, the key, and three
-# data fields that describe the validity period and version of the
-# object.  The key contains the oid and a redundant start_tid.  The
-# actual size of an object is variable, depending on the size of the
-# data and whether it is in a version.
-# <p>
-# The serialized format does not include the key, because it is stored
-# in the header used by the cache file's storage format.
-# <p>
-# Instances of Object are generally short-lived -- they're really a way to
-# package data on the way to or from the disk file.
-
-class Object(object):
-    __slots__ = (# pair (object id, txn id) -- something usable as a dict key;
-                 # the second part of the pair is equal to start_tid
-                 "key",
-
-                 # string, tid of txn that wrote the data
-                 "start_tid",
-
-                 # string, tid of txn that wrote next revision, or None
-                 # if the data is current; if not None, end_tid is strictly
-                 # greater than start_tid
-                 "end_tid",
-
-                 # string, name of version
-                 "version",
-
-                 # string, the actual data record for the object
-                 "data",
-
-                 # total size of serialized object; this includes the
-                 # data, version, and all overhead (header) bytes.
-                 "size",
-                )
-
-    # A serialized Object on disk looks like:
-    #
-    #         offset             # bytes   value
-    #         ------             -------   -----
-    #              0                   8   end_tid; string
-    #              8                   2   len(version); 2-byte signed int
-    #             10                   4   len(data); 4-byte signed int
-    #             14        len(version)   version; string
-    # 14+len(version)          len(data)   the object pickle; string
-    # 14+len(version)+
-    #       len(data)                  8   oid; string
-
-    # The serialization format uses an end tid of "\0"*8 (z64), the least
-    # 8-byte string, to represent None.  It isn't possible for an end_tid
-    # to be 0, because it must always be strictly greater than the start_tid.
-
-    fmt = ">8shi"  # end_tid, len(self.version), len(self.data)
-    FIXED_HEADER_SIZE = struct.calcsize(fmt)
-    assert FIXED_HEADER_SIZE == 14
-    TOTAL_FIXED_SIZE = FIXED_HEADER_SIZE + 8  # +8 for the oid at the end
-
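For concreteness, packing the 14-byte fixed header for a current revision with no version (Python 2 strings; values illustrative):

    import struct
    z64 = "\0" * 8                      # a None end_tid serializes as eight NULs
    data = "an object pickle"
    header = struct.pack(">8shi", z64, 0, len(data))
    assert len(header) == 14            # FIXED_HEADER_SIZE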
-    def __init__(self, key, version, data, start_tid, end_tid):
-        self.key = key
-        self.version = version
-        self.data = data
-        self.start_tid = start_tid
-        self.end_tid = end_tid
-        # The size of the serialized object on disk, including the
-        # 14-byte header, the lengths of data and version, and a
-        # copy of the 8-byte oid.
-        if data is not None:
-            self.size = self.TOTAL_FIXED_SIZE + len(data) + len(version)
-
-    ##
-    # Return the fixed-sized serialization header as a string:  pack end_tid,
-    # and the lengths of the .version and .data members.
-    def get_header(self):
-        return struct.pack(self.fmt,
-                           self.end_tid or z64,
-                           len(self.version),
-                           len(self.data))
-
-    ##
-    # Write the serialized representation of self to file f, at its current
-    # position.
-    def serialize(self, f):
-        f.writelines([self.get_header(),
-                      self.version,
-                      self.data,
-                      self.key[0]])
-
-    ##
-    # Write the fixed-size header for self, to file f at its current position.
-    # The only real use for this is when the current revision of an object
-    # in cache is invalidated.  Then the end_tid field gets set to the tid
-    # of the transaction that caused the invalidation.
-    def serialize_header(self, f):
-        f.write(self.get_header())
-
-    ##
-    # fromFile is a class constructor, unserializing an Object from the
-    # current position in file f.  Exclusive access to f for the duration
-    # is assumed.  The key is a (oid, start_tid) pair, and the oid must
-    # match the serialized oid.  If `skip_data` is true, .data is left
-    # None in the Object returned, but all the other fields are populated.
-    # Else (`skip_data` is false, the default), all fields including .data
-    # are populated.  .data can be big, so it's prudent to skip it when it
-    # isn't needed.
-    def fromFile(cls, f, key, skip_data=False):
-        s = f.read(cls.FIXED_HEADER_SIZE)
-        if not s:
-            return None
-        oid, start_tid = key
-
-        end_tid, vlen, dlen = struct.unpack(cls.fmt, s)
-        if end_tid == z64:
-            end_tid = None
-
-        version = f.read(vlen)
-        if vlen != len(version):
-            raise ValueError("corrupted record, version")
-
-        if skip_data:
-            data = None
-            f.seek(dlen, 1)
-        else:
-            data = f.read(dlen)
-            if dlen != len(data):
-                raise ValueError("corrupted record, data")
-
-        s = f.read(8)
-        if s != oid:
-            raise ValueError("corrupted record, oid")
-
-        return cls((oid, start_tid), version, data, start_tid, end_tid)
-
-    fromFile = classmethod(fromFile)
-
-
-# Entry just associates a key with a file offset.  It's used by FileCache.
-class Entry(object):
-    __slots__ = (# object key -- something usable as a dict key.
-                 'key',
-
-                 # Offset from start of file to the object's data
-                 # record; this includes all overhead bytes (status
-                 # byte, size bytes, etc).  The size of the data
-                 # record is stored in the file near the start of the
-                 # record, but for efficiency we also keep size in a
-                 # dict (filemap; see later).
-                 'offset',
-                )
-
-    def __init__(self, key=None, offset=None):
-        self.key = key
-        self.offset = offset
-
-
-
-##
-# FileCache stores a cache in a single on-disk file.
-#
-# On-disk cache structure.
-#
-# The file begins with a 12-byte header.  The first four bytes are the
-# file's magic number - ZEC3 - indicating zeo cache version 3.  The
-# next eight bytes are the last transaction id.
-
-magic = "ZEC3"
-ZEC3_HEADER_SIZE = 12
-
-# After the header, the file contains a contiguous sequence of blocks.  All
-# blocks begin with a one-byte status indicator:
-#
-# 'a'
-#       Allocated.  The block holds an object; the next 4 bytes are >I
-#       format total block size.
-#
-# 'f'
-#       Free.  The block is free; the next 4 bytes are >I format total
-#       block size.
-#
-# '1', '2', '3', '4'
-#       The block is free, and consists of 1, 2, 3 or 4 bytes total.
-#
-# "Total" includes the status byte, and size bytes.  There are no
-# empty (size 0) blocks.
-
-
-# Allocated blocks have more structure:
-#
-#     1 byte allocation status ('a').
-#     4 bytes block size, >I format.
-#     16 bytes oid + tid, string.
-#     size-OBJECT_HEADER_SIZE bytes, the serialization of an Object (see
-#         class Object for details).
-
-OBJECT_HEADER_SIZE = 1 + 4 + 16
-
-# The cache's currentofs goes around the file, circularly, forever.
-# It's always the starting offset of some block.
-#
-# When a new object is added to the cache, it's stored beginning at
-# currentofs, and currentofs moves just beyond it.  As many contiguous
-# blocks needed to make enough room for the new object are evicted,
-# starting at currentofs.  Exception:  if currentofs is close enough
-# to the end of the file that the new object can't fit in one
-# contiguous chunk, currentofs is reset to ZEC3_HEADER_SIZE first.
-
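The block layout above can be walked with a few lines of standalone code (read-only sketch; FileCache.scan() below does the real work):

    import struct

    def iter_blocks(f, fsize):
        ofs = ZEC3_HEADER_SIZE              # skip magic + last tid
        while ofs < fsize:
            f.seek(ofs)
            status = f.read(1)
            if status in 'af':              # allocated or free block
                size, = struct.unpack(">I", f.read(4))
            elif status in '1234':          # tiny free block
                size = int(status)
            else:
                raise ValueError("bad status byte %r at %d" % (status, ofs))
            yield ofs, status, size
            ofs += size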
-# Do everything possible to ensure that the bytes we wrote to file f are
-# really on disk.
-def sync(f):
-    f.flush()
-    if hasattr(os, 'fsync'):
-        os.fsync(f.fileno())
-
-class FileCache(object):
-
-    def __init__(self, maxsize, fpath, parent, reuse=True):
-        # - `maxsize`:  total size of the cache file, in bytes; this is
-        #   ignored if reuse is true and fpath names an existing file;
-        #   perhaps we should attempt to change the cache size in that
-        #   case
-        # - `fpath`:  filepath for the cache file, or None; see `reuse`
-        # - `parent`:  the ClientCache this FileCache is part of
-        # - `reuse`:  If true, and fpath is not None, and fpath names a
-        #    file that exists, that pre-existing file is used (persistent
-        #    cache).  In all other cases a new file is created:  a temp
-        #    file if fpath is None, else with path fpath.
-        self.maxsize = maxsize
-        self.parent = parent
-
-        # tid for the most recent transaction we know about.  This is also
-        # stored near the start of the file.
-        self.tid = None
-
-        # There's one Entry instance, kept in memory, for each currently
-        # allocated block in the file, and there's one allocated block in the
-        # file per serialized Object.  filemap retrieves the Entry given the
-        # starting offset of a block, and key2entry retrieves the Entry given
-        # an object revision's key (an (oid, start_tid) pair).  From an
-        # Entry, we can get the Object's key and file offset.
-
-        # Map offset in file to pair (data record size, Entry).
-        # Entry is None iff the block starting at offset is free.
-        # filemap always contains a complete account of what's in the
-        # file -- study method _verify_filemap for executable checking
-        # of the relevant invariants.  An offset is at the start of a
-        # block iff it's a key in filemap.  The data record size is
-        # stored in the file too, so we could just seek to the offset
-        # and read it up; keeping it in memory is an optimization.
-        self.filemap = {}
-
-        # Map key to Entry.  After
-        #     obj = key2entry[key]
-        # then
-        #     obj.key == key
-        # is true.  An object is currently stored on disk iff its key is in
-        # key2entry.
-        self.key2entry = {}
-
-        # Always the offset into the file of the start of a block.
-        # New and relocated objects are always written starting at
-        # currentofs.
-        self.currentofs = ZEC3_HEADER_SIZE
-
-        # self.f is the open file object.
-        # When we're not reusing an existing file, self.f is left None
-        # here -- the scan() method must be called then to open the file
-        # (and it sets self.f).
-
-        self.fpath = fpath
-        if reuse and fpath and os.path.exists(fpath):
-            # Reuse an existing file.  scan() will open & read it.
-            self.f = None
-        else:
-            if reuse:
-                logger.warning("reuse=True but the given file path %r "
-                               "doesn't exist; ignoring reuse=True", fpath)
-            if fpath:
-                self.f = open(fpath, 'wb+')
-            else:
-                self.f = tempfile.TemporaryFile()
-            # Make sure the OS really saves enough bytes for the file.
-            self.f.seek(self.maxsize - 1)
-            self.f.write('x')
-            self.f.truncate()
-            # Start with one magic header block
-            self.f.seek(0)
-            self.f.write(magic)
-            self.f.write(z64)
-            # and one free block.
-            self.f.write('f' + struct.pack(">I", self.maxsize -
-                                                 ZEC3_HEADER_SIZE))
-            self.sync()
-            self.filemap[ZEC3_HEADER_SIZE] = (self.maxsize - ZEC3_HEADER_SIZE,
-                                              None)
-
-        # Statistics:  _n_adds, _n_added_bytes,
-        #              _n_evicts, _n_evicted_bytes,
-        #              _n_accesses
-        self.clearStats()
-
-    ##
-    # Scan the current contents of the cache file, calling `install`
-    # for each object found in the cache.  This method should only
-    # be called once to initialize the cache from disk.
-    def scan(self, install):
-        if self.f is not None:
-            return
-        fsize = os.path.getsize(self.fpath)
-        if fsize != self.maxsize:
-            logger.warning("existing cache file %s has size %d; "
-                           "requested size %d ignored", self.fpath,
-                           fsize, self.maxsize)
-            self.maxsize = fsize
-        self.f = open(self.fpath, 'rb+')
-        _magic = self.f.read(4)
-        if _magic != magic:
-            raise ValueError("unexpected magic number: %r" % _magic)
-        self.tid = self.f.read(8)
-        if len(self.tid) != 8:
-            raise ValueError("cache file too small -- no tid at start")
-
-        # Populate .filemap and .key2entry to reflect what's currently in the
-        # file, and tell our parent about it too (via the `install` callback).
-        # Remember the location of the largest free block.  That seems a decent
-        # place to start currentofs.
-        max_free_size = max_free_offset = 0
-        ofs = ZEC3_HEADER_SIZE
-        while ofs < fsize:
-            self.f.seek(ofs)
-            ent = None
-            status = self.f.read(1)
-            if status == 'a':
-                size, rawkey = struct.unpack(">I16s", self.f.read(20))
-                key = rawkey[:8], rawkey[8:]
-                assert key not in self.key2entry
-                self.key2entry[key] = ent = Entry(key, ofs)
-                install(self.f, ent)
-            elif status == 'f':
-                size, = struct.unpack(">I", self.f.read(4))
-            elif status in '1234':
-                size = int(status)
-            else:
-                raise ValueError("unknown status byte value %s in client "
-                                 "cache file" % 0, hex(ord(status)))
-
-            self.filemap[ofs] = size, ent
-            if ent is None and size > max_free_size:
-                max_free_size, max_free_offset = size, ofs
-
-            ofs += size
-
-        if ofs != fsize:
-            raise ValueError("final offset %s != file size %s in client "
-                             "cache file" % (ofs, fsize))
-        if __debug__:
-            self._verify_filemap()
-        self.currentofs = max_free_offset
-
-    def clearStats(self):
-        self._n_adds = self._n_added_bytes = 0
-        self._n_evicts = self._n_evicted_bytes = 0
-        self._n_accesses = 0
-
-    def getStats(self):
-        return (self._n_adds, self._n_added_bytes,
-                self._n_evicts, self._n_evicted_bytes,
-                self._n_accesses
-               )
-
-    ##
-    # The number of objects currently in the cache.
-    def __len__(self):
-        return len(self.key2entry)
-
-    ##
-    # Iterate over the objects in the cache, producing an Entry for each.
-    def __iter__(self):
-        return self.key2entry.itervalues()
-
-    ##
-    # Test whether an (oid, tid) pair is in the cache.
-    def __contains__(self, key):
-        return key in self.key2entry
-
-    ##
-    # Do all possible to ensure all bytes written to the file so far are
-    # actually on disk.
-    def sync(self):
-        sync(self.f)
-
-    ##
-    # Close the underlying file.  No methods accessing the cache should be
-    # used after this.
-    def close(self):
-        if self.f:
-            self.sync()
-            self.f.close()
-            self.f = None
-
-    ##
-    # Evict objects as necessary to free up at least nbytes bytes,
-    # starting at currentofs.  If currentofs is closer than nbytes to
-    # the end of the file, currentofs is reset to ZEC3_HEADER_SIZE first.
-    # The number of bytes actually freed may be (and probably will be)
-    # greater than nbytes, and is _makeroom's return value.  The file is not
-    # altered by _makeroom.  filemap is updated to reflect the
-    # evictions, and it's the caller's responsibility both to fiddle
-    # the file, and to update filemap, to account for all the space
-    # freed (starting at currentofs when _makeroom returns, and
-    # spanning the number of bytes returned by _makeroom).
-    def _makeroom(self, nbytes):
-        assert 0 < nbytes <= self.maxsize - ZEC3_HEADER_SIZE
-        if self.currentofs + nbytes > self.maxsize:
-            self.currentofs = ZEC3_HEADER_SIZE
-        ofs = self.currentofs
-        while nbytes > 0:
-            size, e = self.filemap.pop(ofs)
-            if e is not None:
-                self._evictobj(e, size)
-            ofs += size
-            nbytes -= size
-        return ofs - self.currentofs
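-
-    # Worked example (illustration only):  with ZEC3_HEADER_SIZE == 12
-    # (4-byte magic + 8-byte tid), maxsize == 100, currentofs == 90, and
-    # nbytes == 20:  90 + 20 > 100, so currentofs is reset to 12 first.
-    # If filemap then holds blocks of sizes 16 and 8 at offsets 12 and 28,
-    # both are popped (evicting their Entries, if any) and _makeroom
-    # returns 24 -- more, as usual, than the 20 bytes requested.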
-
-    ##
-    # Write Object obj, with data, to file starting at currentofs.
-    # nfreebytes are already available for overwriting, and it's
-    # guaranteed that's enough.  obj.offset is changed to reflect the
-    # new data record position, and filemap is updated to match.
-    def _writeobj(self, obj, nfreebytes):
-        size = OBJECT_HEADER_SIZE + obj.size
-        assert size <= nfreebytes
-        excess = nfreebytes - size
-        # If there's any excess (which is likely), we need to record a
-        # free block following the end of the data record.  That isn't
-        # expensive -- it's all a contiguous write.
-        if excess == 0:
-            extra = ''
-        elif excess < 5:
-            extra = "01234"[excess]
-        else:
-            extra = 'f' + struct.pack(">I", excess)
-
-        self.f.seek(self.currentofs)
-        self.f.writelines(('a',
-                           struct.pack(">I8s8s", size,
-                                       obj.key[0], obj.key[1])))
-        obj.serialize(self.f)
-        self.f.write(extra)
-        e = Entry(obj.key, self.currentofs)
-        self.key2entry[obj.key] = e
-        self.filemap[self.currentofs] = size, e
-        self.currentofs += size
-        if excess:
-            # We need to record the free block in filemap, but there's
-            # no need to advance currentofs beyond it.  Instead it
-            # gives some breathing room for the next object to get
-            # written.
-            self.filemap[self.currentofs] = excess, None
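-
-    # Worked example (illustration only) of the `extra` marker above:
-    # excess == 0 writes nothing; excess == 3 writes the single status
-    # byte '3', and the 3-byte free block is that byte plus 2 bytes of
-    # leftover junk that scan() knows to skip; excess == 9 writes 'f'
-    # plus the 4-byte size 9, leaving 4 junk bytes in the block.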
-
-    ##
-    # Add Object object to the cache.  This may evict existing objects, to
-    # make room (and almost certainly will, in steady state once the cache
-    # is first full).  The object must not already be in the cache.
-    def add(self, object):
-        size = OBJECT_HEADER_SIZE + object.size
-        # A number of cache simulation experiments all concluded that the
-        # 2nd-level ZEO cache got a much higher hit rate if "very large"
-        # objects simply weren't cached.  For now, we ignore the request
-        # only if the entire cache file is too small to hold the object.
-        if size > self.maxsize - ZEC3_HEADER_SIZE:
-            return
-
-        assert object.key not in self.key2entry
-        assert len(object.key[0]) == 8
-        assert len(object.key[1]) == 8
-
-        self._n_adds += 1
-        self._n_added_bytes += size
-
-        available = self._makeroom(size)
-        self._writeobj(object, available)
-
-    ##
-    # Evict the object represented by Entry `e` from the cache, freeing
-    # `size` bytes in the file for reuse.  `size` is used only for summary
-    # statistics.  This does not alter the file, or self.filemap or
-    # self.key2entry (those are the caller's responsibilities).  It does
-    # invoke _evicted(Object) on our parent.
-    def _evictobj(self, e, size):
-        self._n_evicts += 1
-        self._n_evicted_bytes += size
-        # Load the object header into memory so we know how to
-        # update the parent's in-memory data structures.
-        self.f.seek(e.offset + OBJECT_HEADER_SIZE)
-        o = Object.fromFile(self.f, e.key, skip_data=True)
-        self.parent._evicted(o)
-
-    ##
-    # Return Object for key, or None if not in cache.
-    def access(self, key):
-        self._n_accesses += 1
-        e = self.key2entry.get(key)
-        if e is None:
-            return None
-        offset = e.offset
-        size, e2 = self.filemap[offset]
-        assert e is e2
-
-        self.f.seek(offset + OBJECT_HEADER_SIZE)
-        return Object.fromFile(self.f, key)
-
-    ##
-    # Remove Object for key from cache, if present.
-    def remove(self, key):
-        # If an object is being explicitly removed, we need to load
-        # its header into memory and write a free block marker to the
-        # disk where the object was stored.  We need to load the
-        # header to update the in-memory data structures held by
-        # ClientCache.
-
-        # We could instead just keep the header in memory at all times.
-
-        e = self.key2entry.pop(key, None)
-        if e is None:
-            return
-        offset = e.offset
-        size, e2 = self.filemap[offset]
-        assert e is e2
-        self.filemap[offset] = size, None
-        self.f.seek(offset + OBJECT_HEADER_SIZE)
-        o = Object.fromFile(self.f, key, skip_data=True)
-        assert size >= 5  # only free blocks are tiny
-        # Because `size` >= 5, we can change an allocated block to a free
-        # block just by overwriting the 'a' status byte with 'f' -- the
-        # size field stays the same.
-        self.f.seek(offset)
-        self.f.write('f')
-        self.f.flush()
-        self.parent._evicted(o)
-
-    ##
-    # Update on-disk representation of Object obj.
-    #
-    # This method should be called when the object header is modified.
-    # obj must be in the cache.  The only real use for this is during
-    # invalidation, to set the end_tid field on a revision that was current
-    # (and so had an end_tid of None, but no longer does).
-    def update(self, obj):
-        e = self.key2entry[obj.key]
-        self.f.seek(e.offset + OBJECT_HEADER_SIZE)
-        obj.serialize_header(self.f)
-
-    ##
-    # Update our idea of the most recent tid.  This is stored in the
-    # instance, and also written out near the start of the cache file.  The
-    # new tid must be strictly greater than our current idea of the most
-    # recent tid.
-    def settid(self, tid):
-        if self.tid is not None and tid <= self.tid:
-            raise ValueError("new last tid (%s) must be greater than "
-                             "previous one (%s)" % (u64(tid),
-                                                    u64(self.tid)))
-        assert isinstance(tid, str) and len(tid) == 8
-        self.tid = tid
-        self.f.seek(len(magic))
-        self.f.write(tid)
-        self.f.flush()
-
-    ##
-    # This debug method marches over the entire cache file, verifying that
-    # the current contents match the info in self.filemap and self.key2entry.
-    def _verify_filemap(self, display=False):
-        a = ZEC3_HEADER_SIZE
-        f = self.f
-        while a < self.maxsize:
-            f.seek(a)
-            status = f.read(1)
-            if status in 'af':
-                size, = struct.unpack(">I", f.read(4))
-            else:
-                size = int(status)
-            if display:
-                if a == self.currentofs:
-                    print '*****',
-                print "%c%d" % (status, size),
-            size2, obj = self.filemap[a]
-            assert size == size2
-            assert (obj is not None) == (status == 'a')
-            if obj is not None:
-                assert obj.offset == a
-                assert self.key2entry[obj.key] is obj
-            a += size
-        if display:
-            print
-        assert a == self.maxsize
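-
-##
-# Illustration only:  the tiling invariant that _verify_filemap checks
-# against the file can also be checked purely in memory.  A minimal
-# sketch; `check_tiling` is a hypothetical helper, and `fc` is a cache
-# instance of the class above.
-def check_tiling(fc):
-    ofs = ZEC3_HEADER_SIZE
-    while ofs < fc.maxsize:
-        size, ent = fc.filemap[ofs]   # KeyError here means a gap or overlap
-        assert ent is None or ent.offset == ofs
-        ofs += size
-    assert ofs == fc.maxsize          # the blocks tile the file exactly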
diff --git a/branches/bug1734/src/ZEO/component.xml b/branches/bug1734/src/ZEO/component.xml
deleted file mode 100644
index 6800588d..00000000
--- a/branches/bug1734/src/ZEO/component.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<component>
-
-  <sectiontype name="zeo">
-
-    <description>
-      The content of a ZEO section describes the operational parameters
-      of a ZEO server except for the storage(s) to be served.
-    </description>
-
-    <key name="address" datatype="socket-address"
-         required="yes">
-      <description>
-        The address at which the server should listen.  This can be in
-        the form 'host:port' to signify a TCP/IP connection or a
-        pathname string to signify a Unix domain socket connection (at
-        least one '/' is required).  A hostname may be a DNS name or a
-        dotted IP address.  If the hostname is omitted, the platform's
-        default behavior is used when binding the listening socket (''
-        is passed to socket.bind() as the hostname portion of the
-        address).
-      </description>
-    </key>
-
-    <key name="read-only" datatype="boolean"
-         required="no"
-         default="false">
-      <description>
-        Flag indicating whether the server should operate in read-only
-        mode.  Defaults to false.  Note that even if the server is
-        operating in writable mode, individual storages may still be
-        read-only.  But if the server is in read-only mode, no write
-        operations are allowed, even if the storages are writable.  Note
-        that pack() is considered a read-only operation.
-      </description>
-    </key>
-
-    <key name="invalidation-queue-size" datatype="integer"
-         required="no"
-         default="100">
-      <description>
-        The storage server keeps a queue of the objects modified by the
-        last N transactions, where N == invalidation_queue_size.  This
-        queue is used to speed client cache verification when a client
-        disconnects for a short period of time.
-      </description>
-    </key>
-
-    <key name="monitor-address" datatype="socket-address"
-         required="no">
-      <description>
-        The address at which the monitor server should listen.  If
-        specified, a monitor server is started.  The monitor server
-        provides server statistics in a simple text format.  This can
-        be in the form 'host:port' to signify a TCP/IP connection or a
-        pathname string to signify a Unix domain socket connection (at
-        least one '/' is required).  A hostname may be a DNS name or a
-        dotted IP address.  If the hostname is omitted, the platform's
-        default behavior is used when binding the listening socket (''
-        is passed to socket.bind() as the hostname portion of the
-        address).
-      </description>
-    </key>
-
-    <key name="transaction-timeout" datatype="integer"
-         required="no">
-      <description>
-        The maximum amount of time to wait for a transaction to commit
-        after acquiring the storage lock, specified in seconds.  If the
-        transaction takes too long, the client connection will be closed
-        and the transaction aborted.
-      </description>
-    </key>
-
-    <key name="authentication-protocol" required="no">
-      <description>
-        The name of the protocol used for authentication.  The
-        only protocol provided with ZEO is "digest," but extensions
-        may provide other protocols.
-      </description>
-    </key>
-
-    <key name="authentication-database" required="no">
-      <description>
-        The path of the database containing authentication credentials.
-      </description>
-    </key>
-
-    <key name="authentication-realm" required="no">
-      <description>
-        The authentication realm of the server.  Some authentication
-        schemes use a realm to identify the logical set of usernames
-        that are accepted by this server.
-      </description>
-    </key>
-
-  </sectiontype>
-
-</component>
diff --git a/branches/bug1734/src/ZEO/mkzeoinst.py b/branches/bug1734/src/ZEO/mkzeoinst.py
deleted file mode 100755
index d4bf5479..00000000
--- a/branches/bug1734/src/ZEO/mkzeoinst.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#!python
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""%(program)s -- create a ZEO instance.
-
-Usage: %(program)s home [port]
-
-Given an "instance home directory" <home> and some configuration
-options (all of which have default values), create the following:
-
-<home>/etc/zeo.conf     -- ZEO config file
-<home>/var/             -- Directory for data files: Data.fs etc.
-<home>/log/             -- Directory for log files: zeo.log and zeoctl.log
-<home>/bin/runzeo       -- the zeo server runner
-<home>/bin/zeoctl       -- start/stop script (a shim for zeoctl.py)
-
-The script will not overwrite existing files; instead, it will issue a
-warning if an existing file is found that differs from the file that
-would be written if it didn't exist.
-"""
-
-# WARNING!  Several templates and functions here are reused by ZRS.
-# So be careful with changes.
-
-import os
-import sys
-import stat
-import getopt
-
-zeo_conf_template = """\
-# ZEO configuration file
-
-%%define INSTANCE %(instance_home)s
-
-<zeo>
-  address %(port)d
-  read-only false
-  invalidation-queue-size 100
-  # monitor-address PORT
-  # transaction-timeout SECONDS
-</zeo>
-
-<filestorage 1>
-  path $INSTANCE/var/Data.fs
-</filestorage>
-
-<eventlog>
-  level info
-  <logfile>
-    path $INSTANCE/log/zeo.log
-  </logfile>
-</eventlog>
-
-<runner>
-  program $INSTANCE/bin/runzeo
-  socket-name $INSTANCE/etc/%(package)s.zdsock
-  daemon true
-  forever false
-  backoff-limit 10
-  exit-codes 0, 2
-  directory $INSTANCE
-  default-to-interactive true
-  # user zope
-  python %(python)s
-  zdrun %(zodb3_home)s/zdaemon/zdrun.py
-
-  # This logfile should match the one in the %(package)s.conf file.
-  # It is used by zdctl's logtail command; zdrun/zdctl doesn't write it.
-  logfile $INSTANCE/log/%(package)s.log
-</runner>
-"""
-
-zeoctl_template = """\
-#!/bin/sh
-# %(PACKAGE)s instance control script
-
-# The following two lines are for chkconfig.  On Red Hat Linux (and
-# some other systems), you can copy or symlink this script into
-# /etc/rc.d/init.d/ and then use chkconfig(8) to automatically start
-# %(PACKAGE)s at boot time.
-
-# chkconfig: 345 90 10
-# description: start a %(PACKAGE)s server
-
-PYTHON="%(python)s"
-ZODB3_HOME="%(zodb3_home)s"
-
-CONFIG_FILE="%(instance_home)s/etc/%(package)s.conf"
-
-PYTHONPATH="$ZODB3_HOME"
-export PYTHONPATH
-
-ZEOCTL="$ZODB3_HOME/ZEO/zeoctl.py"
-
-exec "$PYTHON" "$ZEOCTL" -C "$CONFIG_FILE" ${1+"$@"}
-"""
-
-runzeo_template = """\
-#!/bin/sh
-# %(PACKAGE)s instance start script
-
-PYTHON="%(python)s"
-ZODB3_HOME="%(zodb3_home)s"
-
-CONFIG_FILE="%(instance_home)s/etc/%(package)s.conf"
-
-PYTHONPATH="$ZODB3_HOME"
-export PYTHONPATH
-
-RUNZEO="$ZODB3_HOME/ZEO/runzeo.py"
-
-exec "$PYTHON" "$RUNZEO" -C "$CONFIG_FILE" ${1+"$@"}
-"""
-
-def main():
-    ZEOInstanceBuilder().run()
-    print "All done."
-
-class ZEOInstanceBuilder:
-    def run(self):
-        try:
-            opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
-        except getopt.error, msg:
-            print msg
-            sys.exit(2)
-        program = os.path.basename(sys.argv[0])
-        if opts:
-            # There are only the help options, so just dump some help:
-            msg = __doc__ % {"program": program}
-            print msg
-            sys.exit()
-        if len(args) not in [1, 2]:
-            print "Usage: %s home [port]" % program
-            sys.exit(2)
-
-        instance_home = args[0]
-        if not os.path.isabs(instance_home):
-            instance_home = os.path.abspath(instance_home)
-
-        for entry in sys.path:
-            if os.path.exists(os.path.join(entry, 'ZODB')):
-                zodb3_home = entry
-                break
-        else:
-            print "Can't find the Zope home (not in sys.path)"
-            sys.exit(2)
-
-        if args[1:]:
-            port = int(args[1])
-        else:
-            # XXX Change this to 8001 to match Zope's default (verify!).
-            port = 9999
-
-        params = self.get_params(zodb3_home, instance_home, port)
-        self.create(instance_home, params)
-
-    def get_params(self, zodb3_home, instance_home, port):
-        return {
-            "package": "zeo",
-            "PACKAGE": "ZEO",
-            "zodb3_home": zodb3_home,
-            "instance_home": instance_home,
-            "port": port,
-            "python": sys.executable,
-            }
-
-    def create(self, home, params):
-        makedir(home)
-        makedir(home, "etc")
-        makedir(home, "var")
-        makedir(home, "log")
-        makedir(home, "bin")
-        makefile(zeo_conf_template, home, "etc", "zeo.conf", **params)
-        makexfile(zeoctl_template, home, "bin", "zeoctl", **params)
-        makexfile(runzeo_template, home, "bin", "runzeo", **params)
-
-
-def which(program):
-    strpath = os.getenv("PATH")
-    binpath = strpath.split(os.pathsep)
-    for dir in binpath:
-        path = os.path.join(dir, program)
-        if os.path.isfile(path) and os.access(path, os.X_OK):
-            if not os.path.isabs(path):
-                path = os.path.abspath(path)
-            return path
-    raise IOError, "can't find %r on path %r" % (program, strpath)
-
-def makedir(*args):
-    path = ""
-    for arg in args:
-        path = os.path.join(path, arg)
-    mkdirs(path)
-    return path
-
-def mkdirs(path):
-    if os.path.isdir(path):
-        return
-    head, tail = os.path.split(path)
-    if head and tail and not os.path.isdir(head):
-        mkdirs(head)
-    os.mkdir(path)
-    print "Created directory", path
-
-def makefile(template, *args, **kwds):
-    path = makedir(*args[:-1])
-    path = os.path.join(path, args[-1])
-    data = template % kwds
-    if os.path.exists(path):
-        f = open(path)
-        olddata = f.read().strip()
-        f.close()
-        if olddata:
-            if olddata != data.strip():
-                print "Warning: not overwriting existing file %r" % path
-            return path
-    f = open(path, "w")
-    f.write(data)
-    f.close()
-    print "Wrote file", path
-    return path
-
-def makexfile(template, *args, **kwds):
-    path = makefile(template, *args, **kwds)
-    umask = os.umask(022)
-    os.umask(umask)
-    mode = 0777 & ~umask
-    if stat.S_IMODE(os.stat(path)[stat.ST_MODE]) != mode:
-        os.chmod(path, mode)
-        print "Changed mode for %s to %o" % (path, mode)
-    return path
-
-if __name__ == "__main__":
-    main()
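-
-# Illustration only:  the umask dance in makexfile() above.  os.umask()
-# both sets the mask and returns the previous one, so calling it twice
-# reads the process's umask without changing it; the executable mode is
-# then whatever the mask doesn't forbid.
-#
-#     umask = os.umask(022)    # set to 022, remember the old mask
-#     os.umask(umask)          # restore the old mask immediately
-#     mode = 0777 & ~umask     # e.g. umask 022 -> mode 0755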
diff --git a/branches/bug1734/src/ZEO/monitor.py b/branches/bug1734/src/ZEO/monitor.py
deleted file mode 100644
index 7790c164..00000000
--- a/branches/bug1734/src/ZEO/monitor.py
+++ /dev/null
@@ -1,162 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Monitor behavior of ZEO server and record statistics.
-
-$Id$
-"""
-
-import asyncore
-import socket
-import time
-import types
-import logging
-
-import ZEO
-
-class StorageStats:
-    """Per-storage usage statistics."""
-
-    def __init__(self):
-        self.loads = 0
-        self.stores = 0
-        self.commits = 0
-        self.aborts = 0
-        self.active_txns = 0
-        self.clients = 0
-        self.verifying_clients = 0
-        self.lock_time = None
-        self.conflicts = 0
-        self.conflicts_resolved = 0
-        self.start = time.ctime()
-
-    def parse(self, s):
-        # parse the dump format
-        lines = s.split("\n")
-        for line in lines:
-            field, value = line.split(":", 1)
-            if field == "Server started":
-                self.start = value
-            elif field == "Clients":
-                self.clients = int(value)
-            elif field == "Clients verifying":
-                self.verifying_clients = int(value)
-            elif field == "Active transactions":
-                self.active_txns = int(value)
-            elif field == "Commit lock held for":
-                # This assumes the dumped value is how long the lock has
-                # been held, in seconds (see dump() below).
-                self.lock_time = time.time() - int(value)
-            elif field == "Commits":
-                self.commits = int(value)
-            elif field == "Aborts":
-                self.aborts = int(value)
-            elif field == "Loads":
-                self.loads = int(value)
-            elif field == "Stores":
-                self.stores = int(value)
-            elif field == "Conflicts":
-                self.conflicts = int(value)
-            elif field == "Conflicts resolved":
-                self.conflicts_resolved = int(value)
-
-    def dump(self, f):
-        print >> f, "Server started:", self.start
-        print >> f, "Clients:", self.clients
-        print >> f, "Clients verifying:", self.verifying_clients
-        print >> f, "Active transactions:", self.active_txns
-        if self.lock_time:
-            howlong = time.time() - self.lock_time
-            print >> f, "Commit lock held for:", int(howlong)
-        print >> f, "Commits:", self.commits
-        print >> f, "Aborts:", self.aborts
-        print >> f, "Loads:", self.loads
-        print >> f, "Stores:", self.stores
-        print >> f, "Conflicts:", self.conflicts
-        print >> f, "Conflicts resolved:", self.conflicts_resolved
-
-class StatsClient(asyncore.dispatcher):
-
-    def __init__(self, sock, addr):
-        asyncore.dispatcher.__init__(self, sock)
-        self.buf = []
-        self.closed = 0
-
-    def close(self):
-        self.closed = 1
-        # The socket is closed after all the data is written.
-        # See handle_write().
-
-    def write(self, s):
-        self.buf.append(s)
-
-    def writable(self):
-        return len(self.buf)
-
-    def readable(self):
-        return 0
-
-    def handle_write(self):
-        s = "".join(self.buf)
-        self.buf = []
-        n = self.socket.send(s)
-        if n < len(s):
-            self.buf.append(s[:n])
-
-        if self.closed and not self.buf:
-            asyncore.dispatcher.close(self)
-
-class StatsServer(asyncore.dispatcher):
-
-    StatsConnectionClass = StatsClient
-
-    def __init__(self, addr, stats):
-        asyncore.dispatcher.__init__(self)
-        self.addr = addr
-        self.stats = stats
-        if type(self.addr) == types.TupleType:
-            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
-        else:
-            self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        self.set_reuse_addr()
-        logger = logging.getLogger('ZEO.monitor')
-        logger.info("listening on %s", repr(self.addr))
-        self.bind(self.addr)
-        self.listen(5)
-
-    def writable(self):
-        return 0
-
-    def readable(self):
-        return 1
-
-    def handle_accept(self):
-        try:
-            sock, addr = self.accept()
-        except socket.error:
-            return
-        f = self.StatsConnectionClass(sock, addr)
-        self.dump(f)
-        f.close()
-
-    def dump(self, f):
-        print >> f, "ZEO monitor server version %s" % ZEO.version
-        print >> f, time.ctime()
-        print >> f
-
-        L = self.stats.keys()
-        L.sort()
-        for k in L:
-            stats = self.stats[k]
-            print >> f, "Storage:", k
-            stats.dump(f)
-            print >> f
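-
-# Illustration only:  the monitor server writes one plain-text dump per
-# connection and then closes it, so any TCP client can read the stats.
-# A minimal sketch; the address is a made-up example, not from the source.
-def read_monitor(addr=("localhost", 8091)):
-    import socket
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.connect(addr)
-    chunks = []
-    while 1:
-        chunk = s.recv(8192)
-        if not chunk:                 # server closes after dumping
-            break
-        chunks.append(chunk)
-    s.close()
-    return "".join(chunks)            # starts with "ZEO monitor server ..."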
diff --git a/branches/bug1734/src/ZEO/runzeo.py b/branches/bug1734/src/ZEO/runzeo.py
deleted file mode 100644
index 51a1dd82..00000000
--- a/branches/bug1734/src/ZEO/runzeo.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!python
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Start the ZEO storage server.
-
-Usage: %s [-C URL] [-a ADDRESS] [-f FILENAME] [-h]
-
-Options:
--C/--configuration URL -- configuration file or URL
--a/--address ADDRESS -- server address of the form PORT, HOST:PORT, or PATH
-                        (a PATH must contain at least one "/")
--f/--filename FILENAME -- filename for FileStorage
--t/--timeout TIMEOUT -- transaction timeout in seconds (default no timeout)
--h/--help -- print this usage message and exit
--m/--monitor ADDRESS -- address of monitor server ([HOST:]PORT or PATH)
-
-Unless -C is specified, -a and -f are required.
-"""
-
-# The code here is designed to be reused by other, similar servers.
-# For the foreseeable future, it must work under Python 2.1 as well as
-# 2.2 and above.
-
-import os
-import sys
-import signal
-import socket
-import logging
-
-import ZConfig, ZConfig.datatypes
-import ZEO
-from zdaemon.zdoptions import ZDOptions
-
-logger = logging.getLogger('ZEO.runzeo')
-_pid = str(os.getpid())
-
-def log(msg, level=logging.INFO, exc_info=False):
-    """Internal: generic logging function."""
-    message = "(%s) %s" % (_pid, msg)
-    logger.log(level, message, exc_info=exc_info)
-
-
-def parse_address(arg):
-    # Caution:  Not part of the official ZConfig API.
-    obj = ZConfig.datatypes.SocketAddress(arg)
-    return obj.family, obj.address
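-
-# Illustration only:  parse_address() accepts the same strings as the
-# schema's socket-address keys.  Expected results (the addresses are
-# made-up examples, not from the source):
-#
-#     parse_address("localhost:8100")  ->  (socket.AF_INET, ("localhost", 8100))
-#     parse_address("/var/zeo.sock")   ->  (socket.AF_UNIX, "/var/zeo.sock")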
-
-
-class ZEOOptionsMixin:
-
-    storages = None
-
-    def handle_address(self, arg):
-        self.family, self.address = parse_address(arg)
-
-    def handle_monitor_address(self, arg):
-        self.monitor_family, self.monitor_address = parse_address(arg)
-
-    def handle_filename(self, arg):
-        from ZODB.config import FileStorage # That's a FileStorage *opener*!
-        class FSConfig:
-            def __init__(self, name, path):
-                self._name = name
-                self.path = path
-                self.create = 0
-                self.read_only = 0
-                self.stop = None
-                self.quota = None
-            def getSectionName(self):
-                return self._name
-        if not self.storages:
-            self.storages = []
-        name = str(1 + len(self.storages))
-        conf = FileStorage(FSConfig(name, arg))
-        self.storages.append(conf)
-
-    def add_zeo_options(self):
-        self.add(None, None, "a:", "address=", self.handle_address)
-        self.add(None, None, "f:", "filename=", self.handle_filename)
-        self.add("family", "zeo.address.family")
-        self.add("address", "zeo.address.address",
-                 required="no server address specified; use -a or -C")
-        self.add("read_only", "zeo.read_only", default=0)
-        self.add("invalidation_queue_size", "zeo.invalidation_queue_size",
-                 default=100)
-        self.add("transaction_timeout", "zeo.transaction_timeout",
-                 "t:", "timeout=", float)
-        self.add("monitor_address", "zeo.monitor_address.address",
-                 "m:", "monitor=", self.handle_monitor_address)
-        self.add('auth_protocol', 'zeo.authentication_protocol',
-                 None, 'auth-protocol=', default=None)
-        self.add('auth_database', 'zeo.authentication_database',
-                 None, 'auth-database=')
-        self.add('auth_realm', 'zeo.authentication_realm',
-                 None, 'auth-realm=')
-
-class ZEOOptions(ZDOptions, ZEOOptionsMixin):
-
-    logsectionname = "eventlog"
-    schemadir = os.path.dirname(ZEO.__file__)
-
-    def __init__(self):
-        ZDOptions.__init__(self)
-        self.add_zeo_options()
-        self.add("storages", "storages",
-                 required="no storages specified; use -f or -C")
-
-
-class ZEOServer:
-
-    def __init__(self, options):
-        self.options = options
-
-    def main(self):
-        self.setup_default_logging()
-        self.check_socket()
-        self.clear_socket()
-        try:
-            self.open_storages()
-            self.setup_signals()
-            self.create_server()
-            self.loop_forever()
-        finally:
-            self.close_storages()
-            self.clear_socket()
-
-    def setup_default_logging(self):
-        if self.options.config_logger is not None:
-            return
-        # No log file is configured; default to stderr.
-        logger = logging.getLogger()
-        handler = logging.StreamHandler()
-        handler.setLevel(logging.INFO)
-        logger.addHandler(handler)
-
-    def check_socket(self):
-        if self.can_connect(self.options.family, self.options.address):
-            self.options.usage("address %s already in use" %
-                               repr(self.options.address))
-
-    def can_connect(self, family, address):
-        s = socket.socket(family, socket.SOCK_STREAM)
-        try:
-            s.connect(address)
-        except socket.error:
-            return 0
-        else:
-            s.close()
-            return 1
-
-    def clear_socket(self):
-        if isinstance(self.options.address, type("")):
-            try:
-                os.unlink(self.options.address)
-            except os.error:
-                pass
-
-    def open_storages(self):
-        self.storages = {}
-        for opener in self.options.storages:
-            log("opening storage %r using %s"
-                % (opener.name, opener.__class__.__name__))
-            self.storages[opener.name] = opener.open()
-
-    def setup_signals(self):
-        """Set up signal handlers.
-
-        The signal handler for SIGFOO is a method handle_sigfoo().
-        If no handler method is defined for a signal, the signal
-        action is not changed from its initial value.  The handler
-        method is called without additional arguments.
-        """
-        if os.name != "posix":
-            return
-        if hasattr(signal, 'SIGXFSZ'):
-            signal.signal(signal.SIGXFSZ, signal.SIG_IGN) # Special case
-        init_signames()
-        for sig, name in signames.items():
-            method = getattr(self, "handle_" + name.lower(), None)
-            if method is not None:
-                def wrapper(sig_dummy, frame_dummy, method=method):
-                    method()
-                signal.signal(sig, wrapper)
-
-    def create_server(self):
-        from ZEO.StorageServer import StorageServer
-        self.server = StorageServer(
-            self.options.address,
-            self.storages,
-            read_only=self.options.read_only,
-            invalidation_queue_size=self.options.invalidation_queue_size,
-            transaction_timeout=self.options.transaction_timeout,
-            monitor_address=self.options.monitor_address,
-            auth_protocol=self.options.auth_protocol,
-            auth_database=self.options.auth_database,
-            auth_realm=self.options.auth_realm)
-
-    def loop_forever(self):
-        import ThreadedAsync.LoopCallback
-        ThreadedAsync.LoopCallback.loop()
-
-    def handle_sigterm(self):
-        log("terminated by SIGTERM")
-        sys.exit(0)
-
-    def handle_sigint(self):
-        log("terminated by SIGINT")
-        sys.exit(0)
-
-    def handle_sighup(self):
-        log("restarted by SIGHUP")
-        sys.exit(1)
-
-    def handle_sigusr2(self):
-        # TODO: this used to reinitialize zLOG. How do I achieve
-        # the same effect with Python's logging package?
-        # Should we restart as with SIGHUP?
-        log("received SIGUSR2, but it was not handled!", level=logging.WARNING)
-
-    def close_storages(self):
-        for name, storage in self.storages.items():
-            log("closing storage %r" % name)
-            try:
-                storage.close()
-            except: # Keep going
-                log("failed to close storage %r" % name,
-                    level=logging.EXCEPTION, exc_info=True)
-
-
-# Signal names
-
-signames = None
-
-def signame(sig):
-    """Return a symbolic name for a signal.
-
-    Return "signal NNN" if there is no corresponding SIG name in the
-    signal module.
-    """
-
-    if signames is None:
-        init_signames()
-    return signames.get(sig) or "signal %d" % sig
-
-def init_signames():
-    global signames
-    signames = {}
-    for name, sig in signal.__dict__.items():
-        k_startswith = getattr(name, "startswith", None)
-        if k_startswith is None:
-            continue
-        if k_startswith("SIG") and not k_startswith("SIG_"):
-            signames[sig] = name
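-
-# Illustration only:  after init_signames(), signame() maps numbers back
-# to names (the exact numbers are platform-dependent):
-#
-#     signame(signal.SIGTERM)   ->  "SIGTERM"
-#     signame(12345)            ->  "signal 12345"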
-
-
-# Main program
-
-def main(args=None):
-    options = ZEOOptions()
-    options.realize(args)
-    s = ZEOServer(options)
-    s.main()
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZEO/schema.xml b/branches/bug1734/src/ZEO/schema.xml
deleted file mode 100644
index acaf863b..00000000
--- a/branches/bug1734/src/ZEO/schema.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<schema>
-
-  <!-- note that zeoctl.xml is a closely related schema which should
-       match this schema, but should require the "runner" section -->
-
-  <description>
-    This schema describes the configuration of the ZEO storage server
-    process.
-  </description>
-
-  <!-- Use the storage types defined by ZODB. -->
-  <import package="ZODB"/>
-
-  <!-- Use the ZEO server information structure. -->
-  <import package="ZEO"/>
-
-  <import package="ZConfig.components.logger"/>
-
-  <!-- runner control -->
-  <import package="zdaemon"/>
-
-
-  <section type="zeo" name="*" required="yes" attribute="zeo" />
-
-  <section type="runner" name="*" required="no" attribute="runner" />
-
-  <multisection name="+" type="ZODB.storage"
-                attribute="storages"
-                required="yes">
-    <description>
-      One or more storages that are provided by the ZEO server.  The
-      section names are used as the storage names, and must be unique
-      within each ZEO storage server.  Traditionally, these names
-      represent small integers starting at '1'.
-    </description>
-  </multisection>
-
-  <section name="*" type="eventlog" attribute="eventlog" required="no" />
-
-</schema>
diff --git a/branches/bug1734/src/ZEO/simul.py b/branches/bug1734/src/ZEO/simul.py
deleted file mode 100644
index fb296ebc..00000000
--- a/branches/bug1734/src/ZEO/simul.py
+++ /dev/null
@@ -1,757 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Cache simulation.
-
-Usage: simul.py [-bflyz] [-X] [-s size] tracefile
-
-Use one of -b, -f, -l, -y or -z to select the cache simulator:
--b: buddy system allocator
--f: simple free list allocator
--l: idealized LRU (no allocator)
--y: variation on the existing ZEO cache that copies to current file
--z: existing ZEO cache (default)
-
-Options:
--s size: cache size in MB (default 20 MB)
--X: enable heuristic checking for misaligned records: oids > 2**32
-    will be rejected; this requires the tracefile to be seekable
-
-Note: the buddy system allocator rounds the cache size up to a power of 2
-"""
-
-import sys
-import time
-import getopt
-import struct
-
-def usage(msg):
-    print >>sys.stderr, msg
-    print >>sys.stderr, __doc__
-
-def main():
-    # Parse options
-    MB = 1000*1000
-    cachelimit = 20*MB
-    simclass = ZEOCacheSimulation
-    heuristic = 0
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X")
-    except getopt.error, msg:
-        usage(msg)
-        return 2
-    for o, a in opts:
-        if o == '-b':
-            simclass = BuddyCacheSimulation
-        if o == '-f':
-            simclass = SimpleCacheSimulation
-        if o == '-l':
-            simclass = LRUCacheSimulation
-        if o == '-y':
-            simclass = AltZEOCacheSimulation
-        if o == '-z':
-            simclass = ZEOCacheSimulation
-        if o == '-s':
-            cachelimit = int(float(a)*MB)
-        if o == '-X':
-            heuristic = 1
-    if len(args) != 1:
-        usage("exactly one file argument required")
-        return 2
-    filename = args[0]
-
-    # Open file
-    if filename.endswith(".gz"):
-        # Open gzipped file
-        try:
-            import gzip
-        except ImportError:
-            print >>sys.stderr,  "can't read gzipped files (no module gzip)"
-            return 1
-        try:
-            f = gzip.open(filename, "rb")
-        except IOError, msg:
-            print >>sys.stderr,  "can't open %s: %s" % (filename, msg)
-            return 1
-    elif filename == "-":
-        # Read from stdin
-        f = sys.stdin
-    else:
-        # Open regular file
-        try:
-            f = open(filename, "rb")
-        except IOError, msg:
-            print >>sys.stderr,  "can't open %s: %s" % (filename, msg)
-            return 1
-
-    # Create simulation object
-    sim = simclass(cachelimit)
-
-    # Print output header
-    sim.printheader()
-
-    # Read trace file, simulating cache behavior
-    offset = 0
-    records = 0
-    f_read = f.read
-    struct_unpack = struct.unpack
-    while 1:
-        # Read a record and decode it
-        r = f_read(10)
-        if len(r) < 10:
-            break
-        offset += 10
-        ts, code, lenoid = struct_unpack(">iiH", r)
-        if ts == 0:
-            # Must be a misaligned record caused by a crash
-            ##print "Skipping 8 bytes at offset", offset-8
-            continue
-        r = f_read(8 + lenoid)
-        if len(r) < 8 + lenoid:
-            break
-        offset += 8 + lenoid
-        records += 1
-        serial, oid = struct_unpack(">8s%ds" % lenoid, r)
-        # Decode the code
-        dlen, version, code, current = (code & 0x7fffff00,
-                                        code & 0x80,
-                                        code & 0x7e,
-                                        code & 0x01)
-        # And pass it to the simulation
-        sim.event(ts, dlen, version, code, current, oid, serial)
-
-    # Finish simulation
-    sim.finish()
-
-    # Exit code from main()
-    return 0
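-
-# Worked example (illustration only) of the code-field decoding in main():
-# a raw code word of 0x143a splits into
-#     dlen    = 0x143a & 0x7fffff00  ->  0x1400 (5120 data bytes)
-#     version = 0x143a & 0x80        ->  0
-#     code    = 0x143a & 0x7e        ->  0x3a (an update, per event() below)
-#     current = 0x143a & 0x01        ->  0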
-
-class Simulation:
-
-    """Base class for simulations.
-
-    The driver program calls: event(), printheader(), finish().
-
-    The standard event() method calls these additional methods:
-    write(), load(), inval(), report(), restart(); the standard
-    finish() method also calls report().
-
-    """
-
-    def __init__(self, cachelimit):
-        self.cachelimit = cachelimit
-        # Initialize global statistics
-        self.epoch = None
-        self.total_loads = 0
-        self.total_hits = 0 # Subclass must increment
-        self.total_invals = 0
-        self.total_writes = 0
-        # Reset per-run statistics and set up simulation data
-        self.restart()
-
-    def restart(self):
-        # Reset per-run statistics
-        self.loads = 0
-        self.hits = 0 # Subclass must increment
-        self.invals = 0
-        self.writes = 0
-        self.ts0 = None
-
-    def event(self, ts, dlen, _version, code, _current, oid, _serial):
-        # Record first and last timestamp seen
-        if self.ts0 is None:
-            self.ts0 = ts
-            if self.epoch is None:
-                self.epoch = ts
-        self.ts1 = ts
-
-        # Simulate cache behavior.  Use load hits, updates and stores
-        # only (each load miss is followed immediately by a store
-        # unless the object in fact did not exist).  Updates always write.
-        if dlen and code & 0x70 in (0x20, 0x30, 0x50):
-            if code == 0x3A:
-                # Update
-                self.writes += 1
-                self.total_writes += 1
-                self.write(oid, dlen)
-            else:
-                # Load hit or store -- these are really the load requests
-                self.loads += 1
-                self.total_loads += 1
-                self.load(oid, dlen)
-        elif code & 0x70 == 0x10:
-            # Invalidate
-            self.inval(oid)
-        elif code == 0x00:
-            # Restart
-            self.report()
-            self.restart()
-
-    def write(self, oid, size):
-        pass
-
-    def load(self, oid, size):
-        pass
-
-    def inval(self, oid):
-        pass
-
-    format = "%12s %9s %8s %8s %6s %6s %6s %6s"
-
-    # Subclass should override extraname to name known instance variables;
-    # if extraname is 'foo', both self.foo and self.total_foo must exist:
-    extraname = "*** please override ***"
-
-    def printheader(self):
-        print "%s, cache size %s bytes" % (self.__class__.__name__,
-                                           addcommas(self.cachelimit))
-        print self.format % (
-            "START TIME", "DURATION", "LOADS", "HITS",
-            "INVALS", "WRITES", self.extraname.upper(), "HITRATE")
-
-    nreports = 0
-
-    def report(self):
-        if self.loads:
-            self.nreports += 1
-            print self.format % (
-                time.ctime(self.ts0)[4:-8],
-                duration(self.ts1 - self.ts0),
-                self.loads, self.hits, self.invals, self.writes,
-                getattr(self, self.extraname),
-                hitrate(self.loads, self.hits))
-
-    def finish(self):
-        self.report()
-        if self.nreports > 1:
-            print (self.format + " OVERALL") % (
-                time.ctime(self.epoch)[4:-8],
-                duration(self.ts1 - self.epoch),
-                self.total_loads,
-                self.total_hits,
-                self.total_invals,
-                self.total_writes,
-                getattr(self, "total_" + self.extraname),
-                hitrate(self.total_loads, self.total_hits))
-
-class ZEOCacheSimulation(Simulation):
-
-    """Simulate the current (ZEO 1.0 and 2.0) ZEO cache behavior.
-
-    This assumes the cache is not persistent (we don't know how to
-    simulate cache validation).
-
-    """
-
-    extraname = "flips"
-
-    def __init__(self, cachelimit):
-        # Initialize base class
-        Simulation.__init__(self, cachelimit)
-        # Initialize additional global statistics
-        self.total_flips = 0
-
-    def restart(self):
-        # Reset base class
-        Simulation.restart(self)
-        # Reset additional per-run statistics
-        self.flips = 0
-        # Set up simulation
-        self.filesize = [4, 4] # account for magic number
-        self.fileoids = [{}, {}]
-        self.current = 0 # index into filesize, fileoids
-
-    def load(self, oid, size):
-        if (self.fileoids[self.current].get(oid) or
-            self.fileoids[1 - self.current].get(oid)):
-            self.hits += 1
-            self.total_hits += 1
-        else:
-            self.write(oid, size)
-
-    def write(self, oid, size):
-        # Fudge because size is rounded up to multiples of 256.  (31
-        # is header overhead per cache record; 127 is to compensate
-        # for rounding up to multiples of 256.)
-        size = size + 31 - 127
-        if self.filesize[self.current] + size > self.cachelimit / 2:
-            # Cache flip
-            self.flips += 1
-            self.total_flips += 1
-            self.current = 1 - self.current
-            self.filesize[self.current] = 4
-            self.fileoids[self.current] = {}
-        self.filesize[self.current] += size
-        self.fileoids[self.current][oid] = 1
-
-    def inval(self, oid):
-        if self.fileoids[self.current].get(oid):
-            self.invals += 1
-            self.total_invals += 1
-            del self.fileoids[self.current][oid]
-        elif self.fileoids[1 - self.current].get(oid):
-            self.invals += 1
-            self.total_invals += 1
-            del self.fileoids[1 - self.current][oid]
-
-class AltZEOCacheSimulation(ZEOCacheSimulation):
-
-    """A variation of the ZEO cache that copies to the current file.
-
-    When a hit is found in the non-current cache file, it is copied to
-    the current cache file.  Exception: when the copy would cause a
-    cache flip, we don't copy (this is part laziness, part concern
-    over causing extraneous flips).
-    """
-
-    def load(self, oid, size):
-        if self.fileoids[self.current].get(oid):
-            self.hits += 1
-            self.total_hits += 1
-        elif self.fileoids[1 - self.current].get(oid):
-            self.hits += 1
-            self.total_hits += 1
-            # Simulate a write, unless it would cause a flip
-            size = size + 31 - 127
-            if self.filesize[self.current] + size <= self.cachelimit / 2:
-                self.filesize[self.current] += size
-                self.fileoids[self.current][oid] = 1
-                del self.fileoids[1 - self.current][oid]
-        else:
-            self.write(oid, size)
-
-class LRUCacheSimulation(Simulation):
-
-    extraname = "evicts"
-
-    def __init__(self, cachelimit):
-        # Initialize base class
-        Simulation.__init__(self, cachelimit)
-        # Initialize additional global statistics
-        self.total_evicts = 0
-
-    def restart(self):
-        # Reset base class
-        Simulation.restart(self)
-        # Reset additional per-run statistics
-        self.evicts = 0
-        # Set up simulation
-        self.cache = {}
-        self.size = 0
-        self.head = Node(None, None)
-        self.head.linkbefore(self.head)
-
-    def load(self, oid, size):
-        node = self.cache.get(oid)
-        if node is not None:
-            self.hits += 1
-            self.total_hits += 1
-            node.linkbefore(self.head)
-        else:
-            self.write(oid, size)
-
-    def write(self, oid, size):
-        node = self.cache.get(oid)
-        if node is not None:
-            node.unlink()
-            assert self.head.next is not None
-            self.size -= node.size
-        node = Node(oid, size)
-        self.cache[oid] = node
-        node.linkbefore(self.head)
-        self.size += size
-        # Evict LRU nodes
-        while self.size > self.cachelimit:
-            self.evicts += 1
-            self.total_evicts += 1
-            node = self.head.next
-            assert node is not self.head
-            node.unlink()
-            assert self.head.next is not None
-            del self.cache[node.oid]
-            self.size -= node.size
-
-    def inval(self, oid):
-        node = self.cache.get(oid)
-        if node is not None:
-            assert node.oid == oid
-            self.invals += 1
-            self.total_invals += 1
-            node.unlink()
-            assert self.head.next is not None
-            del self.cache[oid]
-            self.size -= node.size
-            assert self.size >= 0
-
-class Node:
-
-    """Node in a doubly-linked list, storing oid and size as payload.
-
-    A node can be linked or unlinked; in the latter case, next and
-    prev are None.  Initially a node is unlinked.
-
-    """
-    # Make it a new-style class in Python 2.2 and up; no effect in 2.1
-    __metaclass__ = type
-    __slots__ = ['prev', 'next', 'oid', 'size']
-
-    def __init__(self, oid, size):
-        self.oid = oid
-        self.size = size
-        self.prev = self.next = None
-
-    def unlink(self):
-        prev = self.prev
-        next = self.next
-        if prev is not None:
-            assert next is not None
-            assert prev.next is self
-            assert next.prev is self
-            prev.next = next
-            next.prev = prev
-            self.prev = self.next = None
-        else:
-            assert next is None
-
-    def linkbefore(self, next):
-        self.unlink()
-        prev = next.prev
-        if prev is None:
-            assert next.next is None
-            prev = next
-        self.prev = prev
-        self.next = next
-        prev.next = next.prev = self
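-
-# Illustration only:  the list is circular with a sentinel head, so an
-# empty list is the head linked to itself, and most-recently-used order
-# is kept by linking nodes just before the head:
-#
-#     head = Node(None, None)
-#     head.linkbefore(head)     # empty list: head.next is head.prev is head
-#     a = Node("oid-a", 100)
-#     b = Node("oid-b", 200)
-#     a.linkbefore(head)        # list (LRU..MRU): a
-#     b.linkbefore(head)        # list (LRU..MRU): a, b
-#     assert head.next is a     # head.next is always the eviction candidate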
-
-class BuddyCacheSimulation(LRUCacheSimulation):
-
-    def __init__(self, cachelimit):
-        LRUCacheSimulation.__init__(self, roundup(cachelimit))
-
-    def restart(self):
-        LRUCacheSimulation.restart(self)
-        self.allocator = self.allocatorFactory(self.cachelimit)
-
-    def allocatorFactory(self, size):
-        return BuddyAllocator(size)
-
-    # LRUCacheSimulation.load() is just fine
-
-    def write(self, oid, size):
-        node = self.cache.get(oid)
-        if node is not None:
-            node.unlink()
-            assert self.head.next is not None
-            self.size -= node.size
-            self.allocator.free(node)
-        while 1:
-            node = self.allocator.alloc(size)
-            if node is not None:
-                break
-            # Failure to allocate.  Evict something and try again.
-            node = self.head.next
-            assert node is not self.head
-            self.evicts += 1
-            self.total_evicts += 1
-            node.unlink()
-            assert self.head.next is not None
-            del self.cache[node.oid]
-            self.size -= node.size
-            self.allocator.free(node)
-        node.oid = oid
-        self.cache[oid] = node
-        node.linkbefore(self.head)
-        self.size += node.size
-
-    def inval(self, oid):
-        node = self.cache.get(oid)
-        if node is not None:
-            assert node.oid == oid
-            self.invals += 1
-            self.total_invals += 1
-            node.unlink()
-            assert self.head.next is not None
-            del self.cache[oid]
-            self.size -= node.size
-            assert self.size >= 0
-            self.allocator.free(node)
-
-class SimpleCacheSimulation(BuddyCacheSimulation):
-
-    def allocatorFactory(self, size):
-        return SimpleAllocator(size)
-
-    def finish(self):
-        BuddyCacheSimulation.finish(self)
-        self.allocator.report()
-
-MINSIZE = 256
-
-class BuddyAllocator:
-
-    def __init__(self, cachelimit):
-        cachelimit = roundup(cachelimit)
-        self.cachelimit = cachelimit
-        self.avail = {} # Map rounded-up sizes to free list node heads
-        self.nodes = {} # Map address to node
-        k = MINSIZE
-        while k <= cachelimit:
-            self.avail[k] = n = Node(None, None) # Not BlockNode; has no addr
-            n.linkbefore(n)
-            k += k
-        node = BlockNode(None, cachelimit, 0)
-        self.nodes[0] = node
-        node.linkbefore(self.avail[cachelimit])
-
-    def alloc(self, size):
-        size = roundup(size)
-        k = size
-        while k <= self.cachelimit:
-            head = self.avail[k]
-            node = head.next
-            if node is not head:
-                break
-            k += k
-        else:
-            return None # Store is full, or block is too large
-        node.unlink()
-        size2 = node.size
-        while size2 > size:
-            size2 = size2 / 2
-            assert size2 >= size
-            node.size = size2
-            buddy = BlockNode(None, size2, node.addr + size2)
-            self.nodes[buddy.addr] = buddy
-            buddy.linkbefore(self.avail[size2])
-        node.oid = 1 # Flag as in-use
-        return node
-
-    def free(self, node):
-        assert node is self.nodes[node.addr]
-        assert node.prev is node.next is None
-        node.oid = None # Flag as free
-        while node.size < self.cachelimit:
-            buddy_addr = node.addr ^ node.size
-            buddy = self.nodes[buddy_addr]
-            assert buddy.addr == buddy_addr
-            if buddy.oid is not None or buddy.size != node.size:
-                break
-            # Merge node with buddy
-            buddy.unlink()
-            if buddy.addr < node.addr: # buddy prevails
-                del self.nodes[node.addr]
-                node = buddy
-            else: # node prevails
-                del self.nodes[buddy.addr]
-            node.size *= 2
-        assert node is self.nodes[node.addr]
-        node.linkbefore(self.avail[node.size])
-
-    def dump(self, msg=""):
-        if msg:
-            print msg,
-        size = MINSIZE
-        blocks = bytes = 0
-        while size <= self.cachelimit:
-            head = self.avail[size]
-            node = head.next
-            count = 0
-            while node is not head:
-                count += 1
-                node = node.next
-            if count:
-                print "%d:%d" % (size, count),
-            blocks += count
-            bytes += count*size
-            size += size
-        print "-- %d, %d" % (bytes, blocks)
-
-def roundup(size):
-    k = MINSIZE
-    while k < size:
-        k += k
-    return k
-
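-# Illustrative sketch (not part of the original module): the two
-# size/address tricks BuddyAllocator relies on.  Block sizes are always
-# powers of two >= MINSIZE, so a block's buddy is found by flipping the
-# single address bit corresponding to its size, as free() does above.
-def _buddy_demo():
-    assert roundup(100) == 256
-    assert roundup(300) == 512
-    addr, size = 1024, 256
-    assert (addr ^ size) == 1280  # buddy of the block at 1024
-    assert (1280 ^ size) == addr  # flipping the same bit again goes back
-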
-class SimpleAllocator:
-
-    def __init__(self, arenasize):
-        self.arenasize = arenasize
-        self.avail = BlockNode(None, 0, 0) # Weird: empty block as list head
-        self.rover = self.avail
-        node = BlockNode(None, arenasize, 0)
-        node.linkbefore(self.avail)
-        self.taglo = {0: node}
-        self.taghi = {arenasize: node}
-        # Allocator statistics
-        self.nallocs = 0
-        self.nfrees = 0
-        self.allocloops = 0
-        self.freebytes = arenasize
-        self.freeblocks = 1
-        self.allocbytes = 0
-        self.allocblocks = 0
-
-    def report(self):
-        print ("NA=%d AL=%d NF=%d ABy=%d ABl=%d FBy=%d FBl=%d" %
-               (self.nallocs, self.allocloops,
-                self.nfrees,
-                self.allocbytes, self.allocblocks,
-                self.freebytes, self.freeblocks))
-
-    def alloc(self, size):
-        self.nallocs += 1
-        # First fit algorithm
-        rover = stop = self.rover
-        while 1:
-            self.allocloops += 1
-            if rover.size >= size:
-                break
-            rover = rover.next
-            if rover is stop:
-                return None # We went round the list without finding space
-        if rover.size == size:
-            self.rover = rover.next
-            rover.unlink()
-            del self.taglo[rover.addr]
-            del self.taghi[rover.addr + size]
-            self.freeblocks -= 1
-            self.allocblocks += 1
-            self.freebytes -= size
-            self.allocbytes += size
-            return rover
-        # Take space from the beginning of the roving pointer
-        assert rover.size > size
-        node = BlockNode(None, size, rover.addr)
-        del self.taglo[rover.addr]
-        rover.size -= size
-        rover.addr += size
-        self.taglo[rover.addr] = rover
-        #self.freeblocks += 0 # No change here
-        self.allocblocks += 1
-        self.freebytes -= size
-        self.allocbytes += size
-        return node
-
-    def free(self, node):
-        self.nfrees += 1
-        self.freeblocks += 1
-        self.allocblocks -= 1
-        self.freebytes += node.size
-        self.allocbytes -= node.size
-        node.linkbefore(self.avail)
-        self.taglo[node.addr] = node
-        self.taghi[node.addr + node.size] = node
-        x = self.taghi.get(node.addr)
-        if x is not None:
-            # Merge x into node
-            x.unlink()
-            self.freeblocks -= 1
-            del self.taglo[x.addr]
-            del self.taghi[x.addr + x.size]
-            del self.taglo[node.addr]
-            node.addr = x.addr
-            node.size += x.size
-            self.taglo[node.addr] = node
-        x = self.taglo.get(node.addr + node.size)
-        if x is not None:
-            # Merge x into node
-            x.unlink()
-            self.freeblocks -= 1
-            del self.taglo[x.addr]
-            del self.taghi[x.addr + x.size]
-            del self.taghi[node.addr + node.size]
-            node.size += x.size
-            self.taghi[node.addr + node.size] = node
-        # Either of the merges above may have invalidated the rover, so
-        # it's simplest to reset it to the newly freed block.
-        self.rover = node
-
-    def dump(self, msg=""):
-        if msg:
-            print msg,
-        count = 0
-        bytes = 0
-        node = self.avail.next
-        while node is not self.avail:
-            bytes += node.size
-            count += 1
-            node = node.next
-        print count, "free blocks,", bytes, "free bytes"
-        self.report()
-
-class BlockNode(Node):
-
-    __slots__ = ['addr']
-
-    def __init__(self, oid, size, addr):
-        Node.__init__(self, oid, size)
-        self.addr = addr
-
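-# Illustrative usage sketch (not in the original file) of the boundary-tag
-# scheme above: taglo/taghi let free() find and merge adjacent free blocks
-# in O(1) by looking up the addresses bordering the freed block.
-def _simple_allocator_demo():
-    a = SimpleAllocator(4096)
-    x = a.alloc(1000)   # carved from the front of the arena
-    y = a.alloc(1000)   # carved immediately after x
-    a.free(x)           # no free neighbors yet; stays a separate block
-    a.free(y)           # merges with x below it and the arena tail above
-    a.dump("after freeing both:")  # expect: 1 free block, 4096 free bytes
-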
-def testallocator(factory=BuddyAllocator):
-    # Run one of Knuth's experiments as a test
-    import random
-    import heapq # This only runs with Python 2.3, folks :-)
-    reportfreq = 100
-    cachelimit = 2**17
-    cache = factory(cachelimit)
-    queue = []
-    T = 0
-    blocks = 0
-    while T < 5000:
-        while queue and queue[0][0] <= T:
-            time, node = heapq.heappop(queue)
-            assert time == T
-            ##print "free addr=%d, size=%d" % (node.addr, node.size)
-            cache.free(node)
-            blocks -= 1
-        size = random.randint(100, 2000)
-        lifetime = random.randint(1, 100)
-        node = cache.alloc(size)
-        if node is None:
-            print "out of mem"
-            cache.dump("T=%4d: %d blocks;" % (T, blocks))
-            break
-        else:
-            ##print "alloc addr=%d, size=%d" % (node.addr, node.size)
-            blocks += 1
-            heapq.heappush(queue, (T + lifetime, node))
-        T = T+1
-        if T % reportfreq == 0:
-            cache.dump("T=%4d: %d blocks;" % (T, blocks))
-
-def hitrate(loads, hits):
-    return "%5.1f%%" % (100.0 * hits / max(1, loads))
-
-def duration(secs):
-
-    mm, ss = divmod(secs, 60)
-    hh, mm = divmod(mm, 60)
-    if hh:
-        return "%d:%02d:%02d" % (hh, mm, ss)
-    if mm:
-        return "%d:%02d" % (mm, ss)
-    return "%d" % ss
-
-def addcommas(n):
-    sign, s = '', str(n)
-    if s[0] == '-':
-        sign, s = '-', s[1:]
-    i = len(s) - 3
-    while i > 0:
-        s = s[:i] + ',' + s[i:]
-        i -= 3
-    return sign + s
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/branches/bug1734/src/ZEO/stats.py b/branches/bug1734/src/ZEO/stats.py
deleted file mode 100755
index 263fb60a..00000000
--- a/branches/bug1734/src/ZEO/stats.py
+++ /dev/null
@@ -1,392 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Trace file statistics analyzer.
-
-Usage: stats.py [-h] [-i interval] [-q] [-s] [-S] [-v] [-X] tracefile
--h: print histogram of object load frequencies
--i: summarizing interval in minutes (default 15; max 60)
--q: quiet; don't print summaries
--s: print histogram of object sizes
--S: don't print statistics
--v: verbose; print each record
--X: enable heuristic checking for misaligned records: oids > 2**32
-    will be rejected; this requires the tracefile to be seekable
-"""
-
-"""File format:
-
-Each record is 26 bytes, plus a variable number of bytes for the object
-id, with the following layout.  Numbers are big-endian integers.
-
-Offset  Size  Contents
-
-0       4     timestamp (seconds since 1/1/1970)
-4       3     data size, in 256-byte increments, rounded up
-7       1     code (see below)
-8       2     object id length
-10      8     start tid
-18      8     end tid
-26  variable  object id
-
-The code at offset 7 packs three fields:
-
-Mask    bits  Contents
-
-0x80    1     set if there was a non-empty version string
-0x7e    6     function and outcome code
-0x01    1     current cache file (0 or 1)
-
-The function and outcome codes are documented in detail at the end of
-this file in the 'explain' dictionary.  Note that the keys there (and
-also the arguments to _trace() in ClientStorage.py) are 'code & 0x7e',
-i.e. the low bit is always zero.
-"""
-
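-# Illustrative decoder (not in the original file) for a single trace
-# record, following the layout documented above; it assumes a well-formed
-# file.  main() below does the same thing inline, with error handling
-# and statistics.
-def _read_one_record(f):
-    import struct
-    header = f.read(8)
-    if len(header) < 8:
-        return None  # end of file
-    ts, word = struct.unpack(">ii", header)
-    # The 3-byte size field counts 256-byte blocks, so masking the packed
-    # word yields the size in bytes directly; the low byte is the code.
-    dlen, code = word & 0x7fffff00, word & 0xff
-    oidlen, start_tid, end_tid = struct.unpack(">H8s8s", f.read(18))
-    oid = f.read(oidlen)
-    return ts, dlen, code, oid, start_tid, end_tid
-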
-import sys
-import time
-import getopt
-import struct
-from types import StringType
-
-def usage(msg):
-    print >>sys.stderr, msg
-    print >>sys.stderr, __doc__
-
-def main():
-    # Parse options
-    verbose = 0
-    quiet = 0
-    dostats = 1
-    print_size_histogram = 0
-    print_histogram = 0
-    interval = 900 # Every 15 minutes
-    heuristic = 0
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hi:qsSvX")
-    except getopt.error, msg:
-        usage(msg)
-        return 2
-    for o, a in opts:
-        if o == '-h':
-            print_histogram = 1
-        if o == "-i":
-            interval = int(60 * float(a))
-            if interval <= 0:
-                interval = 60
-            elif interval > 3600:
-                interval = 3600
-        if o == "-q":
-            quiet = 1
-            verbose = 0
-        if o == "-s":
-            print_size_histogram = 1
-        if o == "-S":
-            dostats = 0
-        if o == "-v":
-            verbose = 1
-        if o == '-X':
-            heuristic = 1
-    if len(args) != 1:
-        usage("exactly one file argument required")
-        return 2
-    filename = args[0]
-
-    # Open file
-    if filename.endswith(".gz"):
-        # Open gzipped file
-        try:
-            import gzip
-        except ImportError:
-            print >>sys.stderr,  "can't read gzipped files (no module gzip)"
-            return 1
-        try:
-            f = gzip.open(filename, "rb")
-        except IOError, msg:
-            print >>sys.stderr,  "can't open %s: %s" % (filename, msg)
-            return 1
-    elif filename == '-':
-        # Read from stdin
-        f = sys.stdin
-    else:
-        # Open regular file
-        try:
-            f = open(filename, "rb")
-        except IOError, msg:
-            print >>sys.stderr,  "can't open %s: %s" % (filename, msg)
-            return 1
-
-    # Read file, gathering statistics, and printing each record if verbose
-    rt0 = time.time()
-    # bycode -- map code to count of occurrences
-    bycode = {}
-    # records -- number of records
-    records = 0
-    # versions -- number of records with versions
-    versions = 0
-    t0 = te = None
-    # datarecords -- number of records with dlen set
-    datarecords = 0
-    datasize = 0L
-    # oids -- maps oid to number of times it was loaded
-    oids = {}
-    # bysize -- maps data size to number of loads
-    bysize = {}
-    # bysizew -- maps data size to number of writes
-    bysizew = {}
-    total_loads = 0
-    byinterval = {}
-    thisinterval = None
-    h0 = he = None
-    offset = 0
-    f_read = f.read
-    struct_unpack = struct.unpack
-    try:
-        while 1:
-            r = f_read(8)
-            if len(r) < 8:
-                break
-            offset += 8
-            ts, code = struct_unpack(">ii", r)
-            if ts == 0:
-                # Must be a misaligned record caused by a crash
-                if not quiet:
-                    print "Skipping 8 bytes at offset", offset-8
-                continue
-            r = f_read(18)
-            if len(r) < 18:
-                break
-            offset += 18
-            records += 1
-            oidlen, start_tid, end_tid = struct_unpack(">H8s8s", r)
-            oid = f_read(oidlen)
-            if len(oid) != oidlen:
-                break
-            offset += oidlen
-            if t0 is None:
-                t0 = ts
-                thisinterval = t0 / interval
-                h0 = he = ts
-            te = ts
-            if ts / interval != thisinterval:
-                if not quiet:
-                    dumpbyinterval(byinterval, h0, he)
-                byinterval = {}
-                thisinterval = ts / interval
-                h0 = ts
-            he = ts
-            dlen, code = code & 0x7fffff00, code & 0xff
-            if dlen:
-                datarecords += 1
-                datasize += dlen
-            version = '-'
-            if code & 0x80:
-                version = 'V'
-                versions += 1
-            # Strip the "current cache file" flag (low bit) from the code.
-            code, current = code & 0x7e, code & 1
-            bycode[code] = bycode.get(code, 0) + 1
-            byinterval[code] = byinterval.get(code, 0) + 1
-            if dlen:
-                if code & 0x70 == 0x20: # All loads
-                    bysize[dlen] = d = bysize.get(dlen) or {}
-                    d[oid] = d.get(oid, 0) + 1
-                elif code & 0x70 == 0x50: # All stores
-                    bysizew[dlen] = d = bysizew.get(dlen) or {}
-                    d[oid] = d.get(oid, 0) + 1
-            if verbose:
-                print "%s %d %02x %s %016x %016x %1s %s" % (
-                    time.ctime(ts)[4:-5],
-                    current,
-                    code,
-                    oid_repr(oid),
-                    U64(start_tid),
-                    U64(end_tid),
-                    version,
-                    dlen and str(dlen) or "")
-            if code & 0x70 == 0x20:
-                oids[oid] = oids.get(oid, 0) + 1
-                total_loads += 1
-            if code == 0x00:
-                if not quiet:
-                    dumpbyinterval(byinterval, h0, he)
-                byinterval = {}
-                thisinterval = ts / interval
-                h0 = he = ts
-                if not quiet:
-                    print time.ctime(ts)[4:-5],
-                    print '='*20, "Restart", '='*20
-    except KeyboardInterrupt:
-        print "\nInterrupted.  Stats so far:\n"
-
-    f.close()
-    rte = time.time()
-    if not quiet:
-        dumpbyinterval(byinterval, h0, he)
-
-    # Error if nothing was read
-    if not records:
-        print >>sys.stderr, "No records processed"
-        return 1
-
-    # Print statistics
-    if dostats:
-        print
-        print "Read %s records (%s bytes) in %.1f seconds" % (
-            addcommas(records), addcommas(records*26), rte-rt0)
-        print "Versions:   %s records used a version" % addcommas(versions)
-        print "First time: %s" % time.ctime(t0)
-        print "Last time:  %s" % time.ctime(te)
-        print "Duration:   %s seconds" % addcommas(te-t0)
-        print "Data recs:  %s (%.1f%%), average size %.1f KB" % (
-            addcommas(datarecords),
-            100.0 * datarecords / records,
-            datasize / 1024.0 / max(1, datarecords))
-        print "Hit rate:   %.1f%% (load hits / loads)" % hitrate(bycode)
-        print
-        codes = bycode.keys()
-        codes.sort()
-        print "%13s %4s %s" % ("Count", "Code", "Function (action)")
-        for code in codes:
-            print "%13s  %02x  %s" % (
-                addcommas(bycode.get(code, 0)),
-                code,
-                explain.get(code) or "*** unknown code ***")
-
-    # Print histogram
-    if print_histogram:
-        print
-        print "Histogram of object load frequency"
-        total = len(oids)
-        print "Unique oids: %s" % addcommas(total)
-        print "Total loads: %s" % addcommas(total_loads)
-        s = addcommas(total)
-        width = max(len(s), len("objects"))
-        fmt = "%5d %" + str(width) + "s %5.1f%% %5.1f%% %5.1f%%"
-        hdr = "%5s %" + str(width) + "s %6s %6s %6s"
-        print hdr % ("loads", "objects", "%obj", "%load", "%cum")
-        cum = 0.0
-        for binsize, count in histogram(oids):
-            obj_percent = 100.0 * count / total
-            load_percent = 100.0 * count * binsize / total_loads
-            cum += load_percent
-            print fmt % (binsize, addcommas(count),
-                         obj_percent, load_percent, cum)
-
-    # Print size histogram
-    if print_size_histogram:
-        print
-        print "Histograms of object sizes"
-        print
-        dumpbysize(bysizew, "written", "writes")
-        dumpbysize(bysize, "loaded", "loads")
-
-def dumpbysize(bysize, how, how2):
-    print
-    print "Unique sizes %s: %s" % (how, addcommas(len(bysize)))
-    print "%10s %6s %6s" % ("size", "objs", how2)
-    sizes = bysize.keys()
-    sizes.sort()
-    for size in sizes:
-        loads = 0
-        for n in bysize[size].itervalues():
-            loads += n
-        print "%10s %6d %6d" % (addcommas(size),
-                                len(bysize.get(size, "")),
-                                loads)
-
-def dumpbyinterval(byinterval, h0, he):
-    loads = 0
-    hits = 0
-    for code in byinterval.keys():
-        if code & 0x70 == 0x20:
-            n = byinterval[code]
-            loads += n
-            if code in (0x22, 0x26):
-                hits += n
-    if not loads:
-        return
-    hr = 100.0 * hits / loads
-    print "%s-%s %10s loads, %10s hits,%5.1f%% hit rate" % (
-        time.ctime(h0)[4:-8], time.ctime(he)[14:-8],
-        addcommas(loads), addcommas(hits), hr)
-
-def hitrate(bycode):
-    loads = 0
-    hits = 0
-    for code in bycode.keys():
-        if code & 0x70 == 0x20:
-            n = bycode[code]
-            loads += n
-            if code in (0x22, 0x26):
-                hits += n
-    if loads:
-        return 100.0 * hits / loads
-    else:
-        return 0.0
-
-def histogram(d):
-    bins = {}
-    for v in d.itervalues():
-        bins[v] = bins.get(v, 0) + 1
-    L = bins.items()
-    L.sort()
-    return L
-
-def U64(s):
-    h, v = struct.unpack(">II", s)
-    return (long(h) << 32) + v
-
-def oid_repr(oid):
-    if isinstance(oid, StringType) and len(oid) == 8:
-        return '%16x' % U64(oid)
-    else:
-        return repr(oid)
-
-def addcommas(n):
-    sign, s = '', str(n)
-    if s[0] == '-':
-        sign, s = '-', s[1:]
-    i = len(s) - 3
-    while i > 0:
-        s = s[:i] + ',' + s[i:]
-        i -= 3
-    return sign + s
-
-explain = {
-    # The first hex digit shows the operation, the second the outcome.
-    # For invalidations, the 'hit' outcomes use second digits A/C/E; for
-    # loads, 0x22 and 0x26 are the hits and 0x20/0x24 the misses (this is
-    # what hitrate() and dumpbyinterval() count).
-
-    0x00: "_setup_trace (initialization)",
-
-    0x10: "invalidate (miss)",
-    0x1A: "invalidate (hit, version)",
-    0x1C: "invalidate (hit, saving non-current)",
-    # 0x1E can occur during startup verification.
-    0x1E: "invalidate (hit, discarding current or non-current)",
-
-    0x20: "load (miss)",
-    0x22: "load (hit)",
-    0x24: "load (non-current, miss)",
-    0x26: "load (non-current, hit)",
-
-    0x50: "store (version)",
-    0x52: "store (current, non-version)",
-    0x54: "store (non-current)",
-
-    }
-
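-# Illustrative helpers (not in the original file) applying the code
-# conventions above the same way hitrate() and dumpbyinterval() do.
-def _is_load(code):
-    return code & 0x70 == 0x20      # all load operations
-
-def _is_load_hit(code):
-    return code in (0x22, 0x26)     # load hit, load non-current hit
-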
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/branches/bug1734/src/ZEO/tests/Cache.py b/branches/bug1734/src/ZEO/tests/Cache.py
deleted file mode 100644
index a3377568..00000000
--- a/branches/bug1734/src/ZEO/tests/Cache.py
+++ /dev/null
@@ -1,107 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Tests of the ZEO cache"""
-
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle
-
-from transaction import Transaction
-
-class TransUndoStorageWithCache:
-
-    def checkUndoInvalidation(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(23))
-        revid = self._dostore(oid, revid=revid, data=MinPO(24))
-        revid = self._dostore(oid, revid=revid, data=MinPO(25))
-
-        info = self._storage.undoInfo()
-        if not info:
-            # Preserved this comment, but don't understand it:
-            # "Perhaps we have an old storage implementation that
-            #  does do the negative nonsense."
-            info = self._storage.undoInfo(0, 20)
-        tid = info[0]['id']
-
-        # We may need to bail at this point if the storage doesn't
-        # support transactional undo
-        if not self._storage.supportsTransactionalUndo():
-            return
-
-        # Now start an undo transaction
-        t = Transaction()
-        t.note('undo1')
-        self._storage.tpc_begin(t)
-
-        tid, oids = self._storage.undo(tid, t)
-
-        # Make sure this doesn't load invalid data into the cache
-        self._storage.load(oid, '')
-
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        assert len(oids) == 1
-        assert oids[0] == oid
-        data, revid = self._storage.load(oid, '')
-        obj = zodb_unpickle(data)
-        assert obj == MinPO(24)
-
-class StorageWithCache:
-
-    def checkAbortVersionInvalidation(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(1))
-        revid = self._dostore(oid, revid=revid, data=MinPO(2))
-        revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
-        revid = self._dostore(oid, revid=revid, data=MinPO(4), version="foo")
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.abortVersion("foo", t)
-        self._storage.load(oid, "foo")
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        data, revid = self._storage.load(oid, "foo")
-        obj = zodb_unpickle(data)
-        assert obj == MinPO(2), obj
-
-    def checkCommitEmptyVersionInvalidation(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(1))
-        revid = self._dostore(oid, revid=revid, data=MinPO(2))
-        revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion("foo", "", t)
-        self._storage.load(oid, "")
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        data, revid = self._storage.load(oid, "")
-        obj = zodb_unpickle(data)
-        assert obj == MinPO(3), obj
-
-    def checkCommitVersionInvalidation(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(1))
-        revid = self._dostore(oid, revid=revid, data=MinPO(2))
-        revid = self._dostore(oid, revid=revid, data=MinPO(3), version="foo")
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion("foo", "bar", t)
-        self._storage.load(oid, "")
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        data, revid = self._storage.load(oid, "bar")
-        obj = zodb_unpickle(data)
-        assert obj == MinPO(3), obj
diff --git a/branches/bug1734/src/ZEO/tests/CommitLockTests.py b/branches/bug1734/src/ZEO/tests/CommitLockTests.py
deleted file mode 100644
index 4a141cde..00000000
--- a/branches/bug1734/src/ZEO/tests/CommitLockTests.py
+++ /dev/null
@@ -1,249 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Tests of the distributed commit lock."""
-
-import threading
-import time
-
-from persistent.TimeStamp import TimeStamp
-import transaction
-from ZODB.tests.StorageTestBase import zodb_pickle, MinPO
-
-import ZEO.ClientStorage
-from ZEO.Exceptions import ClientDisconnected
-from ZEO.tests.TestThread import TestThread
-
-ZERO = '\0'*8
-
-class DummyDB:
-    def invalidate(self, *args, **kwargs):
-        pass
-
-class WorkerThread(TestThread):
-
-    # run the entire test in a thread so that the blocking call for
-    # tpc_vote() doesn't hang the test suite.
-
-    def __init__(self, storage, trans, method="tpc_finish"):
-        self.storage = storage
-        self.trans = trans
-        self.method = method
-        self.ready = threading.Event()
-        TestThread.__init__(self)
-
-    def testrun(self):
-        try:
-            self.storage.tpc_begin(self.trans)
-            oid = self.storage.new_oid()
-            p = zodb_pickle(MinPO("c"))
-            self.storage.store(oid, ZERO, p, '', self.trans)
-            oid = self.storage.new_oid()
-            p = zodb_pickle(MinPO("c"))
-            self.storage.store(oid, ZERO, p, '', self.trans)
-            self.myvote()
-            if self.method == "tpc_finish":
-                self.storage.tpc_finish(self.trans)
-            else:
-                self.storage.tpc_abort(self.trans)
-        except ClientDisconnected:
-            pass
-
-    def myvote(self):
-        # The vote() call is synchronous, which makes it difficult to
-        # coordinate the action of multiple threads that all call
-        # vote().  This method sends the vote call, then sets the
-        # event saying vote was called, then waits for the vote
-        # response.  It digs deep into the implementation of the client.
-
-        # This method is a replacement for:
-        #     self.ready.set()
-        #     self.storage.tpc_vote(self.trans)
-
-        rpc = self.storage._server.rpc
-        msgid = rpc._deferred_call('vote', id(self.trans))
-        self.ready.set()
-        rpc._deferred_wait(msgid)
-        self.storage._check_serials()
-
-class CommitLockTests:
-
-    NUM_CLIENTS = 5
-
-    # The commit lock tests verify that the storage successfully
-    # blocks and restarts transactions when there is contention for a
-    # single storage.  There are a lot of cases to cover.
-
-    # The general flow of these tests is to start a transaction by
-    # getting far enough into 2PC to acquire the commit lock.  Then
-    # begin one or more other connections that also want to commit.
-    # This causes the commit lock code to be exercised.  Once the
-    # other connections are started, the first transaction completes.
-
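-    # A minimal sketch of the contention pattern exercised below
-    # (hypothetical names; the concrete tests build this via the
-    # helpers that follow):
-    #
-    #   s1.tpc_begin(t1); s1.store(...); s1.tpc_vote(t1)  # takes the lock
-    #   s2.tpc_begin(t2); s2.store(...); s2.tpc_vote(t2)  # blocks on it
-    #   s1.tpc_finish(t1)        # releases the lock; s2's vote returns
-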
-    def _cleanup(self):
-        for store, trans in self._storages:
-            store.tpc_abort(trans)
-            store.close()
-        self._storages = []
-
-    def _start_txn(self):
-        txn = transaction.Transaction()
-        self._storage.tpc_begin(txn)
-        oid = self._storage.new_oid()
-        self._storage.store(oid, ZERO, zodb_pickle(MinPO(1)), '', txn)
-        return oid, txn
-
-    def _begin_threads(self):
-        # Start a second transaction on a different connection without
-        # blocking the test thread.  Returns only after each thread has
-        # set its ready event.
-        self._storages = []
-        self._threads = []
-
-        for i in range(self.NUM_CLIENTS):
-            storage = self._duplicate_client()
-            txn = transaction.Transaction()
-            tid = self._get_timestamp()
-
-            t = WorkerThread(storage, txn)
-            self._threads.append(t)
-            t.start()
-            t.ready.wait()
-
-            # Close one of the connections abnormally to test the server's response.
-            if i == 0:
-                storage.close()
-            else:
-                self._storages.append((storage, txn))
-
-    def _finish_threads(self):
-        for t in self._threads:
-            t.cleanup()
-
-    def _duplicate_client(self):
-        "Open another ClientStorage to the same server."
-        # It's hard to find the actual address.
-        # The rpc mgr addr attribute is a list.  Each element in the
-        # list is a socket domain (AF_INET, AF_UNIX, etc.) and an
-        # address.
-        addr = self._storage._addr
-        new = ZEO.ClientStorage.ClientStorage(addr, wait=1)
-        new.registerDB(DummyDB(), None)
-        return new
-
-    def _get_timestamp(self):
-        t = time.time()
-        t = TimeStamp(*time.gmtime(t)[:5]+(t%60,))
-        return `t`
-
-class CommitLockVoteTests(CommitLockTests):
-
-    def checkCommitLockVoteFinish(self):
-        oid, txn = self._start_txn()
-        self._storage.tpc_vote(txn)
-
-        self._begin_threads()
-
-        self._storage.tpc_finish(txn)
-        self._storage.load(oid, '')
-
-        self._finish_threads()
-
-        self._dostore()
-        self._cleanup()
-
-    def checkCommitLockVoteAbort(self):
-        oid, txn = self._start_txn()
-        self._storage.tpc_vote(txn)
-
-        self._begin_threads()
-
-        self._storage.tpc_abort(txn)
-
-        self._finish_threads()
-
-        self._dostore()
-        self._cleanup()
-
-    def checkCommitLockVoteClose(self):
-        oid, txn = self._start_txn()
-        self._storage.tpc_vote(txn)
-
-        self._begin_threads()
-
-        self._storage.close()
-
-        self._finish_threads()
-        self._cleanup()
-
-class CommitLockUndoTests(CommitLockTests):
-
-    def _get_trans_id(self):
-        self._dostore()
-        L = self._storage.undoInfo()
-        return L[0]['id']
-
-    def _begin_undo(self, trans_id, txn):
-        rpc = self._storage._server.rpc
-        return rpc._deferred_call('undo', trans_id, id(txn))
-
-    def _finish_undo(self, msgid):
-        return self._storage._server.rpc._deferred_wait(msgid)
-
-    def checkCommitLockUndoFinish(self):
-        trans_id = self._get_trans_id()
-        oid, txn = self._start_txn()
-        msgid = self._begin_undo(trans_id, txn)
-
-        self._begin_threads()
-
-        self._finish_undo(msgid)
-        self._storage.tpc_vote(txn)
-        self._storage.tpc_finish(txn)
-        self._storage.load(oid, '')
-
-        self._finish_threads()
-
-        self._dostore()
-        self._cleanup()
-
-    def checkCommitLockUndoAbort(self):
-        trans_id = self._get_trans_id()
-        oid, txn = self._start_txn()
-        msgid = self._begin_undo(trans_id, txn)
-
-        self._begin_threads()
-
-        self._finish_undo(msgid)
-        self._storage.tpc_vote(txn)
-        self._storage.tpc_abort(txn)
-
-        self._finish_threads()
-
-        self._dostore()
-        self._cleanup()
-
-    def checkCommitLockUndoClose(self):
-        trans_id = self._get_trans_id()
-        oid, txn = self._start_txn()
-        msgid = self._begin_undo(trans_id, txn)
-
-        self._begin_threads()
-
-        self._finish_undo(msgid)
-        self._storage.tpc_vote(txn)
-        self._storage.close()
-
-        self._finish_threads()
-
-        self._cleanup()
diff --git a/branches/bug1734/src/ZEO/tests/ConnectionTests.py b/branches/bug1734/src/ZEO/tests/ConnectionTests.py
deleted file mode 100644
index a18bc35e..00000000
--- a/branches/bug1734/src/ZEO/tests/ConnectionTests.py
+++ /dev/null
@@ -1,1125 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import os
-import sys
-import time
-import random
-import asyncore
-import tempfile
-import threading
-import logging
-
-import ZEO.ServerStub
-from ZEO.ClientStorage import ClientStorage
-from ZEO.Exceptions import ClientDisconnected
-from ZEO.zrpc.marshal import Marshaller
-from ZEO.tests import forker
-
-from ZODB.DB import DB
-from ZODB.POSException import ReadOnlyError, ConflictError
-from ZODB.tests.StorageTestBase import StorageTestBase
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase \
-     import zodb_pickle, zodb_unpickle, handle_all_serials, handle_serials
-
-import transaction
-from transaction import Transaction
-
-logger = logging.getLogger('ZEO.tests.ConnectionTests')
-
-ZERO = '\0'*8
-
-class TestServerStub(ZEO.ServerStub.StorageServer):
-    __super_getInvalidations = ZEO.ServerStub.StorageServer.getInvalidations
-
-    def getInvalidations(self, tid):
-        # squirrel the results away for inspection by test case
-        self._last_invals = self.__super_getInvalidations(tid)
-        return self._last_invals
-
-class TestClientStorage(ClientStorage):
-
-    test_connection = False
-
-    StorageServerStubClass = TestServerStub
-
-    def verify_cache(self, stub):
-        self.end_verify = threading.Event()
-        self.verify_result = ClientStorage.verify_cache(self, stub)
-
-    def endVerify(self):
-        ClientStorage.endVerify(self)
-        self.end_verify.set()
-
-    def testConnection(self, conn):
-        try:
-            return ClientStorage.testConnection(self, conn)
-        finally:
-            self.test_connection = True
-
-class DummyDB:
-    def invalidate(self, *args, **kwargs):
-        pass
-
-class CommonSetupTearDown(StorageTestBase):
-    """Common boilerplate"""
-
-    __super_setUp = StorageTestBase.setUp
-    __super_tearDown = StorageTestBase.tearDown
-    keep = 0
-    invq = None
-    timeout = None
-    monitor = 0
-    db_class = DummyDB
-
-    def setUp(self):
-        """Test setup for connection tests.
-
-        This starts only one server; a test may start more servers by
-        calling self._newAddr() and then self.startServer(index=i)
-        for i in 1, 2, ...
-        """
-        self.__super_setUp()
-        logging.info("setUp() %s", self.id())
-        self.file = tempfile.mktemp()
-        self.addr = []
-        self._pids = []
-        self._servers = []
-        self.conf_paths = []
-        self.caches = []
-        self._newAddr()
-        self.startServer()
-
-    def tearDown(self):
-        """Try to cause the tests to halt"""
-        logging.info("tearDown() %s" % self.id())
-        for p in self.conf_paths:
-            os.remove(p)
-        if getattr(self, '_storage', None) is not None:
-            self._storage.close()
-            if hasattr(self._storage, 'cleanup'):
-                logging.debug("cleanup storage %s" %
-                         self._storage.__name__)
-                self._storage.cleanup()
-        for adminaddr in self._servers:
-            if adminaddr is not None:
-                forker.shutdown_zeo_server(adminaddr)
-        if hasattr(os, 'waitpid'):
-            # Not in Windows Python until 2.3
-            for pid in self._pids:
-                os.waitpid(pid, 0)
-        for c in self.caches:
-            for i in 0, 1:
-                for ext in "", ".trace":
-                    base = "%s-%s.zec%s" % (c, "1", ext)
-                    path = os.path.join(tempfile.tempdir, base)
-                    # On Windows before 2.3, we don't have a way to wait for
-                    # the spawned server(s) to close, and they inherited
-                    # file descriptors for our open files.  So long as those
-                    # processes are alive, we can't delete the files.  Try
-                    # a few times then give up.
-                    need_to_delete = False
-                    if os.path.exists(path):
-                        need_to_delete = True
-                        for dummy in range(5):
-                            try:
-                                os.unlink(path)
-                            except OSError:
-                                time.sleep(0.5)
-                            else:
-                                need_to_delete = False
-                                break
-                    if need_to_delete:
-                        os.unlink(path)  # sometimes this is just gonna fail
-        self.__super_tearDown()
-
-    def _newAddr(self):
-        self.addr.append(self._getAddr())
-
-    def _getAddr(self):
-        # port+1 is also used, so only draw even port numbers
-        return 'localhost', random.randrange(25000, 30000, 2)
-
-    def getConfig(self, path, create, read_only):
-        raise NotImplementedError
-
-    cache_id = 1
-
-    def openClientStorage(self, cache=None, cache_size=200000, wait=1,
-                          read_only=0, read_only_fallback=0,
-                          username=None, password=None, realm=None):
-        if cache is None:
-            cache = str(self.__class__.cache_id)
-            self.__class__.cache_id += 1
-        self.caches.append(cache)
-        storage = TestClientStorage(self.addr,
-                                    client=cache,
-                                    var=tempfile.tempdir,
-                                    cache_size=cache_size,
-                                    wait=wait,
-                                    min_disconnect_poll=0.1,
-                                    read_only=read_only,
-                                    read_only_fallback=read_only_fallback,
-                                    username=username,
-                                    password=password,
-                                    realm=realm)
-        storage.registerDB(DummyDB(), None)
-        return storage
-
-    def getServerConfig(self, addr, ro_svr):
-        zconf = forker.ZEOConfig(addr)
-        if ro_svr:
-            zconf.read_only = 1
-        if self.monitor:
-            zconf.monitor_address = ("", 42000)
-        if self.invq:
-            zconf.invalidation_queue_size = self.invq
-        if self.timeout:
-            zconf.transaction_timeout = self.timeout
-        return zconf
-
-    def startServer(self, create=1, index=0, read_only=0, ro_svr=0, keep=None):
-        addr = self.addr[index]
-        logging.info("startServer(create=%d, index=%d, read_only=%d) @ %s" %
-                     (create, index, read_only, addr))
-        path = "%s.%d" % (self.file, index)
-        sconf = self.getConfig(path, create, read_only)
-        zconf = self.getServerConfig(addr, ro_svr)
-        if keep is None:
-            keep = self.keep
-        zeoport, adminaddr, pid, path = forker.start_zeo_server(
-            sconf, zconf, addr[1], keep)
-        self.conf_paths.append(path)
-        self._pids.append(pid)
-        self._servers.append(adminaddr)
-
-    def shutdownServer(self, index=0):
-        logging.info("shutdownServer(index=%d) @ %s" %
-                     (index, self._servers[index]))
-        adminaddr = self._servers[index]
-        if adminaddr is not None:
-            forker.shutdown_zeo_server(adminaddr)
-            self._servers[index] = None
-
-    def pollUp(self, timeout=30.0, storage=None):
-        if storage is None:
-            storage = self._storage
-        # Poll until we're connected.
-        now = time.time()
-        giveup = now + timeout
-        while not storage.is_connected():
-            asyncore.poll(0.1)
-            now = time.time()
-            if now > giveup:
-                self.fail("timed out waiting for storage to connect")
-            # When the socket map is empty, poll() returns immediately,
-            # and this is a pure busy-loop then.  At least on some Linux
-            # flavors, that can starve the thread trying to connect,
-            # leading to grossly increased runtime (typical) or bogus
-            # "timed out" failures.  A little sleep here cures both.
-            time.sleep(0.1)
-
-    def pollDown(self, timeout=30.0):
-        # Poll until we're disconnected.
-        now = time.time()
-        giveup = now + timeout
-        while self._storage.is_connected():
-            asyncore.poll(0.1)
-            now = time.time()
-            if now > giveup:
-                self.fail("timed out waiting for storage to disconnect")
-            # See pollUp() for why we sleep a little here.
-            time.sleep(0.1)
-
-
-class ConnectionTests(CommonSetupTearDown):
-    """Tests that explicitly manage the server process.
-
-    To test the cache or reconnection, these test cases explicitly
-    start and stop a ZEO storage server.
-    """
-
-    def checkMultipleAddresses(self):
-        for i in range(4):
-            self._newAddr()
-        self._storage = self.openClientStorage('test', 100000)
-        oid = self._storage.new_oid()
-        obj = MinPO(12)
-        self._dostore(oid, data=obj)
-        self._storage.close()
-
-    def checkMultipleServers(self):
-        # Crude test-- just start two servers and do a commit at each one.
-
-        self._newAddr()
-        self._storage = self.openClientStorage('test', 100000)
-        self._dostore()
-
-        self.shutdownServer(index=0)
-        self.startServer(index=1)
-
-        # If we can still store after shutting down one of the
-        # servers, we must be reconnecting to the other server.
-
-        did_a_store = 0
-        for i in range(10):
-            try:
-                self._dostore()
-                did_a_store = 1
-                break
-            except ClientDisconnected:
-                time.sleep(0.5)
-        self.assert_(did_a_store)
-        self._storage.close()
-
-    def checkReadOnlyClient(self):
-        # Open a read-only client to a read-write server; stores fail
-
-        # Start a read-only client for a read-write server
-        self._storage = self.openClientStorage(read_only=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-        self._storage.close()
-
-    def checkReadOnlyServer(self):
-        # Open a read-only client to a read-only *server*; stores fail
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Start a read-only server
-        self.startServer(create=0, index=0, ro_svr=1)
-        # Start a read-only client
-        self._storage = self.openClientStorage(read_only=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-        self._storage.close()
-        # Get rid of the 'test left new threads behind' warning
-        time.sleep(0.1)
-
-    def checkReadOnlyFallbackWritable(self):
-        # Open a fallback client to a read-write server; stores succeed
-
-        # Start a read-only-fallback client for a read-write server
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        # Stores should succeed here
-        self._dostore()
-        self._storage.close()
-
-    def checkReadOnlyFallbackReadOnlyServer(self):
-        # Open a fallback client to a read-only *server*; stores fail
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Start a read-only server
-        self.startServer(create=0, index=0, ro_svr=1)
-        # Start a read-only-fallback client
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        self.assert_(self._storage.isReadOnly())
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-        self._storage.close()
-
-    # TODO:  Compare checkReconnectXXX() here to checkReconnection()
-    # further down.  Is the code here hopelessly naive, or is
-    # checkReconnection() overwrought?
-
-    def checkReconnectWritable(self):
-        # A read-write client reconnects to a read-write server
-
-        # Start a client
-        self._storage = self.openClientStorage()
-        # Stores should succeed here
-        self._dostore()
-
-        # Shut down the server
-        self.shutdownServer()
-        self._servers = []
-        # Poll until the client disconnects
-        self.pollDown()
-        # Stores should fail now
-        self.assertRaises(ClientDisconnected, self._dostore)
-
-        # Restart the server
-        self.startServer(create=0)
-        # Poll until the client connects
-        self.pollUp()
-        # Stores should succeed here
-        self._dostore()
-        self._storage.close()
-
-    def checkDisconnectionError(self):
-        # Make sure we get a ClientDisconnected when we try to read an
-        # object when we're not connected to a storage server and the
-        # object is not in the cache.
-        self.shutdownServer()
-        self._storage = self.openClientStorage('test', 1000, wait=0)
-        self.assertRaises(ClientDisconnected,
-                          self._storage.load, 'fredwash', '')
-        self._storage.close()
-
-    def checkDisconnectedAbort(self):
-        self._storage = self.openClientStorage()
-        self._dostore()
-        oids = [self._storage.new_oid() for i in range(5)]
-        txn = Transaction()
-        self._storage.tpc_begin(txn)
-        for oid in oids:
-            data = zodb_pickle(MinPO(oid))
-            self._storage.store(oid, None, data, '', txn)
-        self.shutdownServer()
-        self.assertRaises(ClientDisconnected, self._storage.tpc_vote, txn)
-        self._storage.tpc_abort(txn)
-        self.startServer(create=0)
-        self._storage._wait()
-        self._dostore()
-
-        # This test is supposed to cover the following error, although
-        # I don't have much confidence that it does.  The likely
-        # explanation for the error is that the _tbuf contained
-        # objects that weren't in the _seriald, because the client was
-        # interrupted waiting for tpc_vote() to return.  When the next
-        # transaction committed, it tried to do something with the
-        # bogus _tbuf entries.  The explanation is wrong/incomplete,
-        # because tpc_begin() should clear the _tbuf.
-
-        # 2003-01-15T15:44:19 ERROR(200) ZODB A storage error occurred
-        # in the last phase of a two-phase commit.  This shouldn't happen.
-
-        # Traceback (innermost last):
-        # Module ZODB.Transaction, line 359, in _finish_one
-        # Module ZODB.Connection, line 691, in tpc_finish
-        # Module ZEO.ClientStorage, line 679, in tpc_finish
-        # Module ZEO.ClientStorage, line 709, in _update_cache
-        # KeyError: ...
-
-    def checkBasicPersistence(self):
-        # Verify cached data persists across client storage instances.
-
-        # To verify that the cache is being used, the test closes the
-        # server and then starts a new client with the server down.
-        # When the server is down, a load() gets the data from its cache.
-
-        self._storage = self.openClientStorage('test', 100000)
-        oid = self._storage.new_oid()
-        obj = MinPO(12)
-        revid1 = self._dostore(oid, data=obj)
-        self._storage.close()
-        self.shutdownServer()
-        self._storage = self.openClientStorage('test', 100000, wait=0)
-        data, revid2 = self._storage.load(oid, '')
-        self.assertEqual(zodb_unpickle(data), MinPO(12))
-        self.assertEqual(revid1, revid2)
-        self._storage.close()
-
-    def checkDisconnectedCacheWorks(self):
-        # Check that the cache works when the client is disconnected.
-        self._storage = self.openClientStorage('test')
-        oid1 = self._storage.new_oid()
-        obj1 = MinPO("1" * 500)
-        self._dostore(oid1, data=obj1)
-        oid2 = self._storage.new_oid()
-        obj2 = MinPO("2" * 500)
-        self._dostore(oid2, data=obj2)
-        expected1 = self._storage.load(oid1, '')
-        expected2 = self._storage.load(oid2, '')
-
-        # Shut it all down, and try loading from the persistent cache file
-        # without a server present.
-        self._storage.close()
-        self.shutdownServer()
-        self._storage = self.openClientStorage('test', wait=False)
-        self.assertEqual(expected1, self._storage.load(oid1, ''))
-        self.assertEqual(expected2, self._storage.load(oid2, ''))
-        self._storage.close()
-
-    def checkDisconnectedCacheFails(self):
-        # Like checkDisconnectedCacheWorks above, except the cache
-        # file is so small that only one object can be remembered.
-        self._storage = self.openClientStorage('test', cache_size=900)
-        oid1 = self._storage.new_oid()
-        obj1 = MinPO("1" * 500)
-        self._dostore(oid1, data=obj1)
-        oid2 = self._storage.new_oid()
-        obj2 = MinPO("2" * 500)
-        # The cache file is so small that adding oid2 will evict oid1.
-        self._dostore(oid2, data=obj2)
-        expected2 = self._storage.load(oid2, '')
-
-        # Shut it all down, and try loading from the persistent cache file
-        # without a server present.
-        self._storage.close()
-        self.shutdownServer()
-        self._storage = self.openClientStorage('test', cache_size=900,
-                                               wait=False)
-        # oid2 should still be in cache.
-        self.assertEqual(expected2, self._storage.load(oid2, ''))
-        # But oid1 should have been purged, so that trying to load it will
-        # try to fetch it from the (non-existent) ZEO server.
-        self.assertRaises(ClientDisconnected, self._storage.load, oid1, '')
-        self._storage.close()
-
-    def checkVerificationInvalidationPersists(self):
-        # This tests a subtle invalidation bug from ZODB 3.3:
-        # invalidations processed as part of ZEO cache verification acted
-        # kinda OK wrt the in-memory cache structures, but had no effect
-        # on the cache file.  So opening the file cache again could
-        # incorrectly believe that a previously invalidated object was
-        # still current.  This takes some effort to set up.
-
-        # First, using a persistent cache ('test'), create an object
-        # MinPO(13).  We used to see this again at the end of this test,
-        # despite that we modify it, and despite that it gets invalidated
-        # in 'test', before the end.
-        self._storage = self.openClientStorage('test')
-        oid = self._storage.new_oid()
-        obj = MinPO(13)
-        self._dostore(oid, data=obj)
-        self._storage.close()
-
-        # Now modify obj via a temp connection.  `test` won't learn about
-        # this until we open a connection using `test` again.
-        self._storage = self.openClientStorage()
-        pickle, rev = self._storage.load(oid, '')
-        newobj = zodb_unpickle(pickle)
-        self.assertEqual(newobj, obj)
-        newobj.value = 42 # .value *should* be 42 forever after now, not 13
-        self._dostore(oid, data=newobj, revid=rev)
-        self._storage.close()
-
-        # Open 'test' again.  `oid` in this cache should be (and is)
-        # invalidated during cache verification.  The bug was that it
-        # got invalidated (kinda) in memory, but not in the cache file.
-        self._storage = self.openClientStorage('test')
-
-        # The invalidation happened already.  Now create and store a new
-        # object before closing this storage:  this is so `test` believes
-        # it's seen transactions beyond the one that invalidated `oid`, so
-        # that the *next* time we open `test` it doesn't process another
-        # invalidation for `oid`.  It's also important that we not try to
-        # load `oid` now:  because it's been (kinda) invalidated in the
-        # cache's memory structures, loading it now would fetch the
-        # current revision from the server, thus hiding the bug.
-        obj2 = MinPO(666)
-        oid2 = self._storage.new_oid()
-        self._dostore(oid2, data=obj2)
-        self._storage.close()
-
-        # Finally, open `test` again and load `oid`.  `test` believes
-        # it's beyond the transaction that modified `oid`, so its view
-        # of whether it has an up-to-date `oid` comes solely from the disk
-        # file, unaffected by cache verification.
-        self._storage = self.openClientStorage('test')
-        pickle, rev = self._storage.load(oid, '')
-        newobj_copy = zodb_unpickle(pickle)
-        # This used to fail, with
-        #     AssertionError: MinPO(13) != MinPO(42)
-        # That is, `test` retained a stale revision of the object on disk.
-        self.assertEqual(newobj_copy, newobj)
-        self._storage.close()
-
-    def checkReconnection(self):
-        # Check that the client reconnects when a server restarts.
-
-        self._storage = self.openClientStorage()
-        oid = self._storage.new_oid()
-        obj = MinPO(12)
-        self._dostore(oid, data=obj)
-        logging.info("checkReconnection(): About to shutdown server")
-        self.shutdownServer()
-        logging.info("checkReconnection(): About to restart server")
-        self.startServer(create=0)
-        oid = self._storage.new_oid()
-        obj = MinPO(12)
-        while 1:
-            try:
-                self._dostore(oid, data=obj)
-                break
-            except ClientDisconnected:
-                # Maybe the exception mess is better now
-                logging.info("checkReconnection(): Error after"
-                             " server restart; retrying.", exc_info=True)
-                transaction.abort()
-            # Give the other thread a chance to run.
-            time.sleep(0.1)
-        logging.info("checkReconnection(): finished")
-        self._storage.close()
-
-    def checkBadMessage1(self):
-        # not even close to a real message
-        self._bad_message("salty")
-
-    def checkBadMessage2(self):
-        # just like a real message, but with an unpicklable argument
-        global Hack
-        class Hack:
-            pass
-
-        msg = Marshaller().encode(1, 0, "foo", (Hack(),))
-        self._bad_message(msg)
-        del Hack
-
-    def _bad_message(self, msg):
-        # Establish a connection, then send the server an ill-formatted
-        # request.  Verify that the connection is closed and that it is
-        # possible to establish a new connection.
-
-        self._storage = self.openClientStorage()
-        self._dostore()
-
-        # break into the internals to send a bogus message
-        zrpc_conn = self._storage._server.rpc
-        zrpc_conn.message_output(msg)
-
-        try:
-            self._dostore()
-        except ClientDisconnected:
-            pass
-        else:
-            self._storage.close()
-            self.fail("Server did not disconnect after bogus message")
-        self._storage.close()
-
-        self._storage = self.openClientStorage()
-        self._dostore()
-        self._storage.close()
-
-    # Test case for multiple storages participating in a single
-    # transaction.  This is not really a connection test, but it needs
-    # about the same infrastructure (several storage servers).
-
-    # TODO: with the current ZEO code, this occasionally fails.
-    # That's the point of this test. :-)
-
-    def NOcheckMultiStorageTransaction(self):
-        # Configuration parameters (larger values mean more likely deadlocks)
-        N = 2
-        # These don't *have* to be all the same, but it's convenient this way
-        self.nservers = N
-        self.nthreads = N
-        self.ntrans = N
-        self.nobj = N
-
-        # Start extra servers
-        for i in range(1, self.nservers):
-            self._newAddr()
-            self.startServer(index=i)
-
-        # Spawn threads that each do some transactions on all storages
-        threads = []
-        try:
-            for i in range(self.nthreads):
-                t = MSTThread(self, "T%d" % i)
-                threads.append(t)
-                t.start()
-            # Wait for all threads to finish
-            for t in threads:
-                t.join(60)
-                self.failIf(t.isAlive(), "%s didn't die" % t.getName())
-        finally:
-            for t in threads:
-                t.closeclients()
-
-    def checkCrossDBInvalidations(self):
-        db1 = DB(self.openClientStorage())
-        c1 = db1.open()
-        r1 = c1.root()
-
-        r1["a"] = MinPO("a")
-        transaction.commit()
-
-        db2 = DB(self.openClientStorage())
-        r2 = db2.open().root()
-
-        self.assertEqual(r2["a"].value, "a")
-
-        r2["b"] = MinPO("b")
-        transaction.commit()
-
-        # make sure the invalidation is received in the other client
-        for i in range(10):
-            c1._storage.sync()
-            if c1._invalidated.has_key(r1._p_oid):
-                break
-            time.sleep(0.1)
-        self.assert_(c1._invalidated.has_key(r1._p_oid))
-
-        # force the invalidations to be applied...
-        c1.sync()
-        r1.keys() # unghostify
-        self.assertEqual(r1._p_serial, r2._p_serial)
-
-        db2.close()
-        db1.close()
-
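-# NOTE (editor's sketch): the invalidation-polling idiom used by
-# checkCrossDBInvalidations() above can be captured in a small helper.
-# This is illustrative only and not part of the original suite; it
-# relies on the same private attributes (conn._storage and
-# conn._invalidated) that the test itself pokes at.
-def _wait_for_invalidation(conn, oid, attempts=10, delay=0.1):
-    # Poll sync() until oid shows up in the connection's invalidated
-    # set, or give up after attempts * delay seconds.
-    for i in range(attempts):
-        conn._storage.sync()
-        if conn._invalidated.has_key(oid):
-            return 1
-        time.sleep(delay)
-    return 0
-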
-class InvqTests(CommonSetupTearDown):
-    invq = 3
-
-    def checkQuickVerificationWith2Clients(self):
-        perstorage = self.openClientStorage(cache="test")
-        self.assertEqual(perstorage.verify_result, "full verification")
-
-        self._storage = self.openClientStorage()
-        oid = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        # When we create a new storage, it should always do a full
-        # verification
-        self.assertEqual(self._storage.verify_result, "full verification")
-        # do two stores of the object to make sure an invalidation
-        # message is generated
-        revid = self._dostore(oid)
-        revid = self._dostore(oid, revid)
-        # Create a second object and revision to guarantee it doesn't
-        # show up in the list of invalidations sent when perstore restarts.
-        revid2 = self._dostore(oid2)
-        revid2 = self._dostore(oid2, revid2)
-
-        # sync() is needed to prevent invalidation for oid from arriving
-        # in the middle of the load() call.
-        perstorage.sync()
-        perstorage.load(oid, '')
-        perstorage.close()
-
-        revid = self._dostore(oid, revid)
-        perstorage = self.openClientStorage(cache="test")
-        self.assertEqual(perstorage.verify_result, "quick verification")
-        self.assertEqual(perstorage._server._last_invals,
-                         (revid, [(oid, '')]))
-
-        self.assertEqual(perstorage.load(oid, ''),
-                         self._storage.load(oid, ''))
-        perstorage.close()
-
-    def checkVerificationWith2ClientsInvqOverflow(self):
-        perstorage = self.openClientStorage(cache="test")
-        self.assertEqual(perstorage.verify_result, "full verification")
-
-        self._storage = self.openClientStorage()
-        oid = self._storage.new_oid()
-        # When we create a new storage, it should always do a full
-        # verification
-        self.assertEqual(self._storage.verify_result, "full verification")
-        # do two stores of the object to make sure an invalidation
-        # message is generated
-        revid = self._dostore(oid)
-        revid = self._dostore(oid, revid)
-
-        perstorage.load(oid, '')
-        perstorage.close()
-
-        # This test class sets the invq bound to 3 (see invq above), so
-        # five more stores overflow the invalidation queue.
-        for i in range(5):
-            revid = self._dostore(oid, revid)
-
-        perstorage = self.openClientStorage(cache="test")
-        self.assertEqual(perstorage.verify_result, "full verification")
-        t = time.time() + 30
-        while not perstorage.end_verify.isSet():
-            perstorage.sync()
-            if time.time() > t:
-                self.fail("timed out waiting for endVerify")
-
-        self.assertEqual(self._storage.load(oid, '')[1], revid)
-        self.assertEqual(perstorage.load(oid, ''),
-                         self._storage.load(oid, ''))
-
-        perstorage.close()
-
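-# NOTE (editor's sketch): the "overflow the invalidation queue" step in
-# checkVerificationWith2ClientsInvqOverflow() boils down to committing
-# more revisions than the invq bound.  A hypothetical helper, for
-# illustration only; testcase is a CommonSetupTearDown instance and
-# _dostore() is its existing store helper.
-def _overflow_invq(testcase, oid, revid, extra=5):
-    # Commit extra revisions of oid; with invq = 3 above, five commits
-    # push the oldest invalidations out of the queue.
-    for i in range(extra):
-        revid = testcase._dostore(oid, revid)
-    return revid
-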
-class ReconnectionTests(CommonSetupTearDown):
-    # The setUp() starts a server automatically.  In order for its
-    # state to persist, we set the class variable keep to 1.  In
-    # order for its state to be cleaned up, the last startServer()
-    # call in the test must pass keep=0.
-    keep = 1
-    invq = 2
-
-    def checkReadOnlyStorage(self):
-        # Open a read-only client to a read-only *storage*; stores fail
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Start a read-only server
-        self.startServer(create=0, index=0, read_only=1, keep=0)
-        # Start a read-only client
-        self._storage = self.openClientStorage(read_only=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-    def checkReadOnlyFallbackReadOnlyStorage(self):
-        # Open a fallback client to a read-only *storage*; stores fail
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Start a read-only server
-        self.startServer(create=0, index=0, read_only=1, keep=0)
-        # Start a read-only-fallback client
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-    def checkReconnectReadOnly(self):
-        # A read-only client reconnects from a read-write to a
-        # read-only server
-
-        # Start a client
-        self._storage = self.openClientStorage(read_only=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-        # Shut down the server
-        self.shutdownServer()
-        self._servers = []
-        # Poll until the client disconnects
-        self.pollDown()
-        # Stores should still fail
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-        # Restart the server
-        self.startServer(create=0, read_only=1, keep=0)
-        # Poll until the client connects
-        self.pollUp()
-        # Stores should still fail
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-    def checkReconnectFallback(self):
-        # A fallback client reconnects from a read-write to a
-        # read-only server
-
-        # Start a client in fallback mode
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        # Stores should succeed here
-        self._dostore()
-
-        # Shut down the server
-        self.shutdownServer()
-        self._servers = []
-        # Poll until the client disconnects
-        self.pollDown()
-        # Stores should fail now
-        self.assertRaises(ClientDisconnected, self._dostore)
-
-        # Restart the server
-        self.startServer(create=0, read_only=1, keep=0)
-        # Poll until the client connects
-        self.pollUp()
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-    def checkReconnectUpgrade(self):
-        # A fallback client reconnects from a read-only to a
-        # read-write server
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Start a read-only server
-        self.startServer(create=0, read_only=1)
-        # Start a client in fallback mode
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-        # Shut down the server
-        self.shutdownServer()
-        self._servers = []
-        # Poll until the client disconnects
-        self.pollDown()
-        # Stores should fail now
-        self.assertRaises(ClientDisconnected, self._dostore)
-
-        # Restart the server, this time read-write
-        self.startServer(create=0, keep=0)
-        # Poll until the client connects
-        self.pollUp()
-        # Stores should now succeed
-        self._dostore()
-
-    def checkReconnectSwitch(self):
-        # A fallback client initially connects to a read-only server,
-        # then discovers a read-write server and switches to that
-
-        # We don't want the read-write server created by setUp()
-        self.shutdownServer()
-        self._servers = []
-        # Allocate a second address (for the second server)
-        self._newAddr()
-
-        # Start a read-only server
-        self.startServer(create=0, index=0, read_only=1, keep=0)
-        # Start a client in fallback mode
-        self._storage = self.openClientStorage(read_only_fallback=1)
-        # Stores should fail here
-        self.assertRaises(ReadOnlyError, self._dostore)
-
-        # Start a read-write server
-        self.startServer(index=1, read_only=0, keep=0)
-        # After a while, stores should work
-        for i in range(300): # Try for 30 seconds
-            try:
-                self._dostore()
-                break
-            except (ClientDisconnected, ReadOnlyError):
-                # If the client isn't connected at all, the attempt
-                # returns quickly; sleep so the test waits long enough
-                # for the client to find the read-write server.
-                time.sleep(0.1)
-        else:
-            self.fail("Couldn't store after starting a read-write server")
-
-    def checkNoVerificationOnServerRestart(self):
-        self._storage = self.openClientStorage()
-        # When we create a new storage, it should always do a full
-        # verification
-        self.assertEqual(self._storage.verify_result, "full verification")
-        self._dostore()
-        self.shutdownServer()
-        self.pollDown()
-        self._storage.verify_result = None
-        self.startServer(create=0, keep=0)
-        self.pollUp()
-        # There were no transactions committed, so no verification
-        # should be needed.
-        self.assertEqual(self._storage.verify_result, "no verification")
-
-    def checkNoVerificationOnServerRestartWith2Clients(self):
-        perstorage = self.openClientStorage(cache="test")
-        self.assertEqual(perstorage.verify_result, "full verification")
-
-        self._storage = self.openClientStorage()
-        oid = self._storage.new_oid()
-        # When we create a new storage, it should always do a full
-        # verification
-        self.assertEqual(self._storage.verify_result, "full verification")
-        # do two stores of the object to make sure an invalidation
-        # message is generated
-        revid = self._dostore(oid)
-        self._dostore(oid, revid)
-
-        perstorage.load(oid, '')
-
-        self.shutdownServer()
-
-        self.pollDown()
-        self._storage.verify_result = None
-        perstorage.verify_result = None
-        logging.info('2ALLBEEF')
-        self.startServer(create=0, keep=0)
-        self.pollUp()
-        self.pollUp(storage=perstorage)
-        # There were no transactions committed, so no verification
-        # should be needed.
-        self.assertEqual(self._storage.verify_result, "no verification")
-        self.assertEqual(perstorage.verify_result, "no verification")
-        perstorage.close()
-        self._storage.close()
-
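-# NOTE (editor's sketch): several reconnection tests retry a store until
-# a freshly (re)started server becomes reachable.  A reusable form of
-# the retry loop in checkReconnectSwitch() might look like this; it is
-# illustrative only and uses the same 300 x 0.1s budget.
-def _store_with_retry(testcase, attempts=300, delay=0.1):
-    for i in range(attempts):
-        try:
-            testcase._dostore()
-            return
-        except (ClientDisconnected, ReadOnlyError):
-            # Not connected (or still read-only); give the client time
-            # to find the read-write server.
-            time.sleep(delay)
-    testcase.fail("Couldn't store after starting a read-write server")
-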
-class TimeoutTests(CommonSetupTearDown):
-    timeout = 1
-
-    def checkTimeout(self):
-        storage = self.openClientStorage()
-        txn = Transaction()
-        storage.tpc_begin(txn)
-        storage.tpc_vote(txn)
-        time.sleep(2)
-        self.assertRaises(ClientDisconnected, storage.tpc_finish, txn)
-        storage.close()
-
-    def checkTimeoutOnAbort(self):
-        storage = self.openClientStorage()
-        txn = Transaction()
-        storage.tpc_begin(txn)
-        storage.tpc_vote(txn)
-        storage.tpc_abort(txn)
-        storage.close()
-
-    def checkTimeoutOnAbortNoLock(self):
-        storage = self.openClientStorage()
-        txn = Transaction()
-        storage.tpc_begin(txn)
-        storage.tpc_abort(txn)
-        storage.close()
-
-    def checkTimeoutAfterVote(self):
-        raises = self.assertRaises
-        unless = self.failUnless
-        self._storage = storage = self.openClientStorage()
-        # Assert that the zeo cache is empty
-        unless(not list(storage._cache.contents()))
-        # Create the object
-        oid = storage.new_oid()
-        obj = MinPO(7)
-        # Now do a store, sleeping before the finish so as to cause a timeout
-        t = Transaction()
-        storage.tpc_begin(t)
-        revid1 = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
-        storage.tpc_vote(t)
-        # Now sleep long enough for the storage to time out
-        time.sleep(3)
-        storage.sync()
-        unless(not storage.is_connected())
-        storage._wait()
-        unless(storage.is_connected())
-        # We expect finish to fail
-        raises(ClientDisconnected, storage.tpc_finish, t)
-        # The cache should still be empty
-        unless(not list(storage._cache.contents()))
-        # Load should fail since the object should not be in either the cache
-        # or the server.
-        raises(KeyError, storage.load, oid, '')
-
-    def checkTimeoutProvokingConflicts(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        unless = self.failUnless
-        self._storage = storage = self.openClientStorage()
-        # Assert that the zeo cache is empty
-        unless(not list(storage._cache.contents()))
-        # Create the object
-        oid = storage.new_oid()
-        obj = MinPO(7)
-        # We need to successfully commit an object now so we have something to
-        # conflict about.
-        t = Transaction()
-        storage.tpc_begin(t)
-        revid1a = storage.store(oid, ZERO, zodb_pickle(obj), '', t)
-        revid1b = storage.tpc_vote(t)
-        revid1 = handle_serials(oid, revid1a, revid1b)
-        storage.tpc_finish(t)
-        # Now do a store, sleeping before the finish so as to cause a timeout
-        obj.value = 8
-        t = Transaction()
-        storage.tpc_begin(t)
-        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
-        revid2b = storage.tpc_vote(t)
-        revid2 = handle_serials(oid, revid2a, revid2b)
-        # Now sleep long enough for the storage to time out
-        time.sleep(3)
-        storage.sync()
-        unless(not storage.is_connected())
-        storage._wait()
-        unless(storage.is_connected())
-        # We expect finish to fail
-        raises(ClientDisconnected, storage.tpc_finish, t)
-        # Now we think we've committed the second transaction, but we really
-        # haven't.  A third one should produce a POSKeyError on the server,
-        # which manifests as a ConflictError on the client.
-        obj.value = 9
-        t = Transaction()
-        storage.tpc_begin(t)
-        storage.store(oid, revid2, zodb_pickle(obj), '', t)
-        raises(ConflictError, storage.tpc_vote, t)
-        # Even aborting won't help
-        storage.tpc_abort(t)
-        storage.tpc_finish(t)
-        # Try again
-        obj.value = 10
-        t = Transaction()
-        storage.tpc_begin(t)
-        storage.store(oid, revid2, zodb_pickle(obj), '', t)
-        # The vote should fail with a conflict again
-        raises(ConflictError, storage.tpc_vote, t)
-        # Abort this one and try a transaction that should succeed
-        storage.tpc_abort(t)
-        storage.tpc_finish(t)
-        # Now do a store, sleeping before the finish so as to cause a timeout
-        obj.value = 11
-        t = Transaction()
-        storage.tpc_begin(t)
-        revid2a = storage.store(oid, revid1, zodb_pickle(obj), '', t)
-        revid2b = storage.tpc_vote(t)
-        revid2 = handle_serials(oid, revid2a, revid2b)
-        storage.tpc_finish(t)
-        # Now load the object and verify that it has a value of 11
-        data, revid = storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(11))
-        eq(revid, revid2)
-
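-# NOTE (editor's sketch): the timeout tests above drive the storage's
-# two-phase commit API by hand.  Schematically, the bare sequence they
-# exercise is the following (illustration only; storage is an open
-# ClientStorage, and Transaction, ZERO, zodb_pickle and MinPO are the
-# helpers this module already uses):
-def _commit_one_object(storage, value):
-    t = Transaction()
-    storage.tpc_begin(t)            # acquire the commit lock
-    oid = storage.new_oid()
-    storage.store(oid, ZERO, zodb_pickle(MinPO(value)), '', t)
-    storage.tpc_vote(t)             # server promises it can commit
-    storage.tpc_finish(t)           # make the transaction durable
-    return oid
-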
-class MSTThread(threading.Thread):
-
-    __super_init = threading.Thread.__init__
-
-    def __init__(self, testcase, name):
-        self.__super_init(name=name)
-        self.testcase = testcase
-        self.clients = []
-
-    def run(self):
-        tname = self.getName()
-        testcase = self.testcase
-
-        # Create client connections to each server
-        clients = self.clients
-        for i in range(len(testcase.addr)):
-            c = testcase.openClientStorage(addr=testcase.addr[i])
-            c.__name = "C%d" % i
-            clients.append(c)
-
-        for i in range(testcase.ntrans):
-            # Because we want a transaction spanning all storages,
-            # we can't use _dostore().  This is several _dostore() calls
-            # expanded in-line (mostly).
-
-            # Create oid->serial mappings
-            for c in clients:
-                c.__oids = []
-                c.__serials = {}
-
-            # Begin a transaction
-            t = Transaction()
-            for c in clients:
-                #print "%s.%s.%s begin\n" % (tname, c.__name, i),
-                c.tpc_begin(t)
-
-            for j in range(testcase.nobj):
-                for c in clients:
-                    # Create and store a new object on each server
-                    oid = c.new_oid()
-                    c.__oids.append(oid)
-                    data = MinPO("%s.%s.t%d.o%d" % (tname, c.__name, i, j))
-                    #print data.value
-                    data = zodb_pickle(data)
-                    s = c.store(oid, ZERO, data, '', t)
-                    c.__serials.update(handle_all_serials(oid, s))
-
-            # Vote on all servers and handle serials
-            for c in clients:
-                #print "%s.%s.%s vote\n" % (tname, c.__name, i),
-                s = c.tpc_vote(t)
-                c.__serials.update(handle_all_serials(None, s))
-
-            # Finish on all servers
-            for c in clients:
-                #print "%s.%s.%s finish\n" % (tname, c.__name, i),
-                c.tpc_finish(t)
-
-            for c in clients:
-                # Check that we got serials for all oids
-                for oid in c.__oids:
-                    testcase.failUnless(c.__serials.has_key(oid))
-                # Check that we got serials for no other oids
-                for oid in c.__serials.keys():
-                    testcase.failUnless(oid in c.__oids)
-
-    def closeclients(self):
-        # Close clients opened by run()
-        for c in self.clients:
-            try:
-                c.close()
-            except:
-                pass
diff --git a/branches/bug1734/src/ZEO/tests/InvalidationTests.py b/branches/bug1734/src/ZEO/tests/InvalidationTests.py
deleted file mode 100644
index 70a74130..00000000
--- a/branches/bug1734/src/ZEO/tests/InvalidationTests.py
+++ /dev/null
@@ -1,608 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import threading
-import time
-from random import Random
-
-import transaction
-
-from BTrees.check import check, display
-from BTrees.OOBTree import OOBTree
-
-from ZEO.tests.TestThread import TestThread
-
-from ZODB.DB import DB
-from ZODB.POSException \
-     import ReadConflictError, ConflictError, VersionLockError
-
-# The tests here let several threads have a go at one or more database
-# instances simultaneously.  Each thread appends a disjoint (from the
-# other threads) sequence of increasing integers to an OOBTree, one at
-# a time (per thread).  This provokes lots of conflicts, and BTrees
-# work hard at conflict resolution too.  An OOBTree is used because
-# that flavor has the smallest maximum bucket size, and so splits buckets
-# more often than other BTree flavors.
-#
-# When these tests were first written, they provoked an amazing number
-# of obscure timing-related bugs in cache consistency logic, revealed
-# by failure of the BTree to pass internal consistency checks at the end,
-# and/or by failure of the BTree to contain all the keys the threads
-# thought they added (i.e., the keys for which transaction.commit()
-# did not raise any exception).
-
-class FailableThread(TestThread):
-
-    # mixin class
-    # subclass must provide
-    # - self.stop attribute (an event)
-    # - self._testrun() method
-
-    # TestThread.run() invokes testrun().
-    def testrun(self):
-        try:
-            self._testrun()
-        except:
-            # Report the failure here to all the other threads, so
-            # that they stop quickly.
-            self.stop.set()
-            raise
-
-
-class StressTask:
-    # Append integers startnum, startnum + step, startnum + 2*step, ...
-    # to 'tree'.  If sleep is given, sleep
-    # that long after each append.  At the end, instance var .added_keys
-    # is a list of the ints the thread believes it added successfully.
-    def __init__(self, db, threadnum, startnum, step=2, sleep=None):
-        self.db = db
-        self.threadnum = threadnum
-        self.startnum = startnum
-        self.step = step
-        self.sleep = sleep
-        self.added_keys = []
-        self.tm = transaction.TransactionManager()
-        self.cn = self.db.open(txn_mgr=self.tm)
-        self.cn.sync()
-
-    def doStep(self):
-        tree = self.cn.root()["tree"]
-        key = self.startnum
-        tree[key] = self.threadnum
-
-    def commit(self):
-        cn = self.cn
-        key = self.startnum
-        self.tm.get().note("add key %s" % key)
-        try:
-            self.tm.get().commit()
-        except ConflictError, msg:
-            self.tm.get().abort()
-            cn.sync()
-        else:
-            if self.sleep:
-                time.sleep(self.sleep)
-            self.added_keys.append(key)
-        self.startnum += self.step
-
-    def cleanup(self):
-        self.tm.get().abort()
-        self.cn.close()
-
-def _runTasks(rounds, *tasks):
-    '''Run *tasks* interleaved for *rounds* rounds.'''
-    def commit(run, actions):
-        actions.append(':')
-        for t in run:
-            t.commit()
-        del run[:]
-    r = Random()
-    r.seed(1064589285) # make it deterministic
-    run = []
-    actions = []
-    try:
-        for i in range(rounds):
-            t = r.choice(tasks)
-            if t in run:
-                commit(run, actions)
-            run.append(t)
-            t.doStep()
-            actions.append(`t.startnum`)
-        commit(run, actions)
-        # stderr.write(' '.join(actions)+'\n')
-    finally:
-        for t in tasks:
-            t.cleanup()
-
-
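-# NOTE (editor's sketch): typical use of _runTasks(), mirroring
-# checkConcurrentUpdates2Storages_emulated() below.  Illustrative only:
-# db1 and db2 are two open ZODB.DB instances whose root already holds
-# the "tree" OOBTree that StressTask.doStep() expects.
-def _stress_two_databases(db1, db2, rounds=100):
-    t1 = StressTask(db1, 1, 1)  # threadnum 1 appends keys 1, 3, 5, ...
-    t2 = StressTask(db2, 2, 2)  # threadnum 2 appends keys 2, 4, 6, ...
-    _runTasks(rounds, t1, t2)
-    return t1.added_keys, t2.added_keys
-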
-class StressThread(FailableThread):
-
-    # Append integers startnum, startnum + step, startnum + 2*step, ...
-    # to 'tree' until Event stop is set.  If sleep is given, sleep
-    # that long after each append.  At the end, instance var .added_keys
-    # is a list of the ints the thread believes it added successfully.
-    def __init__(self, db, stop, threadnum, commitdict,
-                 startnum, step=2, sleep=None):
-        TestThread.__init__(self)
-        self.db = db
-        self.stop = stop
-        self.threadnum = threadnum
-        self.startnum = startnum
-        self.step = step
-        self.sleep = sleep
-        self.added_keys = []
-        self.commitdict = commitdict
-
-    def _testrun(self):
-        cn = self.db.open()
-        while not self.stop.isSet():
-            try:
-                tree = cn.root()["tree"]
-                break
-            except (ConflictError, KeyError):
-                transaction.abort()
-                cn.sync()
-        key = self.startnum
-        while not self.stop.isSet():
-            try:
-                tree[key] = self.threadnum
-                transaction.get().note("add key %s" % key)
-                transaction.commit()
-                self.commitdict[self] = 1
-                if self.sleep:
-                    time.sleep(self.sleep)
-            except (ReadConflictError, ConflictError), msg:
-                transaction.abort()
-                # sync() is necessary here to process invalidations
-                # if we get a read conflict.  In the read conflict case,
-                # no objects were modified so cn never got registered
-                # with the transaction.
-                cn.sync()
-            else:
-                self.added_keys.append(key)
-            key += self.step
-        cn.close()
-
-class LargeUpdatesThread(FailableThread):
-
-    # A thread that performs a lot of updates.  It attempts to modify
-    # more than 25 objects so that it can test code that runs vote
-    # in a separate thread when it modifies more than 25 objects.
-
-    def __init__(self, db, stop, threadnum, commitdict, startnum,
-                 step=2, sleep=None):
-        TestThread.__init__(self)
-        self.db = db
-        self.stop = stop
-        self.threadnum = threadnum
-        self.startnum = startnum
-        self.step = step
-        self.sleep = sleep
-        self.added_keys = []
-        self.commitdict = commitdict
-
-    def _testrun(self):
-        cn = self.db.open()
-        while not self.stop.isSet():
-            try:
-                tree = cn.root()["tree"]
-                break
-            except (ConflictError, KeyError):
-                # print "%d getting tree abort" % self.threadnum
-                transaction.abort()
-                cn.sync()
-
-        keys_added = {} # set of keys we commit
-        tkeys = []
-        while not self.stop.isSet():
-
-            # The test picks 50 keys spread across many buckets.
-            # self.startnum and self.step ensure that all threads use
-            # disjoint key sets, to minimize conflict errors.
-
-            nkeys = len(tkeys)
-            if nkeys < 50:
-                tkeys = range(self.startnum, 3000, self.step)
-                nkeys = len(tkeys)
-            step = max(int(nkeys / 50), 1)
-            keys = [tkeys[i] for i in range(0, nkeys, step)]
-            for key in keys:
-                try:
-                    tree[key] = self.threadnum
-                except (ReadConflictError, ConflictError), msg:
-                    # print "%d setting key %s" % (self.threadnum, msg)
-                    transaction.abort()
-                    cn.sync()
-                    break
-            else:
-                # print "%d set #%d" % (self.threadnum, len(keys))
-                transaction.get().note("keys %s" % ", ".join(map(str, keys)))
-                try:
-                    transaction.commit()
-                    self.commitdict[self] = 1
-                    if self.sleep:
-                        time.sleep(self.sleep)
-                except ConflictError, msg:
-                    # print "%d commit %s" % (self.threadnum, msg)
-                    transaction.abort()
-                    cn.sync()
-                    continue
-                for k in keys:
-                    tkeys.remove(k)
-                    keys_added[k] = 1
-                # sync() is necessary here to process invalidations
-                # if we get a read conflict.  In the read conflict case,
-                # no objects were modified so cn never got registered
-                # with the transaction.
-                cn.sync()
-        self.added_keys = keys_added.keys()
-        cn.close()
-
-class VersionStressThread(FailableThread):
-
-    def __init__(self, db, stop, threadnum, commitdict, startnum,
-                 step=2, sleep=None):
-        TestThread.__init__(self)
-        self.db = db
-        self.stop = stop
-        self.threadnum = threadnum
-        self.startnum = startnum
-        self.step = step
-        self.sleep = sleep
-        self.added_keys = []
-        self.commitdict = commitdict
-
-    def _testrun(self):
-        commit = 0
-        key = self.startnum
-        while not self.stop.isSet():
-            version = "%s:%s" % (self.threadnum, key)
-            commit = not commit
-            if self.oneupdate(version, key, commit):
-                self.added_keys.append(key)
-                self.commitdict[self] = 1
-            key += self.step
-
-    def oneupdate(self, version, key, commit=1):
-        # The mess of sleeps below was added to reduce the number
-        # of VersionLockErrors, based on empirical observation.
-        # It looks like the threads don't switch enough without
-        # the sleeps.
-
-        cn = self.db.open(version)
-        while not self.stop.isSet():
-            try:
-                tree = cn.root()["tree"]
-                break
-            except (ConflictError, KeyError):
-                transaction.abort()
-                cn.sync()
-        while not self.stop.isSet():
-            try:
-                tree[key] = self.threadnum
-                transaction.commit()
-                if self.sleep:
-                    time.sleep(self.sleep)
-                break
-            except (VersionLockError, ReadConflictError, ConflictError), msg:
-                transaction.abort()
-                # sync() is necessary here to process invalidations
-                # if we get a read conflict.  In the read conflict case,
-                # no objects were modified so cn never got registered
-                # with the transaction.
-                cn.sync()
-                if self.sleep:
-                    time.sleep(self.sleep)
-        try:
-            while not self.stop.isSet():
-                try:
-                    if commit:
-                        self.db.commitVersion(version)
-                        transaction.get().note("commit version %s" % version)
-                    else:
-                        self.db.abortVersion(version)
-                        transaction.get().note("abort version %s" % version)
-                    transaction.commit()
-                    if self.sleep:
-                        time.sleep(self.sleep)
-                    return commit
-                except ConflictError, msg:
-                    transaction.abort()
-                    cn.sync()
-        finally:
-            cn.close()
-        return 0
-
-class InvalidationTests:
-
-    level = 2
-
-    # Minimum # of seconds the main thread lets the workers run.  The
-    # test stops as soon as this much time has elapsed, and all threads
-    # have managed to commit a change.
-    MINTIME = 10
-
-    # Maximum # of seconds the main thread lets the workers run.  We
-    # stop after this long has elapsed regardless of whether all threads
-    # have managed to commit a change.
-    MAXTIME = 300
-
-    StressThread = StressThread
-
-    def _check_tree(self, cn, tree):
-        # Make sure the BTree is sane at the C level.
-        retries = 3
-        while retries:
-            retries -= 1
-            try:
-                check(tree)
-                tree._check()
-            except ReadConflictError:
-                if retries:
-                    transaction.abort()
-                    cn.sync()
-                else:
-                    raise
-            except:
-                display(tree)
-                raise
-
-    def _check_threads(self, tree, *threads):
-        # Make sure the thread's view of the world is consistent with
-        # the actual database state.
-        expected_keys = []
-        errormsgs = []
-        err = errormsgs.append
-        for t in threads:
-            if not t.added_keys:
-                err("thread %d didn't add any keys" % t.threadnum)
-            expected_keys.extend(t.added_keys)
-        expected_keys.sort()
-        actual_keys = list(tree.keys())
-        if expected_keys != actual_keys:
-            err("expected keys != actual keys")
-            for k in expected_keys:
-                if k not in actual_keys:
-                    err("key %s expected but not in tree" % k)
-            for k in actual_keys:
-                if k not in expected_keys:
-                    err("key %s in tree but not expected" % k)
-        if errormsgs:
-            display(tree)
-            self.fail('\n'.join(errormsgs))
-
-    def go(self, stop, commitdict, *threads):
-        # Run the threads
-        for t in threads:
-            t.start()
-        delay = self.MINTIME
-        start = time.time()
-        while time.time() - start <= self.MAXTIME:
-            stop.wait(delay)
-            if stop.isSet():
-                # Some thread failed.  Stop right now.
-                break
-            delay = 2.0
-            if len(commitdict) >= len(threads):
-                break
-            # Some thread still hasn't managed to commit anything.
-        stop.set()
-        # Give all the threads some time to stop before trying to clean up.
-        # cleanup() will cause the test to fail if some thread ended with
-        # an uncaught exception, and unittest will then immediately call
-        # the base class tearDown; but if other threads are still
-        # running, that can lead to a cascade of spurious exceptions.
-        for t in threads:
-            t.join(10)
-        for t in threads:
-            t.cleanup()
-
-    def checkConcurrentUpdates2Storages_emulated(self):
-        self._storage = storage1 = self.openClientStorage()
-        storage2 = self.openClientStorage()
-        db1 = DB(storage1)
-        db2 = DB(storage2)
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        transaction.commit()
-        # DM: allow time for invalidations to come in and process them
-        time.sleep(0.1)
-
-        # Run two threads that update the BTree
-        t1 = StressTask(db1, 1, 1,)
-        t2 = StressTask(db2, 2, 2,)
-        _runTasks(100, t1, t2)
-
-        cn.sync()
-        self._check_tree(cn, tree)
-        self._check_threads(tree, t1, t2)
-
-        cn.close()
-        db1.close()
-        db2.close()
-
-    def checkConcurrentUpdates2Storages(self):
-        self._storage = storage1 = self.openClientStorage()
-        storage2 = self.openClientStorage()
-        db1 = DB(storage1)
-        db2 = DB(storage2)
-        stop = threading.Event()
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        transaction.commit()
-        cn.close()
-
-        # Run two threads that update the BTree
-        cd = {}
-        t1 = self.StressThread(db1, stop, 1, cd, 1)
-        t2 = self.StressThread(db2, stop, 2, cd, 2)
-        self.go(stop, cd, t1, t2)
-
-        while db1.lastTransaction() != db2.lastTransaction():
-            db1._storage.sync()
-            db2._storage.sync()
-
-        cn = db1.open()
-        tree = cn.root()["tree"]
-        self._check_tree(cn, tree)
-        self._check_threads(tree, t1, t2)
-
-        cn.close()
-        db1.close()
-        db2.close()
-
-    def checkConcurrentUpdates1Storage(self):
-        self._storage = storage1 = self.openClientStorage()
-        db1 = DB(storage1)
-        stop = threading.Event()
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        transaction.commit()
-        cn.close()
-
-        # Run two threads that update the BTree
-        cd = {}
-        t1 = self.StressThread(db1, stop, 1, cd, 1, sleep=0.01)
-        t2 = self.StressThread(db1, stop, 2, cd, 2, sleep=0.01)
-        self.go(stop, cd, t1, t2)
-
-        cn = db1.open()
-        tree = cn.root()["tree"]
-        self._check_tree(cn, tree)
-        self._check_threads(tree, t1, t2)
-
-        cn.close()
-        db1.close()
-
-    def checkConcurrentUpdates2StoragesMT(self):
-        self._storage = storage1 = self.openClientStorage()
-        db1 = DB(storage1)
-        db2 = DB(self.openClientStorage())
-        stop = threading.Event()
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        transaction.commit()
-        cn.close()
-
-        # Run three threads that update the BTree.
-        # Two of the threads share a single storage so that it
-        # is possible for both threads to read the same object
-        # at the same time.
-
-        cd = {}
-        t1 = self.StressThread(db1, stop, 1, cd, 1, 3)
-        t2 = self.StressThread(db2, stop, 2, cd, 2, 3, 0.01)
-        t3 = self.StressThread(db2, stop, 3, cd, 3, 3, 0.01)
-        self.go(stop, cd, t1, t2, t3)
-
-        while db1.lastTransaction() != db2.lastTransaction():
-            db1._storage.sync()
-            db2._storage.sync()
-
-
-        cn = db1.open()
-        tree = cn.root()["tree"]
-        self._check_tree(cn, tree)
-        self._check_threads(tree, t1, t2, t3)
-
-        cn.close()
-        db1.close()
-        db2.close()
-
-    # TODO:  Temporarily disabled.  I know it fails, and there's no point
-    # getting an endless number of reports about that.
-    def xxxcheckConcurrentUpdatesInVersions(self):
-        self._storage = storage1 = self.openClientStorage()
-        db1 = DB(storage1)
-        db2 = DB(self.openClientStorage())
-        stop = threading.Event()
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        transaction.commit()
-        cn.close()
-
-        # Run three threads that update the BTree.
-        # Two of the threads share a single storage so that it
-        # is possible for both threads to read the same object
-        # at the same time.
-
-        cd = {}
-        t1 = VersionStressThread(db1, stop, 1, cd, 1, 3)
-        t2 = VersionStressThread(db2, stop, 2, cd, 2, 3, 0.01)
-        t3 = VersionStressThread(db2, stop, 3, cd, 3, 3, 0.01)
-        self.go(stop, cd, t1, t2, t3)
-
-        while db1.lastTransaction() != db2.lastTransaction():
-            db1._storage.sync()
-            db2._storage.sync()
-
-
-        cn = db1.open()
-        tree = cn.root()["tree"]
-        self._check_tree(cn, tree)
-        self._check_threads(tree, t1, t2, t3)
-
-        cn.close()
-        db1.close()
-        db2.close()
-
-    def checkConcurrentLargeUpdates(self):
-        # Use 3 threads like the 2StorageMT test above.
-        self._storage = storage1 = self.openClientStorage()
-        db1 = DB(storage1)
-        db2 = DB(self.openClientStorage())
-        stop = threading.Event()
-
-        cn = db1.open()
-        tree = cn.root()["tree"] = OOBTree()
-        for i in range(0, 3000, 2):
-            tree[i] = 0
-        transaction.commit()
-        cn.close()
-
-        # Run three threads that update the BTree.
-        # Two of the threads share a single storage so that it
-        # is possible for both threads to read the same object
-        # at the same time.
-
-        cd = {}
-        t1 = LargeUpdatesThread(db1, stop, 1, cd, 1, 3, 0.02)
-        t2 = LargeUpdatesThread(db2, stop, 2, cd, 2, 3, 0.01)
-        t3 = LargeUpdatesThread(db2, stop, 3, cd, 3, 3, 0.01)
-        self.go(stop, cd, t1, t2, t3)
-
-        while db1.lastTransaction() != db2.lastTransaction():
-            db1._storage.sync()
-            db2._storage.sync()
-
-        cn = db1.open()
-        tree = cn.root()["tree"]
-        self._check_tree(cn, tree)
-
-        # Purge the tree of the dummy entries mapping to 0.
-        losers = [k for k, v in tree.items() if v == 0]
-        for k in losers:
-            del tree[k]
-        transaction.commit()
-
-        self._check_threads(tree, t1, t2, t3)
-
-        cn.close()
-        db1.close()
-        db2.close()
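-
-# NOTE (editor's sketch): _check_tree() above combines two different
-# BTree sanity checks.  A standalone illustration, not part of the
-# tests (OOBTree and check are already imported by this module):
-def _btree_selfcheck_demo():
-    t = OOBTree()
-    for i in range(100):
-        t[i] = i
-    check(t)      # logical check: keys/values consistent with structure
-    t._check()    # C-level invariant check (bucket sizes, ordering)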
diff --git a/branches/bug1734/src/ZEO/tests/TestThread.py b/branches/bug1734/src/ZEO/tests/TestThread.py
deleted file mode 100644
index 9b3aa6f5..00000000
--- a/branches/bug1734/src/ZEO/tests/TestThread.py
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A Thread base class for use with unittest."""
-
-import threading
-import sys
-
-class TestThread(threading.Thread):
-    """Base class for defining threads that run from unittest.
-
-    The subclass should define a testrun() method instead of a run()
-    method.
-
-    Call cleanup() when the test is done with the thread, instead of join().
-    If the thread exits with an uncaught exception, it's captured and
-    re-raised when cleanup() is called.  cleanup() should be called by
-    the main thread!  Trying to tell unittest that a test failed from
-    another thread creates a nightmare of timing-depending cascading
-    failures and missed errors (tracebacks that show up on the screen,
-    but don't cause unittest to believe the test failed).
-
-    cleanup() also joins the thread.  If the thread ended without raising
-    an uncaught exception, and the join doesn't succeed in the timeout
-    period, then the test is made to fail with a "Thread did not finish"
-    message.
-    """
-
-    def __init__(self):
-        threading.Thread.__init__(self)
-        # In case this thread hangs, don't stop Python from exiting.
-        self.setDaemon(1)
-        self._exc_info = None
-
-    def run(self):
-        try:
-            self.testrun()
-        except:
-            self._exc_info = sys.exc_info()
-
-    def cleanup(self, timeout=15):
-        self.join(timeout)
-        if self._exc_info:
-            raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
-        if self.isAlive():
-            # No TestCase reference is stored on this thread, so raise
-            # an AssertionError directly; unittest reports it as a
-            # failure.
-            raise AssertionError("Thread did not finish: %s" % self)
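-
-# NOTE (editor's sketch): minimal intended usage of TestThread
-# (illustrative only; not part of the original module):
-class _ExampleThread(TestThread):
-    def testrun(self):
-        assert 1 + 1 == 2   # the thread's work; may raise
-
-# In a test method one would write:
-#     t = _ExampleThread()
-#     t.start()
-#     ...                  # exercise the code under test
-#     t.cleanup()          # join, and re-raise any captured exception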
diff --git a/branches/bug1734/src/ZEO/tests/ThreadTests.py b/branches/bug1734/src/ZEO/tests/ThreadTests.py
deleted file mode 100644
index 41e8b919..00000000
--- a/branches/bug1734/src/ZEO/tests/ThreadTests.py
+++ /dev/null
@@ -1,132 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Compromising positions involving threads."""
-
-import threading
-
-import transaction
-from ZODB.tests.StorageTestBase import zodb_pickle, MinPO
-import ZEO.ClientStorage
-
-ZERO = '\0'*8
-
-class BasicThread(threading.Thread):
-    def __init__(self, storage, doNextEvent, threadStartedEvent):
-        self.storage = storage
-        self.trans = transaction.Transaction()
-        self.doNextEvent = doNextEvent
-        self.threadStartedEvent = threadStartedEvent
-        self.gotValueError = 0
-        self.gotDisconnected = 0
-        threading.Thread.__init__(self)
-        self.setDaemon(1)
-
-    def join(self):
-        threading.Thread.join(self, 10)
-        assert not self.isAlive()
-
-
-class GetsThroughVoteThread(BasicThread):
-    # This thread gets partially through a transaction before it turns
-    # execution over to another thread.  We're trying to establish that a
-    # tpc_finish() after a storage has been closed by another thread will get
-    # a ClientStorageError error.
-    #
-    # This class does a tpc_begin(), store(), and tpc_vote(), and is
-    # waiting to do the tpc_finish() when the other thread closes the
-    # storage.
-    def run(self):
-        self.storage.tpc_begin(self.trans)
-        oid = self.storage.new_oid()
-        self.storage.store(oid, ZERO, zodb_pickle(MinPO("c")), '', self.trans)
-        self.storage.tpc_vote(self.trans)
-        self.threadStartedEvent.set()
-        self.doNextEvent.wait(10)
-        try:
-            self.storage.tpc_finish(self.trans)
-        except ZEO.ClientStorage.ClientStorageError:
-            self.gotValueError = 1
-            self.storage.tpc_abort(self.trans)
-
-
-class GetsThroughBeginThread(BasicThread):
-    # This class is like the above except that it is intended to be run when
-    # another thread is already in a tpc_begin().  Thus, this thread will
-    # block in the tpc_begin until another thread closes the storage.  When
-    # that happens, this one will get disconnected too.
-    def run(self):
-        try:
-            self.storage.tpc_begin(self.trans)
-        except ZEO.ClientStorage.ClientStorageError:
-            self.gotValueError = 1
-
-
-class ThreadTests:
-    # Thread 1 should start a transaction, but not get all the way through it.
-    # Main thread should close the connection.  Thread 1 should then get
-    # disconnected.
-    def checkDisconnectedOnThread2Close(self):
-        doNextEvent = threading.Event()
-        threadStartedEvent = threading.Event()
-        thread1 = GetsThroughVoteThread(self._storage,
-                                        doNextEvent, threadStartedEvent)
-        thread1.start()
-        threadStartedEvent.wait(10)
-        self._storage.close()
-        doNextEvent.set()
-        thread1.join()
-        self.assertEqual(thread1.gotValueError, 1)
-
-    # Thread 1 should start a transaction, but not get all the way through
-    # it.  While thread 1 is in the middle of the transaction, a second thread
-    # should start a transaction, and it will block in the tpc_begin() --
-    # because thread 1 has acquired the lock in its tpc_begin().  Now the main
-    # thread closes the storage and both sub-threads should get disconnected.
-    def checkSecondBeginFails(self):
-        doNextEvent = threading.Event()
-        threadStartedEvent = threading.Event()
-        thread1 = GetsThroughVoteThread(self._storage,
-                                        doNextEvent, threadStartedEvent)
-        thread2 = GetsThroughBeginThread(self._storage,
-                                         doNextEvent, threadStartedEvent)
-        thread1.start()
-        threadStartedEvent.wait(1)
-        thread2.start()
-        self._storage.close()
-        doNextEvent.set()
-        thread1.join()
-        thread2.join()
-        self.assertEqual(thread1.gotValueError, 1)
-        self.assertEqual(thread2.gotValueError, 1)
-
-    # Run a bunch of threads doing small and large stores in parallel
-    def checkMTStores(self):
-        threads = []
-        for i in range(5):
-            t = threading.Thread(target=self.mtstorehelper)
-            threads.append(t)
-            t.start()
-        for t in threads:
-            t.join(30)
-        for t in threads:
-            self.failUnless(not t.isAlive())
-
-    # Helper for checkMTStores
-    def mtstorehelper(self):
-        name = threading.currentThread().getName()
-        objs = []
-        for i in range(10):
-            objs.append(MinPO("X" * 200000))
-            objs.append(MinPO("X"))
-        for obj in objs:
-            self._dostore(data=obj)
diff --git a/branches/bug1734/src/ZEO/tests/__init__.py b/branches/bug1734/src/ZEO/tests/__init__.py
deleted file mode 100644
index 43cf0e3b..00000000
--- a/branches/bug1734/src/ZEO/tests/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
diff --git a/branches/bug1734/src/ZEO/tests/auth_plaintext.py b/branches/bug1734/src/ZEO/tests/auth_plaintext.py
deleted file mode 100644
index 50d4bbde..00000000
--- a/branches/bug1734/src/ZEO/tests/auth_plaintext.py
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Implements plaintext password authentication. The password is stored in
-an SHA hash in the Database. The client sends over the plaintext
-password, and the SHA hashing is done on the server side.
-
-This mechanism offers *no network security at all*; the only security
-is provided by not storing plaintext passwords on disk.
-"""
-
-import sha
-
-from ZEO.StorageServer import ZEOStorage
-from ZEO.auth import register_module
-from ZEO.auth.base import Client, Database
-
-def session_key(username, realm, password):
-    return sha.new("%s:%s:%s" % (username, realm, password)).hexdigest()
-
-class StorageClass(ZEOStorage):
-
-    def auth(self, username, password):
-        try:
-            dbpw = self.database.get_password(username)
-        except LookupError:
-            return 0
-
-        password_dig = sha.new(password).hexdigest()
-        if dbpw == password_dig:
-            self.connection.setSessionKey(session_key(username,
-                                                      self.database.realm,
-                                                      password))
-        return self.finish_auth(dbpw == password_dig)
-
-class PlaintextClient(Client):
-    extensions = ["auth"]
-
-    def start(self, username, realm, password):
-        if self.stub.auth(username, password):
-            return session_key(username, realm, password)
-        else:
-            return None
-
-register_module("plaintext", StorageClass, PlaintextClient, Database)
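-
-# NOTE (editor's sketch): both ends derive the same session key from the
-# same inputs.  A tiny demonstration with made-up credentials
-# (illustration only):
-def _session_key_demo():
-    k_client = session_key("alice", "test-realm", "sekrit")
-    k_server = sha.new("alice:test-realm:sekrit").hexdigest()
-    assert k_client == k_server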
diff --git a/branches/bug1734/src/ZEO/tests/deadlock.py b/branches/bug1734/src/ZEO/tests/deadlock.py
deleted file mode 100644
index 5516c9b7..00000000
--- a/branches/bug1734/src/ZEO/tests/deadlock.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import ZODB
-from ZODB.POSException import ConflictError
-from ZEO.ClientStorage import ClientStorage, ClientDisconnected
-from ZEO.zrpc.error import DisconnectedError
-
-import os
-import random
-import time
-import transaction
-
-L = range(1, 100)
-
-def main():
-    z1 = ClientStorage(('localhost', 2001), wait=1)
-    z2 = ClientStorage(('localhost', 2002), wait=2)
-    db1 = ZODB.DB(z1)
-    db2 = ZODB.DB(z2)
-    c1 = db1.open()
-    c2 = db2.open()
-    r1 = c1.root()
-    r2 = c2.root()
-
-    while 1:
-        try:
-            try:
-                update(r1, r2)
-            except ConflictError, msg:
-                print msg
-                transaction.abort()
-                c1.sync()
-                c2.sync()
-        except (ClientDisconnected, DisconnectedError), err:
-            print "disconnected", err
-            time.sleep(2)
-
-def update(r1, r2):
-    k1 = random.choice(L)
-    k2 = random.choice(L)
-
-    updates = [(k1, r1),
-               (k2, r2)]
-    random.shuffle(updates)
-    for key, root in updates:
-        root[key] = time.time()
-    transaction.commit()
-    print os.getpid(), k1, k2
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZEO/tests/forker.py b/branches/bug1734/src/ZEO/tests/forker.py
deleted file mode 100644
index c3bbeaf7..00000000
--- a/branches/bug1734/src/ZEO/tests/forker.py
+++ /dev/null
@@ -1,192 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Library for forking storage server and connecting client storage"""
-
-import os
-import sys
-import time
-import errno
-import socket
-import logging
-import StringIO
-import tempfile
-
-logger = logging.getLogger('ZEO.tests.forker')
-
-class ZEOConfig:
-    """Class to generate ZEO configuration file. """
-
-    def __init__(self, addr):
-        self.address = addr
-        self.read_only = None
-        self.invalidation_queue_size = None
-        self.monitor_address = None
-        self.transaction_timeout = None
-        self.authentication_protocol = None
-        self.authentication_database = None
-        self.authentication_realm = None
-
-    def dump(self, f):
-        print >> f, "<zeo>"
-        print >> f, "address %s:%s" % self.address
-        if self.read_only is not None:
-            print >> f, "read-only", self.read_only and "true" or "false"
-        if self.invalidation_queue_size is not None:
-            print >> f, "invalidation-queue-size", self.invalidation_queue_size
-        if self.monitor_address is not None:
-            print >> f, "monitor-address %s:%s" % self.monitor_address
-        if self.transaction_timeout is not None:
-            print >> f, "transaction-timeout", self.transaction_timeout
-        if self.authentication_protocol is not None:
-            print >> f, "authentication-protocol", self.authentication_protocol
-        if self.authentication_database is not None:
-            print >> f, "authentication-database", self.authentication_database
-        if self.authentication_realm is not None:
-            print >> f, "authentication-realm", self.authentication_realm
-        print >> f, "</zeo>"
-
-        logger = logging.getLogger()
-        print >> f
-        print >> f, "<eventlog>"
-        print >> f, "level", logger.level
-        for handler in logger.handlers:
-            if isinstance(handler, logging.FileHandler):
-                path = handler.baseFilename
-            elif isinstance(handler, logging.StreamHandler):
-                stream = handler.stream
-                if stream.name == "<stdout>":
-                    path = "STDOUT"
-                elif stream.name == "<stderr>":
-                    path = "STDERR"
-                else:
-                    # just drop it on the floor; unlikely an issue when testing
-                    continue
-            else:
-                # just drop it on the floor; unlikely an issue when testing
-                continue
-            # This doesn't convert the level values to names, so the
-            # generated configuration isn't as nice as it could be,
-            # but it doesn't really need to be.
-            print >> f, "<logfile>"
-            print >> f, "level", handler.level
-            print >> f, "path ", path
-            if handler.formatter:
-                formatter = handler.formatter
-                if formatter._fmt:
-                    print >> f, "format", encode_format(formatter._fmt)
-                if formatter.datefmt:
-                    print >> f, "dateformat", encode_format(formatter.datefmt)
-            print >> f, "</logfile>"
-        print >> f, "</eventlog>"
-
-    def __str__(self):
-        f = StringIO.StringIO()
-        self.dump(f)
-        return f.getvalue()
-
-
-def encode_format(fmt):
-    # The list of replacements mirrors
-    # ZConfig.components.logger.handlers._control_char_rewrites
-    for xform in (("\n", r"\n"), ("\t", r"\t"), ("\b", r"\b"),
-                  ("\f", r"\f"), ("\r", r"\r")):
-        fmt = fmt.replace(*xform)
-    return fmt
-
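-# NOTE (editor's sketch): encode_format() escapes literal control
-# characters so a logging format string survives the round-trip through
-# the generated config file.  For illustration only:
-def _encode_format_demo():
-    assert encode_format("%(asctime)s\t%(message)s\n") == \
-           r"%(asctime)s\t%(message)s\n"
-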
-
-def start_zeo_server(storage_conf, zeo_conf, port, keep=0):
-    """Start a ZEO server in a separate process.
-
-    Takes a string containing the storage configuration, a ZEOConfig
-    object, the port to listen on, and an optional keep flag.
-
-    Returns the ZEO address, the admin address, the pid, and the path
-    to the config file.
-    """
-
-    # Store the config info in a temp file.
-    tmpfile = tempfile.mktemp(".conf")
-    fp = open(tmpfile, 'w')
-    zeo_conf.dump(fp)
-    fp.write(storage_conf)
-    fp.close()
-
-    # Find the zeoserver script
-    import ZEO.tests.zeoserver
-    script = ZEO.tests.zeoserver.__file__
-    if script.endswith('.pyc'):
-        script = script[:-1]
-
-    # Create a list of arguments, which we'll tuplify below
-    qa = _quote_arg
-    args = [qa(sys.executable), qa(script), '-C', qa(tmpfile)]
-    if keep:
-        args.append("-k")
-    d = os.environ.copy()
-    d['PYTHONPATH'] = os.pathsep.join(sys.path)
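-    # P_NOWAIT starts the child and returns its pid immediately; the loop
-    # below polls the admin port until the server is ready to accept.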
-    pid = os.spawnve(os.P_NOWAIT, sys.executable, tuple(args), d)
-    adminaddr = ('localhost', port + 1)
-    # We need to wait until the server starts, but not forever.
-    # 30 seconds is a somewhat arbitrary upper bound.  A BDBStorage
-    # takes a long time to open -- more than 10 seconds on occasion.
-    for i in range(120):
-        time.sleep(0.25)
-        try:
-            logger.debug('connect %s', i)
-            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            s.connect(adminaddr)
-            ack = s.recv(1024)
-            s.close()
-            logger.debug('acked: %s' % ack)
-            break
-        except socket.error, e:
-            if e[0] not in (errno.ECONNREFUSED, errno.ECONNRESET):
-                raise
-            s.close()
-    else:
-        # Never connected; re-raise the last socket.error from the
-        # connect attempts above.
-        logger.debug('unable to connect to ZEO server; giving up')
-        raise
-    return ('localhost', port), adminaddr, pid, tmpfile
-
-
-if sys.platform[:3].lower() == "win":
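-    # spawnve on Windows joins the argument list into a single command
-    # line, so arguments that might contain spaces need quoting.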
-    def _quote_arg(s):
-        return '"%s"' % s
-else:
-    def _quote_arg(s):
-        return s
-
-
-def shutdown_zeo_server(adminaddr):
-    # Do this in a loop to guard against the possibility that the
-    # client failed to connect to the adminaddr earlier.  That really
-    # only requires two iterations, but do a third for pure
-    # superstition.
-    for i in range(3):
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        try:
-            s.connect(adminaddr)
-        except socket.error, e:
-            if e[0] == errno.ECONNREFUSED and i > 0:
-                break
-            raise
-        try:
-            ack = s.recv(1024)
-        except socket.error, e:
-            if e[0] == errno.ECONNRESET:
-                raise
-            ack = 'no ack received'
-        logger.debug('shutdown_zeo_server(): acked: %s' % ack)
-        s.close()
diff --git a/branches/bug1734/src/ZEO/tests/multi.py b/branches/bug1734/src/ZEO/tests/multi.py
deleted file mode 100644
index 6aeeb3b6..00000000
--- a/branches/bug1734/src/ZEO/tests/multi.py
+++ /dev/null
@@ -1,158 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A multi-client test of the ZEO storage server"""
-# TODO:  This code is currently broken.
-
-import ZODB, ZODB.DB, ZODB.FileStorage, ZODB.POSException
-import persistent
-import persistent.mapping
-import transaction
-from ZEO.tests import forker
-
-import os
-import tempfile
-import time
-import types
-
-VERBOSE = 1
-CLIENTS = 4
-RECORDS_PER_CLIENT = 100
-CONFLICT_DELAY = 0.1
-CONNECT_DELAY = 0.1
-CLIENT_CACHE = '' # use temporary cache
-
-class Record(persistent.Persistent):
-    def __init__(self, client=None, value=None):
-        self.client = client
-        self.value = value
-        self.next = None
-
-    def set_next(self, next):
-        self.next = next
-
-class Stats(persistent.Persistent):
-    def __init__(self):
-        self.begin = time.time()
-        self.end = None
-
-    def done(self):
-        self.end = time.time()
-
-def init_storage():
-    path = tempfile.mktemp()
-    if VERBOSE:
-        print "FileStorage path:", path
-    fs = ZODB.FileStorage.FileStorage(path)
-
-    db = ZODB.DB(fs)
-    root = db.open().root()
-    root["multi"] = persistent.mapping.PersistentMapping()
-    transaction.commit()
-
-    return fs
-
-def start_server(addr):
-    storage = init_storage()
-    pid, exit = forker.start_zeo_server(storage, addr)
-    return pid, exit
-
-def start_client(addr, client_func=None):
-    pid = os.fork()
-    if pid == 0:
-        try:
-            import ZEO.ClientStorage
-            if VERBOSE:
-                print "Client process started:", os.getpid()
-            cli = ZEO.ClientStorage.ClientStorage(addr, client=CLIENT_CACHE)
-            if client_func is None:
-                run(cli)
-            else:
-                client_func(cli)
-            cli.close()
-        finally:
-            os._exit(0)
-    else:
-        return pid
-
-def run(storage):
-    if hasattr(storage, 'is_connected'):
-        while not storage.is_connected():
-            time.sleep(CONNECT_DELAY)
-    pid = os.getpid()
-    print "Client process connected:", pid, storage
-    db = ZODB.DB(storage)
-    root = db.open().root()
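-    # Record a per-client Stats object under this pid, retrying on
-    # ConflictError since all clients update the same root object.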
-    while 1:
-        try:
-            s = root[pid] = Stats()
-            transaction.commit()
-        except ZODB.POSException.ConflictError:
-            transaction.abort()
-            time.sleep(CONFLICT_DELAY)
-        else:
-            break
-
-    dict = root["multi"]
-    prev = None
-    i = 0
-    while i < RECORDS_PER_CLIENT:
-        try:
-            size = len(dict)
-            r = dict[size] = Record(pid, size)
-            if prev:
-                prev.set_next(r)
-            transaction.commit()
-        except ZODB.POSException.ConflictError, err:
-            transaction.abort()
-            time.sleep(CONFLICT_DELAY)
-        else:
-            i = i + 1
-            if VERBOSE and (i < 5 or i % 10 == 0):
-                print "Client %s: %s of %s" % (pid, i, RECORDS_PER_CLIENT)
-    s.done()
-    transaction.commit()
-
-    print "Client completed:", pid
-
-def main(client_func=None):
-    if VERBOSE:
-        print "Main process:", os.getpid()
-    addr = tempfile.mktemp()
-    t0 = time.time()
-    server_pid, server = start_server(addr)
-    t1 = time.time()
-    pids = []
-    for i in range(CLIENTS):
-        pids.append(start_client(addr, client_func))
-    for pid in pids:
-        assert type(pid) == types.IntType, "invalid pid type: %s (%s)" % \
-               (repr(pid), type(pid))
-        try:
-            if VERBOSE:
-                print "waitpid(%s)" % repr(pid)
-            os.waitpid(pid, 0)
-        except os.error, err:
-            print "waitpid(%s) failed: %s" % (repr(pid), err)
-    t2 = time.time()
-    server.close()
-    os.waitpid(server_pid, 0)
-
-    # TODO:  Should check that the results are consistent!
-
-    print "Total time:", t2 - t0
-    print "Server start time", t1 - t0
-    print "Client time:", t2 - t1
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZEO/tests/speed.py b/branches/bug1734/src/ZEO/tests/speed.py
deleted file mode 100644
index 72be8eb0..00000000
--- a/branches/bug1734/src/ZEO/tests/speed.py
+++ /dev/null
@@ -1,215 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-usage="""Test speed of a ZODB storage
-
-Options:
-
-    -d file    The data file to use as input.
-               The default is this script.
-
-    -n n       The number of repetitions
-
-    -s module  A module that defines a 'Storage'
-               attribute, which is an open storage.
-               If not specified, a FileStorage will be
-               used.
-
-    -z         Test compressing data
-
-    -D         Run in debug mode
-
-    -L         Test loads as well as stores by minimizing
-               the cache after each run
-
-    -M         Output means only
-
-    -C         Run with a persistent client cache
-
-    -U         Run ZEO using a Unix domain socket
-
-    -t n       Number of concurrent threads to run.
-"""
-
-import asyncore
-import sys, os, getopt, time
-##sys.path.insert(0, os.getcwd())
-
-import persistent
-import transaction
-import ZODB
-from ZODB.POSException import ConflictError
-from ZEO.tests import forker
-
-class P(persistent.Persistent):
-    pass
-
-fs_name = "zeo-speed.fs"
-
-class ZEOExit(asyncore.file_dispatcher):
-    """Used to exit ZEO.StorageServer when run is done"""
-    def writable(self):
-        return 0
-    def readable(self):
-        return 1
-    def handle_read(self):
-        buf = self.recv(4)
-        assert buf == "done"
-        self.delete_fs()
-        os._exit(0)
-    def handle_close(self):
-        print "Parent process exited unexpectedly"
-        self.delete_fs()
-        os._exit(0)
-    def delete_fs(self):
-        os.unlink(fs_name)
-        os.unlink(fs_name + ".lock")
-        os.unlink(fs_name + ".tmp")
-
-def work(db, results, nrep, compress, data, detailed, minimize, threadno=None):
-    for j in range(nrep):
-        for r in 1, 10, 100, 1000:
-            t = time.time()
-            conflicts = 0
-
-            jar = db.open()
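-            # Retry the transaction until it commits without a
-            # ConflictError, counting conflicts along the way.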
-            while 1:
-                try:
-                    transaction.begin()
-                    rt = jar.root()
-                    key = 's%s' % r
-                    if rt.has_key(key):
-                        p = rt[key]
-                    else:
-                        rt[key] = p = P()
-                    for i in range(r):
-                        v = getattr(p, str(i), P())
-                        if compress is not None:
-                            v.d = compress(data)
-                        else:
-                            v.d = data
-                        setattr(p, str(i), v)
-                    transaction.commit()
-                except ConflictError:
-                    conflicts = conflicts + 1
-                else:
-                    break
-            jar.close()
-
-            t = time.time() - t
-            if detailed:
-                if threadno is None:
-                    print "%s\t%s\t%.4f\t%d" % (j, r, t, conflicts)
-                else:
-                    print "%s\t%s\t%.4f\t%d\t%d" % (j, r, t, conflicts,
-                                                    threadno)
-            results[r].append((t, conflicts))
-            rt = d = p = v = None # release all references
-            if minimize:
-                time.sleep(3)
-                jar.cacheMinimize()
-
-def main(args):
-    opts, args = getopt.getopt(args, 'zd:n:Ds:LMt:U')
-    s = None
-    compress = None
-    data = sys.argv[0]
-    nrep = 5
-    minimize = 0
-    detailed = 1
-    cache = None
-    domain = 'AF_INET'
-    threads = 1
-    for o, v in opts:
-        if o=='-n': nrep = int(v)
-        elif o=='-d': data = v
-        elif o=='-s': s = v
-        elif o=='-z':
-            import zlib
-            compress = zlib.compress
-        elif o=='-L':
-            minimize=1
-        elif o=='-M':
-            detailed=0
-        elif o=='-D':
-            global debug
-            os.environ['STUPID_LOG_FILE']=''
-            os.environ['STUPID_LOG_SEVERITY']='-999'
-            debug = 1
-        elif o == '-C':
-            cache = 'speed'
-        elif o == '-U':
-            domain = 'AF_UNIX'
-        elif o == '-t':
-            threads = int(v)
-
-    zeo_pipe = None
-    if s:
-        s = __import__(s, globals(), globals(), ('__doc__',))
-        s = s.Storage
-        server = None
-    else:
-        s, server, pid = forker.start_zeo("FileStorage",
-                                          (fs_name, 1), domain=domain)
-
-    data = open(data).read()
-    db = ZODB.DB(s,
-                 # disable cache deactivation
-                 cache_size=4000,
-                 cache_deactivate_after=6000)
-
-    print "Beginning work..."
-    results = {1: [], 10: [], 100: [], 1000: []}
-    if threads > 1:
-        import threading
-        l = []
-        for i in range(threads):
-            t = threading.Thread(target=work,
-                                 args=(db, results, nrep, compress, data,
-                                       detailed, minimize, i))
-            l.append(t)
-        for t in l:
-            t.start()
-        for t in l:
-            t.join()
-
-    else:
-        work(db, results, nrep, compress, data, detailed, minimize)
-
-    if server is not None:
-        server.close()
-        os.waitpid(pid, 0)
-
-    if detailed:
-        print '-'*24
-    print "num\tmean\tmin\tmax"
-    for r in 1, 10, 100, 1000:
-        times = []
-        for elapsed, conf in results[r]:
-            times.append(elapsed)
-        t = mean(times)
-        print "%d\t%.4f\t%.4f\t%.4f" % (r, t, min(times), max(times))
-
-def mean(l):
-    tot = 0
-    for v in l:
-        tot = tot + v
-    return tot / len(l)
-
-##def compress(s):
-##    c = zlib.compressobj()
-##    o = c.compress(s)
-##    return o + c.flush()
-
-if __name__=='__main__':
-    main(sys.argv[1:])
diff --git a/branches/bug1734/src/ZEO/tests/stress.py b/branches/bug1734/src/ZEO/tests/stress.py
deleted file mode 100644
index e8a58042..00000000
--- a/branches/bug1734/src/ZEO/tests/stress.py
+++ /dev/null
@@ -1,137 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A ZEO client-server stress test to look for leaks.
-
-The stress test should run in an infinite loop and should involve
-multiple connections.
-"""
-# TODO: This code is currently broken.
-
-import transaction
-import ZODB
-from ZODB.MappingStorage import MappingStorage
-from ZODB.tests import MinPO
-from ZEO.ClientStorage import ClientStorage
-from ZEO.tests import forker
-
-import os
-import random
-import types
-
-NUM_TRANSACTIONS_PER_CONN = 10
-NUM_CONNECTIONS = 10
-NUM_ROOTS = 20
-MAX_DEPTH = 20
-MIN_OBJSIZE = 128
-MAX_OBJSIZE = 2048
-
-def an_object():
-    """Return an object suitable for a PersistentMapping key"""
-    size = random.randrange(MIN_OBJSIZE, MAX_OBJSIZE)
-    if os.path.exists("/dev/urandom"):
-        f = open("/dev/urandom")
-        buf = f.read(size)
-        f.close()
-        return buf
-    else:
-        f = open(MinPO.__file__)
-        l = list(f.read(size))
-        f.close()
-        random.shuffle(l)
-        return "".join(l)
-
-def setup(cn):
-    """Initialize the database with some objects"""
-    root = cn.root()
-    for i in range(NUM_ROOTS):
-        prev = an_object()
-        for j in range(random.randrange(1, MAX_DEPTH)):
-            o = MinPO.MinPO(prev)
-            prev = o
-        root[an_object()] = o
-        transaction.commit()
-    cn.close()
-
-def work(cn):
-    """Do some work with a transaction"""
-    cn.sync()
-    root = cn.root()
-    obj = random.choice(root.values())
-    # walk down to the bottom
-    while not isinstance(obj.value, types.StringType):
-        obj = obj.value
-    obj.value = an_object()
-    transaction.commit()
-
-def main():
-    # Yuck!  Need to clean up forker so that the API is consistent
-    # across Unix and Windows, at least if that's possible.
-    if os.name == "nt":
-        zaddr, tport, pid = forker.start_zeo_server('MappingStorage', ())
-        def exitserver():
-            import socket
-            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            s.connect(tport)
-            s.close()
-    else:
-        zaddr = '', random.randrange(20000, 30000)
-        pid, exitobj = forker.start_zeo_server(MappingStorage(), zaddr)
-        def exitserver():
-            exitobj.close()
-
-    while 1:
-        pid = start_child(zaddr)
-        print "started", pid
-        os.waitpid(pid, 0)
-
-    exitserver()
-
-def start_child(zaddr):
-
-    pid = os.fork()
-    if pid != 0:
-        return pid
-    try:
-        _start_child(zaddr)
-    finally:
-        os._exit(0)
-
-def _start_child(zaddr):
-    storage = ClientStorage(zaddr, debug=1, min_disconnect_poll=0.5, wait=1)
-    db = ZODB.DB(storage, pool_size=NUM_CONNECTIONS)
-    setup(db.open())
-    conns = []
-    conn_count = 0
-
-    for i in range(NUM_CONNECTIONS):
-        c = db.open()
-        c.__count = 0
-        conns.append(c)
-        conn_count += 1
-
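-    # Churn connections: retire each one after it has done its share of
-    # transactions and open a replacement, exercising connection
-    # setup/teardown in the hunt for leaks.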
-    while conn_count < 25:
-        c = random.choice(conns)
-        if c.__count > NUM_TRANSACTIONS_PER_CONN:
-            conns.remove(c)
-            c.close()
-            conn_count += 1
-            c = db.open()
-            c.__count = 0
-            conns.append(c)
-        else:
-            c.__count += 1
-        work(c)
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZEO/tests/testAuth.py b/branches/bug1734/src/ZEO/tests/testAuth.py
deleted file mode 100644
index 74459fff..00000000
--- a/branches/bug1734/src/ZEO/tests/testAuth.py
+++ /dev/null
@@ -1,134 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Test suite for AuthZEO."""
-
-import os
-import tempfile
-import time
-import unittest
-
-from ZEO import zeopasswd
-from ZEO.Exceptions import ClientDisconnected
-from ZEO.tests.ConnectionTests import CommonSetupTearDown
-
-class AuthTest(CommonSetupTearDown):
-    __super_getServerConfig = CommonSetupTearDown.getServerConfig
-    __super_setUp = CommonSetupTearDown.setUp
-    __super_tearDown = CommonSetupTearDown.tearDown
-
-    realm = None
-
-    def setUp(self):
-        self.pwfile = tempfile.mktemp()
-        if self.realm:
-            self.pwdb = self.dbclass(self.pwfile, self.realm)
-        else:
-            self.pwdb = self.dbclass(self.pwfile)
-        self.pwdb.add_user("foo", "bar")
-        self.pwdb.save()
-        self._checkZEOpasswd()
-        self.__super_setUp()
-
-    def _checkZEOpasswd(self):
-        args = ["-f", self.pwfile, "-p", self.protocol]
-        if self.protocol == "plaintext":
-            from ZEO.auth.base import Database
-            zeopasswd.main(args + ["-d", "foo"], Database)
-            zeopasswd.main(args + ["foo", "bar"], Database)
-        else:
-            zeopasswd.main(args + ["-d", "foo"])
-            zeopasswd.main(args + ["foo", "bar"])
-
-    def tearDown(self):
-        self.__super_tearDown()
-        os.remove(self.pwfile)
-
-    def getConfig(self, path, create, read_only):
-        return "<mappingstorage 1/>"
-
-    def getServerConfig(self, addr, ro_svr):
-        zconf = self.__super_getServerConfig(addr, ro_svr)
-        zconf.authentication_protocol = self.protocol
-        zconf.authentication_database = self.pwfile
-        zconf.authentication_realm = self.realm
-        return zconf
-
-    def wait(self):
-        for i in range(25):
-            time.sleep(0.1)
-            if self._storage.test_connection:
-                return
-        self.fail("Timed out waiting for client to authenticate")
-
-    def testOK(self):
-        # Open the storage without waiting, then wait() for the client
-        # to authenticate before exercising the connection.
-        self._storage = self.openClientStorage(wait=0, username="foo",
-                                              password="bar", realm=self.realm)
-        self.wait()
-
-        self.assert_(self._storage._connection)
-        self._storage._connection.poll()
-        self.assert_(self._storage.is_connected())
-        # Make a call to make sure the mechanism is working
-        self._storage.versions()
-
-    def testNOK(self):
-        self._storage = self.openClientStorage(wait=0, username="foo",
-                                              password="noogie",
-                                              realm=self.realm)
-        self.wait()
-        # If the test established a connection, then it failed.
-        self.failIf(self._storage._connection)
-
-    def testUnauthenticatedMessage(self):
-        # Test that an unauthenticated message is rejected by the server
-        # if it was sent after the connection was authenticated.
-        # Open the storage without waiting, then wait() for the client
-        # to authenticate before exercising the connection.
-        self._storage = self.openClientStorage(wait=0, username="foo",
-                                              password="bar", realm=self.realm)
-        self.wait()
-        self._storage.versions()
-        # Manually clear the state of the hmac connection
-        self._storage._connection._SizedMessageAsyncConnection__hmac_send = None
-        # Once the client stops using the hmac, it should be disconnected.
-        self.assertRaises(ClientDisconnected, self._storage.versions)
-
-
-class PlainTextAuth(AuthTest):
-    import ZEO.tests.auth_plaintext
-    protocol = "plaintext"
-    database = "authdb.sha"
-    dbclass = ZEO.tests.auth_plaintext.Database
-    realm = "Plaintext Realm"
-
-class DigestAuth(AuthTest):
-    import ZEO.auth.auth_digest
-    protocol = "digest"
-    database = "authdb.digest"
-    dbclass = ZEO.auth.auth_digest.DigestDatabase
-    realm = "Digest Realm"
-
-test_classes = [PlainTextAuth, DigestAuth]
-
-def test_suite():
-    suite = unittest.TestSuite()
-    for klass in test_classes:
-        sub = unittest.makeSuite(klass)
-        suite.addTest(sub)
-    return suite
-
-if __name__ == "__main__":
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZEO/tests/testConnection.py b/branches/bug1734/src/ZEO/tests/testConnection.py
deleted file mode 100644
index ec2a067d..00000000
--- a/branches/bug1734/src/ZEO/tests/testConnection.py
+++ /dev/null
@@ -1,142 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Test setup for ZEO connection logic.
-
-The actual tests are in ConnectionTests.py; this file provides the
-platform-dependent scaffolding.
-"""
-
-# System imports
-import unittest
-# Import the actual test class
-from ZEO.tests import ConnectionTests, InvalidationTests
-
-
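-# Each *StorageConfig mixin supplies a getConfig() used by the shared
-# connection tests to run against a specific storage backend.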
-class FileStorageConfig:
-    def getConfig(self, path, create, read_only):
-        return """\
-        <filestorage 1>
-        path %s
-        create %s
-        read-only %s
-        </filestorage>""" % (path,
-                             create and 'yes' or 'no',
-                             read_only and 'yes' or 'no')
-
-class BerkeleyStorageConfig:
-    def getConfig(self, path, create, read_only):
-        return """\
-        <fullstorage 1>
-        envdir %s
-        read-only %s
-        </fullstorage>""" % (path, read_only and "yes" or "no")
-
-class MappingStorageConfig:
-    def getConfig(self, path, create, read_only):
-        return """<mappingstorage 1/>"""
-
-
-class FileStorageConnectionTests(
-    FileStorageConfig,
-    ConnectionTests.ConnectionTests,
-    InvalidationTests.InvalidationTests
-    ):
-    """FileStorage-specific connection tests."""
-    level = 2
-
-class FileStorageReconnectionTests(
-    FileStorageConfig,
-    ConnectionTests.ReconnectionTests,
-    ):
-    """FileStorage-specific re-connection tests."""
-    # Run this at level 1 because MappingStorage can't do reconnection tests
-    level = 1
-
-class FileStorageInvqTests(
-    FileStorageConfig,
-    ConnectionTests.InvqTests
-    ):
-    """FileStorage-specific invalidation queue tests."""
-    level = 1
-
-class FileStorageTimeoutTests(
-    FileStorageConfig,
-    ConnectionTests.TimeoutTests
-    ):
-    level = 2
-
-class BDBConnectionTests(
-    BerkeleyStorageConfig,
-    ConnectionTests.ConnectionTests,
-    InvalidationTests.InvalidationTests
-    ):
-    """Berkeley storage connection tests."""
-    level = 2
-
-class BDBReconnectionTests(
-    BerkeleyStorageConfig,
-    ConnectionTests.ReconnectionTests
-    ):
-    """Berkeley storage re-connection tests."""
-    level = 2
-
-class BDBInvqTests(
-    BerkeleyStorageConfig,
-    ConnectionTests.InvqTests
-    ):
-    """Berkeley storage invalidation queue tests."""
-    level = 2
-
-class BDBTimeoutTests(
-    BerkeleyStorageConfig,
-    ConnectionTests.TimeoutTests
-    ):
-    level = 2
-
-
-class MappingStorageConnectionTests(
-    MappingStorageConfig,
-    ConnectionTests.ConnectionTests
-    ):
-    """Mapping storage connection tests."""
-    level = 1
-
-# The ReconnectionTests can't work with MappingStorage because it's only an
-# in-memory storage and has no persistent state.
-
-class MappingStorageTimeoutTests(
-    MappingStorageConfig,
-    ConnectionTests.TimeoutTests
-    ):
-    level = 1
-
-
-
-test_classes = [FileStorageConnectionTests,
-                FileStorageReconnectionTests,
-                FileStorageInvqTests,
-                FileStorageTimeoutTests,
-                MappingStorageConnectionTests,
-                MappingStorageTimeoutTests]
-
-def test_suite():
-    suite = unittest.TestSuite()
-    for klass in test_classes:
-        sub = unittest.makeSuite(klass, 'check')
-        suite.addTest(sub)
-    return suite
-
-
-if __name__ == "__main__":
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZEO/tests/testMonitor.py b/branches/bug1734/src/ZEO/tests/testMonitor.py
deleted file mode 100644
index eb5b4cc3..00000000
--- a/branches/bug1734/src/ZEO/tests/testMonitor.py
+++ /dev/null
@@ -1,89 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Test that the monitor produce sensible results.
-
-$Id$
-"""
-
-import socket
-import unittest
-
-from ZEO.tests.ConnectionTests import CommonSetupTearDown
-from ZEO.monitor import StorageStats
-
-class MonitorTests(CommonSetupTearDown):
-
-    monitor = 1
-
-    def get_monitor_output(self):
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        s.connect(('localhost', 42000))
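-        # Read until the peer closes the connection; the monitor server
-        # writes its full report and then disconnects.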
-        L = []
-        while 1:
-            buf = s.recv(8192)
-            if buf:
-                L.append(buf)
-            else:
-                break
-        s.close()
-        return "".join(L)
-
-    def parse(self, s):
-        # Return a list of StorageStats, one for each storage.
-        lines = s.split("\n")
-        self.assert_(lines[0].startswith("ZEO monitor server"))
-        # lines[1] is a date
-
-        # Break up rest of lines into sections starting with Storage:
-        # and ending with a blank line.
-        sections = []
-        cur = None
-        for line in lines[2:]:
-            if line.startswith("Storage:"):
-                cur = [line]
-            elif line:
-                cur.append(line)
-            else:
-                if cur is not None:
-                    sections.append(cur)
-                    cur = None
-        assert cur is None # bug in the test code if this fails
-
-        d = {}
-        for sect in sections:
-            hdr = sect[0]
-            key, value = hdr.split(":")
-            storage = int(value)
-            s = d[storage] = StorageStats()
-            s.parse("\n".join(sect[1:]))
-
-        return d
-
-    def getConfig(self, path, create, read_only):
-        return """<mappingstorage 1/>"""
-
-    def testMonitor(self):
-        # Just open a client to know that the server is up and running
-        # TODO: should put this in setUp.
-        self.storage = self.openClientStorage()
-        s = self.get_monitor_output()
-        self.storage.close()
-        self.assert_(s.find("monitor") != -1)
-        d = self.parse(s)
-        stats = d[1]
-        self.assertEqual(stats.clients, 1)
-        self.assertEqual(stats.commits, 0)
-
-def test_suite():
-    return unittest.makeSuite(MonitorTests)
diff --git a/branches/bug1734/src/ZEO/tests/testTransactionBuffer.py b/branches/bug1734/src/ZEO/tests/testTransactionBuffer.py
deleted file mode 100644
index 16780da1..00000000
--- a/branches/bug1734/src/ZEO/tests/testTransactionBuffer.py
+++ /dev/null
@@ -1,70 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import random
-import unittest
-
-from ZEO.TransactionBuffer import TransactionBuffer
-
-def random_string(size):
-    """Return a random string of size size."""
-    l = [chr(random.randrange(256)) for i in range(size)]
-    return "".join(l)
-
-def new_store_data():
-    """Return arbitrary data to use as argument to store() method."""
-    return random_string(8), '', random_string(random.randrange(1000))
-
-def new_invalidate_data():
-    """Return arbitrary data to use as argument to invalidate() method."""
-    return random_string(8), ''
-
-class TransBufTests(unittest.TestCase):
-
-    def checkTypicalUsage(self):
-        tbuf = TransactionBuffer()
-        tbuf.store(*new_store_data())
-        tbuf.invalidate(*new_invalidate_data())
-        for o in tbuf:
-            pass
-
-    def doUpdates(self, tbuf):
-        data = []
-        for i in range(10):
-            d = new_store_data()
-            tbuf.store(*d)
-            data.append(d)
-            d = new_invalidate_data()
-            tbuf.invalidate(*d)
-            data.append(d)
-
-        for i, x in enumerate(tbuf):
-            if x[2] is None:
-                # the tbuf adds a dummy None to invalidate records
-                x = x[:2]
-            self.assertEqual(x, data[i])
-
-    def checkOrderPreserved(self):
-        tbuf = TransactionBuffer()
-        self.doUpdates(tbuf)
-
-    def checkReusable(self):
-        tbuf = TransactionBuffer()
-        self.doUpdates(tbuf)
-        tbuf.clear()
-        self.doUpdates(tbuf)
-        tbuf.clear()
-        self.doUpdates(tbuf)
-
-def test_suite():
-    return unittest.makeSuite(TransBufTests, 'check')
diff --git a/branches/bug1734/src/ZEO/tests/testZEO.py b/branches/bug1734/src/ZEO/tests/testZEO.py
deleted file mode 100644
index 84deb14e..00000000
--- a/branches/bug1734/src/ZEO/tests/testZEO.py
+++ /dev/null
@@ -1,215 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Test suite for ZEO based on ZODB.tests."""
-
-# System imports
-import os
-import random
-import socket
-import asyncore
-import tempfile
-import unittest
-import logging
-
-# ZODB test support
-import ZODB
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle
-
-# ZODB test mixin classes
-from ZODB.tests import StorageTestBase, BasicStorage, VersionStorage, \
-     TransactionalUndoStorage, TransactionalUndoVersionStorage, \
-     PackableStorage, Synchronization, ConflictResolution, RevisionStorage, \
-     MTStorage, ReadOnlyStorage
-
-from ZEO.ClientStorage import ClientStorage
-from ZEO.tests import forker, Cache, CommitLockTests, ThreadTests
-
-logger = logging.getLogger('ZEO.tests.testZEO')
-
-class DummyDB:
-    def invalidate(self, *args):
-        pass
-
-
-class MiscZEOTests:
-    """ZEO tests that don't fit in elsewhere."""
-
-    def checkLargeUpdate(self):
-        obj = MinPO("X" * (10 * 128 * 1024))
-        self._dostore(data=obj)
-
-    def checkZEOInvalidation(self):
-        addr = self._storage._addr
-        storage2 = ClientStorage(addr, wait=1, min_disconnect_poll=0.1)
-        try:
-            oid = self._storage.new_oid()
-            ob = MinPO('first')
-            revid1 = self._dostore(oid, data=ob)
-            data, serial = storage2.load(oid, '')
-            self.assertEqual(zodb_unpickle(data), MinPO('first'))
-            self.assertEqual(serial, revid1)
-            revid2 = self._dostore(oid, data=MinPO('second'), revid=revid1)
-            for n in range(3):
-                # Let the server and client talk for a moment.
-                # Is there a better way to do this?
-                asyncore.poll(0.1)
-            data, serial = storage2.load(oid, '')
-            self.assertEqual(zodb_unpickle(data), MinPO('second'),
-                             'Invalidation message was not sent!')
-            self.assertEqual(serial, revid2)
-        finally:
-            storage2.close()
-
-def get_port():
-    """Return a port that is not in use.
-
-    Checks if a port is in use by trying to connect to it.  Assumes it
-    is not in use if connect raises an exception.
-
-    Raises RuntimeError after 10 tries.
-    """
-    for i in range(10):
-        port = random.randrange(20000, 30000)
-        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        try:
-            try:
-                s.connect(('localhost', port))
-            except socket.error:
-                # Perhaps we should check value of error too.
-                return port
-        finally:
-            s.close()
-    raise RuntimeError, "Can't find port"
-
-class GenericTests(
-    # Base class for all ZODB tests
-    StorageTestBase.StorageTestBase,
-    # ZODB test mixin classes (in the same order as imported)
-    BasicStorage.BasicStorage,
-    PackableStorage.PackableStorage,
-    Synchronization.SynchronizedStorage,
-    MTStorage.MTStorage,
-    ReadOnlyStorage.ReadOnlyStorage,
-    # ZEO test mixin classes (in the same order as imported)
-    CommitLockTests.CommitLockVoteTests,
-    ThreadTests.ThreadTests,
-    # Locally defined (see above)
-    MiscZEOTests
-    ):
-
-    """Combine tests from various origins in one class."""
-
-    def setUp(self):
-        logger.info("setUp() %s", self.id())
-        port = get_port()
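-        # Each test spawns its own ZEO server on a free port; tearDown()
-        # stops it via the admin address saved in self._servers.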
-        zconf = forker.ZEOConfig(('', port))
-        zport, adminaddr, pid, path = forker.start_zeo_server(self.getConfig(),
-                                                              zconf, port)
-        self._pids = [pid]
-        self._servers = [adminaddr]
-        self._conf_path = path
-        self._storage = ClientStorage(zport, '1', cache_size=20000000,
-                                      min_disconnect_poll=0.5, wait=1,
-                                      wait_timeout=60)
-        self._storage.registerDB(DummyDB(), None)
-
-    def tearDown(self):
-        self._storage.close()
-        os.remove(self._conf_path)
-        for server in self._servers:
-            forker.shutdown_zeo_server(server)
-        if hasattr(os, 'waitpid'):
-            # Not in Windows Python until 2.3
-            for pid in self._pids:
-                os.waitpid(pid, 0)
-
-    def open(self, read_only=0):
-        # Needed to support ReadOnlyStorage tests.  Ought to be a
-        # cleaner way.
-        addr = self._storage._addr
-        self._storage.close()
-        self._storage = ClientStorage(addr, read_only=read_only, wait=1)
-
-    def checkWriteMethods(self):
-        # ReadOnlyStorage defines checkWriteMethods.  The decision
-        # about where to raise the read-only error was changed after
-        # Zope 2.5 was released.  So this test needs to detect Zope
-        # of the 2.5 vintage and skip the test.
-
-        # The __version__ attribute was not present in Zope 2.5.
-        if hasattr(ZODB, "__version__"):
-            ReadOnlyStorage.ReadOnlyStorage.checkWriteMethods(self)
-
-    def checkSortKey(self):
-        key = '%s:%s' % (self._storage._storage, self._storage._server_addr)
-        self.assertEqual(self._storage.sortKey(), key)
-
-class FullGenericTests(
-    GenericTests,
-    Cache.StorageWithCache,
-    Cache.TransUndoStorageWithCache,
-    CommitLockTests.CommitLockUndoTests,
-    ConflictResolution.ConflictResolvingStorage,
-    ConflictResolution.ConflictResolvingTransUndoStorage,
-    PackableStorage.PackableUndoStorage,
-    RevisionStorage.RevisionStorage,
-    TransactionalUndoStorage.TransactionalUndoStorage,
-    TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
-    VersionStorage.VersionStorage,
-    ):
-    """Extend GenericTests with tests that MappingStorage can't pass."""
-
-class FileStorageTests(FullGenericTests):
-    """Test ZEO backed by a FileStorage."""
-    level = 2
-
-    def getConfig(self):
-        filename = self.__fs_base = tempfile.mktemp()
-        return """\
-        <filestorage 1>
-        path %s
-        </filestorage>
-        """ % filename
-
-class BDBTests(FullGenericTests):
-    """ZEO backed by a Berkeley full storage."""
-    level = 2
-
-    def getConfig(self):
-        self._envdir = tempfile.mktemp()
-        return """\
-        <fullstorage 1>
-        envdir %s
-        </fullstorage>
-        """ % self._envdir
-
-class MappingStorageTests(GenericTests):
-    """ZEO backed by a Mapping storage."""
-
-    def getConfig(self):
-        return """<mappingstorage 1/>"""
-
-test_classes = [FileStorageTests, MappingStorageTests]
-
-def test_suite():
-    suite = unittest.TestSuite()
-    for klass in test_classes:
-        sub = unittest.makeSuite(klass, "check")
-        suite.addTest(sub)
-    return suite
-
-
-if __name__ == "__main__":
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZEO/tests/testZEOOptions.py b/branches/bug1734/src/ZEO/tests/testZEOOptions.py
deleted file mode 100644
index 3c106670..00000000
--- a/branches/bug1734/src/ZEO/tests/testZEOOptions.py
+++ /dev/null
@@ -1,112 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Test suite for ZEO.runzeo.ZEOOptions."""
-
-import os
-import sys
-import tempfile
-import unittest
-
-import ZODB.config
-
-from ZEO.runzeo import ZEOOptions
-from zdaemon.tests.testzdoptions import TestZDOptions
-
-# When a hostname isn't specified in an address, ZConfig supplies a
-# platform-dependent default value.
-DEFAULT_HOSTNAME = ''
-if sys.platform in ['win32',]:
-    DEFAULT_HOSTNAME = 'localhost'
-
-class TestZEOOptions(TestZDOptions):
-
-    OptionsClass = ZEOOptions
-
-    input_args = ["-f", "Data.fs", "-a", "5555"]
-    output_opts = [("-f", "Data.fs"), ("-a", "5555")]
-    output_args = []
-
-    configdata = """
-        <zeo>
-          address 5555
-        </zeo>
-        <filestorage fs>
-          path Data.fs
-        </filestorage>
-        """
-
-    def setUp(self):
-        self.tempfilename = tempfile.mktemp()
-        f = open(self.tempfilename, "w")
-        f.write(self.configdata)
-        f.close()
-
-    def tearDown(self):
-        try:
-            os.remove(self.tempfilename)
-        except os.error:
-            pass
-
-    def test_configure(self):
-        # Hide the base class test_configure
-        pass
-
-    def test_defaults_with_schema(self):
-        options = self.OptionsClass()
-        options.realize(["-C", self.tempfilename])
-        self.assertEqual(options.address, (DEFAULT_HOSTNAME, 5555))
-        self.assertEqual(len(options.storages), 1)
-        opener = options.storages[0]
-        self.assertEqual(opener.name, "fs")
-        self.assertEqual(opener.__class__, ZODB.config.FileStorage)
-        self.assertEqual(options.read_only, 0)
-        self.assertEqual(options.transaction_timeout, None)
-        self.assertEqual(options.invalidation_queue_size, 100)
-
-    def test_defaults_without_schema(self):
-        options = self.OptionsClass()
-        options.realize(["-a", "5555", "-f", "Data.fs"])
-        self.assertEqual(options.address, (DEFAULT_HOSTNAME, 5555))
-        self.assertEqual(len(options.storages), 1)
-        opener = options.storages[0]
-        self.assertEqual(opener.name, "1")
-        self.assertEqual(opener.__class__, ZODB.config.FileStorage)
-        self.assertEqual(opener.config.path, "Data.fs")
-        self.assertEqual(options.read_only, 0)
-        self.assertEqual(options.transaction_timeout, None)
-        self.assertEqual(options.invalidation_queue_size, 100)
-
-    def test_commandline_overrides(self):
-        options = self.OptionsClass()
-        options.realize(["-C", self.tempfilename,
-                         "-a", "6666", "-f", "Wisdom.fs"])
-        self.assertEqual(options.address, (DEFAULT_HOSTNAME, 6666))
-        self.assertEqual(len(options.storages), 1)
-        opener = options.storages[0]
-        self.assertEqual(opener.__class__, ZODB.config.FileStorage)
-        self.assertEqual(opener.config.path, "Wisdom.fs")
-        self.assertEqual(options.read_only, 0)
-        self.assertEqual(options.transaction_timeout, None)
-        self.assertEqual(options.invalidation_queue_size, 100)
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    for cls in [TestZEOOptions]:
-        suite.addTest(unittest.makeSuite(cls))
-    return suite
-
-if __name__ == "__main__":
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZEO/tests/test_cache.py b/branches/bug1734/src/ZEO/tests/test_cache.py
deleted file mode 100644
index 7053c13c..00000000
--- a/branches/bug1734/src/ZEO/tests/test_cache.py
+++ /dev/null
@@ -1,156 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Basic unit tests for a multi-version client cache."""
-
-import os
-import tempfile
-import unittest
-
-import ZEO.cache
-from ZODB.utils import p64
-
-n1 = p64(1)
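-# p64 packs an integer into the 8-byte string form ZODB uses for oids
-# and tids.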
-n2 = p64(2)
-n3 = p64(3)
-n4 = p64(4)
-n5 = p64(5)
-
-class CacheTests(unittest.TestCase):
-
-    def setUp(self):
-        # The default cache size is much larger than we need here.  Since
-        # testSerialization reads the entire file into a string, it's not
-        # good to leave it that big.
-        self.cache = ZEO.cache.ClientCache(size=1024**2)
-        self.cache.open()
-
-    def tearDown(self):
-        if self.cache.path:
-            os.remove(self.cache.path)
-
-    def testLastTid(self):
-        self.assertEqual(self.cache.getLastTid(), None)
-        self.cache.setLastTid(n2)
-        self.assertEqual(self.cache.getLastTid(), n2)
-        self.cache.invalidate(None, "", n1)
-        self.assertEqual(self.cache.getLastTid(), n2)
-        self.cache.invalidate(None, "", n3)
-        self.assertEqual(self.cache.getLastTid(), n3)
-        self.assertRaises(ValueError, self.cache.setLastTid, n2)
-
-    def testLoad(self):
-        data1 = "data for n1"
-        self.assertEqual(self.cache.load(n1, ""), None)
-        self.assertEqual(self.cache.load(n1, "version"), None)
-        self.cache.store(n1, "", n3, None, data1)
-        self.assertEqual(self.cache.load(n1, ""), (data1, n3, ""))
-        # The cache doesn't know whether version exists, because it
-        # only has non-version data.
-        self.assertEqual(self.cache.load(n1, "version"), None)
-        self.assertEqual(self.cache.modifiedInVersion(n1), None)
-
-    def testInvalidate(self):
-        data1 = "data for n1"
-        self.cache.store(n1, "", n3, None, data1)
-        self.cache.invalidate(n1, "", n4)
-        self.cache.invalidate(n2, "", n2)
-        self.assertEqual(self.cache.load(n1, ""), None)
-        self.assertEqual(self.cache.loadBefore(n1, n4),
-                         (data1, n3, n4))
-
-    def testVersion(self):
-        data1 = "data for n1"
-        data1v = "data for n1 in version"
-        self.cache.store(n1, "version", n3, None, data1v)
-        self.assertEqual(self.cache.load(n1, ""), None)
-        self.assertEqual(self.cache.load(n1, "version"),
-                         (data1v, n3, "version"))
-        self.assertEqual(self.cache.load(n1, "random"), None)
-        self.assertEqual(self.cache.modifiedInVersion(n1), "version")
-        self.cache.invalidate(n1, "version", n4)
-        self.assertEqual(self.cache.load(n1, "version"), None)
-
-    def testNonCurrent(self):
-        data1 = "data for n1"
-        data2 = "data for n2"
-        self.cache.store(n1, "", n4, None, data1)
-        self.cache.store(n1, "", n2, n3, data2)
-        # can't say anything about state before n2
-        self.assertEqual(self.cache.loadBefore(n1, n2), None)
-        # n3 is the upper bound of non-current record n2
-        self.assertEqual(self.cache.loadBefore(n1, n3), (data2, n2, n3))
-        # no data for between n2 and n3
-        self.assertEqual(self.cache.loadBefore(n1, n4), None)
-        self.cache.invalidate(n1, "", n5)
-        self.assertEqual(self.cache.loadBefore(n1, n5), (data1, n4, n5))
-        self.assertEqual(self.cache.loadBefore(n2, n4), None)
-
-    def testException(self):
-        self.assertRaises(ValueError,
-                          self.cache.store,
-                          n1, "version", n2, n3, "data")
-        self.cache.store(n1, "", n2, None, "data")
-        self.assertRaises(ValueError,
-                          self.cache.store,
-                          n1, "", n3, None, "data")
-
-    def testEviction(self):
-        # Manually override the current maxsize
-        maxsize = self.cache.size = self.cache.fc.maxsize = 3395 # 1245
-        self.cache.fc = ZEO.cache.FileCache(3395, None, self.cache)
-
-        # Trivial test of eviction code.  Doesn't test non-current
-        # eviction.
-        data = ["z" * i for i in range(100)]
-        for i in range(50):
-            n = p64(i)
-            self.cache.store(n, "", n, None, data[i])
-            self.assertEquals(len(self.cache), i + 1)
-        # The cache now uses 1225 bytes.  The next insert
-        # should delete some objects.
-        n = p64(50)
-        self.cache.store(n, "", n, None, data[51])
-        self.assert_(len(self.cache) < 51)
-
-        # TODO:  Need to make sure eviction of non-current data
-        # and of version data are handled correctly.
-
-    def testSerialization(self):
-        self.cache.store(n1, "", n2, None, "data for n1")
-        self.cache.store(n2, "version", n2, None, "version data for n2")
-        self.cache.store(n3, "", n3, n4, "non-current data for n3")
-        self.cache.store(n3, "", n4, n5, "more non-current data for n3")
-
-        path = tempfile.mktemp()
-        # Copy data from self.cache into path, reaching into the cache
-        # guts to make the copy.
-        dst = open(path, "wb+")
-        src = self.cache.fc.f
-        src.seek(0)
-        dst.write(src.read(self.cache.fc.maxsize))
-        dst.close()
-        copy = ZEO.cache.ClientCache(path)
-        copy.open()
-
-        # Verify that internals of both objects are the same.
-        # Could also test that external API produces the same results.
-        eq = self.assertEqual
-        eq(copy.getLastTid(), self.cache.getLastTid())
-        eq(len(copy), len(self.cache))
-        eq(copy.version, self.cache.version)
-        eq(copy.current, self.cache.current)
-        eq(copy.noncurrent, self.cache.noncurrent)
-
-def test_suite():
-    return unittest.makeSuite(CacheTests)
diff --git a/branches/bug1734/src/ZEO/tests/zeoserver.py b/branches/bug1734/src/ZEO/tests/zeoserver.py
deleted file mode 100644
index 17f713f3..00000000
--- a/branches/bug1734/src/ZEO/tests/zeoserver.py
+++ /dev/null
@@ -1,216 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Helper file used to launch a ZEO server cross platform"""
-
-import os
-import sys
-import time
-import errno
-import getopt
-import socket
-import signal
-import asyncore
-import threading
-import logging
-
-import ThreadedAsync.LoopCallback
-
-from ZEO.StorageServer import StorageServer
-from ZEO.runzeo import ZEOOptions
-
-
-def cleanup(storage):
-    # FileStorage and the Berkeley storages have this method, which deletes
-    # all files and directories used by the storage.  This prevents @-files
-    # from clogging up /tmp
-    try:
-        storage.cleanup()
-    except AttributeError:
-        pass
-
-logger = logging.getLogger('ZEO.tests.zeoserver')
-
-def log(label, msg, *args):
-    message = "(%s) %s" % (label, msg)
-    logger.debug(message, *args)
-
-
-class ZEOTestServer(asyncore.dispatcher):
-    """A server for killing the whole process at the end of a test.
-
-    The first time we connect to this server, we write an ack character down
-    the socket.  The other end should block on a recv() of the socket so it
-    can guarantee the server has started up before continuing on.
-
-    The second connect to the port immediately exits the process, via
-    os._exit(), without writing data on the socket.  It does close and clean
-    up the storage first.  The other end will get the empty string from its
-    recv() which will be enough to tell it that the server has exited.
-
-    I think this should prevent us from ever getting a legitimate addr-in-use
-    error.
-    """
-    __super_init = asyncore.dispatcher.__init__
-
-    def __init__(self, addr, server, keep):
-        self.__super_init()
-        self._server = server
-        self._sockets = [self]
-        self._keep = keep
-        # Count down to zero, the number of connects
-        self._count = 1
-        self._label = '%d @ %s' % (os.getpid(), addr)
-        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
-        # Some ZEO tests attempt a quick start of the server using the same
-        # port so we have to set the reuse flag.
-        self.set_reuse_addr()
-        try:
-            self.bind(addr)
-        except:
-            # We really want to see these exceptions
-            import traceback
-            traceback.print_exc()
-            raise
-        self.listen(5)
-        self.log('bound and listening')
-
-    def log(self, msg, *args):
-        log(self._label, msg, *args)
-
-    def handle_accept(self):
-        sock, addr = self.accept()
-        self.log('in handle_accept()')
-        # When we're done with everything, close the storage.  Do not write
-        # the ack character until the storage is finished closing.
-        if self._count <= 0:
-            self.log('closing the storage')
-            self._server.close_server()
-            if not self._keep:
-                for storage in self._server.storages.values():
-                    cleanup(storage)
-            self.log('exiting')
-            # Close all the other sockets so that we don't have to wait
-            # for os._exit() to get to it before starting the next
-            # server process.
-            for s in self._sockets:
-                s.close()
-            # Now explicitly close the socket returned from accept(),
-            # since it didn't go through the wrapper.
-            sock.close()
-            os._exit(0)
-        self.log('continuing')
-        sock.send('X')
-        self._count -= 1
-
-    def register_socket(self, sock):
-        # Register a socket to be closed when the server shuts down.
-        self._sockets.append(sock)
-
-class Suicide(threading.Thread):
-    def __init__(self, addr):
-        threading.Thread.__init__(self)
-        self._adminaddr = addr
-
-    def run(self):
-        # If this process doesn't exit in 330 seconds, commit suicide.
-        # The client threads in the ConcurrentUpdate tests will run for
-        # as long as 300 seconds.  Set this timeout to 330 to minimize
-        # chance that the server gives up before the clients.
-        time.sleep(330)
-        log(str(os.getpid()), "suicide thread invoking shutdown")
-
-        # If the server hasn't shut down yet, the client may not be
-        # able to connect to it.  If so, try to kill the process to
-        # force it to shutdown.
-        if hasattr(os, "kill"):
-            os.kill(pid, signal.SIGTERM)
-            time.sleep(5)
-            os.kill(pid, signal.SIGKILL)
-        else:
-            from ZEO.tests.forker import shutdown_zeo_server
-            # Note:  If the -k option was given to zeoserver, then the
-            # process will go away but the temp files won't get
-            # cleaned up.
-            shutdown_zeo_server(self._adminaddr)
-
-
-def main():
-    global pid
-    pid = os.getpid()
-    label = str(pid)
-    log(label, "starting")
-
-    # We don't do much sanity checking of the arguments, since if we get it
-    # wrong, it's a bug in the test suite.
-    keep = 0
-    configfile = None
-    # Parse the arguments and let getopt.error percolate
-    opts, args = getopt.getopt(sys.argv[1:], 'kC:')
-    for opt, arg in opts:
-        if opt == '-k':
-            keep = 1
-        elif opt == '-C':
-            configfile = arg
-
-    zo = ZEOOptions()
-    zo.realize(["-C", configfile])
-    zeo_port = int(zo.address[1])
-
-    if zo.auth_protocol == "plaintext":
-        import ZEO.tests.auth_plaintext
-
-    # Open the config file and let ZConfig parse the data there.  Then remove
-    # the config file, otherwise we'll leave turds.
-    # The rest of the args are hostname, portnum
-    test_port = zeo_port + 1
-    test_addr = ('localhost', test_port)
-    addr = ('localhost', zeo_port)
-    log(label, 'creating the storage server')
-    storage = zo.storages[0].open()
-    mon_addr = None
-    if zo.monitor_address:
-        mon_addr = zo.monitor_address
-    server = StorageServer(
-        zo.address,
-        {"1": storage},
-        read_only=zo.read_only,
-        invalidation_queue_size=zo.invalidation_queue_size,
-        transaction_timeout=zo.transaction_timeout,
-        monitor_address=mon_addr,
-        auth_protocol=zo.auth_protocol,
-        auth_database=zo.auth_database,
-        auth_realm=zo.auth_realm)
-
-    try:
-        log(label, 'creating the test server, keep: %s', keep)
-        t = ZEOTestServer(test_addr, server, keep)
-    except socket.error, e:
-        if e[0] != errno.EADDRINUSE:
-            raise
-        log(label, 'addr in use, closing and exiting')
-        storage.close()
-        cleanup(storage)
-        sys.exit(2)
-
-    t.register_socket(server.dispatcher)
-    # Create daemon suicide thread
-    d = Suicide(test_addr)
-    d.setDaemon(1)
-    d.start()
-    # Loop for socket events
-    log(label, 'entering ThreadedAsync loop')
-    ThreadedAsync.LoopCallback.loop()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/ZEO/util.py b/branches/bug1734/src/ZEO/util.py
deleted file mode 100644
index 69ffdb2d..00000000
--- a/branches/bug1734/src/ZEO/util.py
+++ /dev/null
@@ -1,56 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Utilities for setting up the server environment."""
-
-import os
-
-def parentdir(p, n=1):
-    """Return the ancestor of p from n levels up."""
-    d = p
-    while n:
-        d = os.path.dirname(d)
-        if not d or d == '.':
-            d = os.getcwd()
-        n -= 1
-    return d
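-
-# Editorial worked example (path hypothetical):
-#   parentdir("/opt/Zope/lib/python/ZEO/runzeo.py", 4) == "/opt/Zope"
-#   parentdir("runzeo.py") == os.getcwd()   # dirname yields "" -> cwd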
-
-class Environment:
-    """Determine location of the Data.fs & ZEO_SERVER.pid files.
-
-    Pass the argv[0] used to start ZEO to the constructor.
-
-    Use the zeo_pid and fs attributes to get the filenames.
-    """
-
-    def __init__(self, argv0):
-        v = os.environ.get("INSTANCE_HOME")
-        if v is None:
-            # looking for a Zope/var directory assuming that this code
-            # is installed in Zope/lib/python/ZEO
-            p = parentdir(argv0, 4)
-            if os.path.isdir(os.path.join(p, "var")):
-                v = p
-            else:
-                v = os.getcwd()
-        self.home = v
-        self.var = os.path.join(v, "var")
-        if not os.path.isdir(self.var):
-            self.var = self.home
-
-        pid = os.environ.get("ZEO_SERVER_PID")
-        if pid is None:
-            pid = os.path.join(self.var, "ZEO_SERVER.pid")
-
-        self.zeo_pid = pid
-        self.fs = os.path.join(self.var, "Data.fs")
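-
-if __name__ == "__main__":
-    # Editorial sketch (not part of the original module): print the
-    # paths an Environment derives for the current process.
-    import sys
-    env = Environment(sys.argv[0])
-    print env.zeo_pid
-    print env.fs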
diff --git a/branches/bug1734/src/ZEO/version.txt b/branches/bug1734/src/ZEO/version.txt
deleted file mode 100644
index 30fe9db2..00000000
--- a/branches/bug1734/src/ZEO/version.txt
+++ /dev/null
@@ -1 +0,0 @@
-2.4a0
diff --git a/branches/bug1734/src/ZEO/zeoctl.py b/branches/bug1734/src/ZEO/zeoctl.py
deleted file mode 100644
index 58c75bbd..00000000
--- a/branches/bug1734/src/ZEO/zeoctl.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Wrapper script for zdctl.py that causes it to use the ZEO schema."""
-
-import os
-
-import ZEO
-import zdaemon.zdctl
-
-
-# Main program
-def main(args=None):
-    options = zdaemon.zdctl.ZDCtlOptions()
-    options.schemadir = os.path.dirname(ZEO.__file__)
-    options.schemafile = "zeoctl.xml"
-    zdaemon.zdctl.main(args, options)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZEO/zeoctl.xml b/branches/bug1734/src/ZEO/zeoctl.xml
deleted file mode 100644
index 0c337e76..00000000
--- a/branches/bug1734/src/ZEO/zeoctl.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<schema>
-
-  <description>
-    This schema describes the configuration of the ZEO storage server
-    controller.  It differs from the schema for the storage server
-    only in that the "runner" section is required.
-  </description>
-
-  <!-- Use the storage types defined by ZODB. -->
-  <import package="ZODB"/>
-
-  <!-- Use the ZEO server information structure. -->
-  <import package="ZEO"/>
-
-  <import package="ZConfig.components.logger"/>
-
-  <!-- runner control -->
-  <import package="zdaemon"/>
-
-
-  <section type="zeo" name="*" required="yes" attribute="zeo" />
-
-  <section type="runner" name="*" required="yes" attribute="runner" />
-
-  <multisection name="+" type="ZODB.storage"
-                attribute="storages"
-                required="yes" />
-
-  <section name="*" type="eventlog" attribute="eventlog" required="no" />
-
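-  <!-- Editorial sketch: a configuration matching this schema might
-       look like the following (storage type, path, port, and program
-       location are hypothetical):
-
-         <zeo>
-           address 8100
-         </zeo>
-         <filestorage 1>
-           path /var/zeo/Data.fs
-         </filestorage>
-         <runner>
-           program /usr/local/zeo/bin/runzeo
-         </runner>
-  -->
-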
-</schema>
diff --git a/branches/bug1734/src/ZEO/zeopasswd.py b/branches/bug1734/src/ZEO/zeopasswd.py
deleted file mode 100644
index e45c220d..00000000
--- a/branches/bug1734/src/ZEO/zeopasswd.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!python
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Update a user's authentication tokens for a ZEO server.
-
-usage: python zeopasswd.py [options] username [password]
-
-Specify either a configuration file:
-
-    -C/--configure -- ZConfig configuration file
-
-or the individual options:
-
-    -f/--filename -- authentication database filename
-    -p/--protocol -- authentication protocol name
-    -r/--realm -- authentication database realm
-
-Additional options:
-
-    -d/--delete -- delete user instead of updating password
-"""
-
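-# Example invocations (editorial sketch; file names hypothetical):
-#
-#   python zeopasswd.py -C /etc/zeo.conf alice s3cret
-#   python zeopasswd.py -p digest -f /var/zeo/auth.db -r zeo alice
-#   python zeopasswd.py -d -p digest -f /var/zeo/auth.db alice
-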
-import getopt
-import getpass
-import sys
-import os
-
-import ZConfig
-import ZEO
-
-def usage(msg):
-    print __doc__
-    print msg
-    sys.exit(2)
-
-def options(args):
-    """Password-specific options loaded from regular ZEO config file."""
-    try:
-        opts, args = getopt.getopt(args, "dr:p:f:C:",
-                                   ["delete", "configure=", "protocol=",
-                                    "filename=", "realm="])
-    except getopt.error, msg:
-        usage(msg)
-    config = None
-    delete = 0
-    auth_protocol = None
-    auth_db = ""
-    auth_realm = None
-    for k, v in opts:
-        if k == '-C' or k == '--configure':
-            schemafile = os.path.join(os.path.dirname(ZEO.__file__),
-                                                     "schema.xml")
-            schema = ZConfig.loadSchema(schemafile)
-            config, nil = ZConfig.loadConfig(schema, v)
-        if k == '-d' or k == '--delete':
-            delete = 1
-        if k == '-p' or k == '--protocol':
-            auth_protocol = v
-        if k == '-f' or k == '--filename':
-            auth_db = v
-        if k == '-r' or k == '--realm':
-            auth_realm = v
-
-    if config is not None:
-        if auth_protocol or auth_db:
-            usage("Error: Conflicting options; use either -C *or* -p and -f")
-        auth_protocol = config.zeo.authentication_protocol
-        auth_db = config.zeo.authentication_database
-        auth_realm = config.zeo.authentication_realm
-    elif not (auth_protocol and auth_db):
-        usage("Error: Must specifiy configuration file or protocol and database")
-
-    password = None
-    if delete:
-        if not args:
-            usage("Error: Must specify a username to delete")
-        elif len(args) > 1:
-            usage("Error: Too many arguments")
-        username = args[0]
-    else:
-        if not args:
-            usage("Error: Must specify a username")
-        elif len(args) > 2:
-            usage("Error: Too many arguments")
-        elif len(args) == 1:
-            username = args[0]
-        else:
-            username, password = args
-
-    return auth_protocol, auth_db, auth_realm, delete, username, password
-
-def main(args=None, dbclass=None):
-    p, auth_db, auth_realm, delete, username, password = options(args)
-    if p is None:
-        usage("Error: configuration does not specify auth protocol")
-    if p == "digest":
-        from ZEO.auth.auth_digest import DigestDatabase as Database
-    elif p == "srp":
-        from ZEO.auth.auth_srp import SRPDatabase as Database
-    elif dbclass:
-        # dbclass is used for testing tests.auth_plaintext, see testAuth.py
-        Database = dbclass
-    else:
-        raise ValueError, "Unknown database type %r" % p
-    if auth_db is None:
-        usage("Error: configuration does not specify auth database")
-    db = Database(auth_db, auth_realm)
-    if delete:
-        db.del_user(username)
-    else:
-        if password is None:
-            password = getpass.getpass("Enter password: ")
-        db.add_user(username, password)
-    db.save()
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
diff --git a/branches/bug1734/src/ZEO/zrpc/__init__.py b/branches/bug1734/src/ZEO/zrpc/__init__.py
deleted file mode 100644
index 279a2de1..00000000
--- a/branches/bug1734/src/ZEO/zrpc/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-# zrpc is a package with the following modules
-# client -- manages connection creation to remote server
-# connection -- object dispatcher
-# log -- logging helper
-# error -- exceptions raised by zrpc
-# marshal -- internal, handles basic protocol issues
-# server -- manages incoming connections from remote clients
-# smac -- sized message async connections
-# trigger -- medusa's trigger
-
-# zrpc is not an advertised subpackage of ZEO; its interfaces are internal
diff --git a/branches/bug1734/src/ZEO/zrpc/_hmac.py b/branches/bug1734/src/ZEO/zrpc/_hmac.py
deleted file mode 100644
index 2fd259f1..00000000
--- a/branches/bug1734/src/ZEO/zrpc/_hmac.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This file is a slightly modified copy of Python 2.3's Lib/hmac.py.
-# This file is under the Python Software Foundation (PSF) license.
-
-"""HMAC (Keyed-Hashing for Message Authentication) Python module.
-
-Implements the HMAC algorithm as described by RFC 2104.
-"""
-
-def _strxor(s1, s2):
-    """Utility method. XOR the two strings s1 and s2 (must have same length).
-    """
-    return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
-
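-# Editorial worked example: _strxor("ab", "AB") == "  " (two spaces),
-# since 97 ^ 65 == 98 ^ 66 == 32.
-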
-# The size of the digests returned by HMAC depends on the underlying
-# hashing module used.
-digest_size = None
-
-class HMAC:
-    """RFC2104 HMAC class.
-
-    This supports the API for Cryptographic Hash Functions (PEP 247).
-    """
-
-    def __init__(self, key, msg = None, digestmod = None):
-        """Create a new HMAC object.
-
-        key:       key for the keyed hash object.
-        msg:       Initial input for the hash, if provided.
-        digestmod: A module supporting PEP 247. Defaults to the md5 module.
-        """
-        if digestmod is None:
-            import md5
-            digestmod = md5
-
-        self.digestmod = digestmod
-        self.outer = digestmod.new()
-        self.inner = digestmod.new()
-        # Python 2.1 and 2.2 differ about the correct spelling
-        try:
-            self.digest_size = digestmod.digestsize
-        except AttributeError:
-            self.digest_size = digestmod.digest_size
-
-        blocksize = 64
-        ipad = "\x36" * blocksize
-        opad = "\x5C" * blocksize
-
-        if len(key) > blocksize:
-            key = digestmod.new(key).digest()
-
-        key = key + chr(0) * (blocksize - len(key))
-        self.outer.update(_strxor(key, opad))
-        self.inner.update(_strxor(key, ipad))
-        if msg is not None:
-            self.update(msg)
-
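-    # Editorial note: the constructor above realizes the RFC 2104
-    # construction (with + meaning concatenation)
-    #   HMAC(K, msg) = H((K ^ opad) + H((K ^ ipad) + msg))
-    # where K is NUL-padded (or first hashed, when longer than one
-    # block) to the 64-byte block size.
-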
-##    def clear(self):
-##        raise NotImplementedError, "clear() method not available in HMAC."
-
-    def update(self, msg):
-        """Update this hashing object with the string msg.
-        """
-        self.inner.update(msg)
-
-    def copy(self):
-        """Return a separate copy of this hashing object.
-
-        An update to this copy won't affect the original object.
-        """
-        other = HMAC("")
-        other.digestmod = self.digestmod
-        other.inner = self.inner.copy()
-        other.outer = self.outer.copy()
-        return other
-
-    def digest(self):
-        """Return the hash value of this hashing object.
-
-        This returns a string containing 8-bit data.  The object is
-        not altered in any way by this function; you can continue
-        updating the object after calling this function.
-        """
-        h = self.outer.copy()
-        h.update(self.inner.digest())
-        return h.digest()
-
-    def hexdigest(self):
-        """Like digest(), but returns a string of hexadecimal digits instead.
-        """
-        return "".join([hex(ord(x))[2:].zfill(2)
-                        for x in tuple(self.digest())])
-
-def new(key, msg = None, digestmod = None):
-    """Create a new hashing object and return it.
-
-    key: The starting key for the hash.
-    msg: if available, will immediately be hashed into the object's starting
-    state.
-
-    You can now feed arbitrary strings into the object using its update()
-    method, and can ask for the hash value at any time by calling its digest()
-    method.
-    """
-    return HMAC(key, msg, digestmod)
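-
-# Editorial sketch: usage mirrors the standard library's hmac module:
-#
-#   h = new("session-key", "payload")   # md5 digest by default
-#   tag = h.hexdigest()                 # 32 hex digits for md5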
diff --git a/branches/bug1734/src/ZEO/zrpc/client.py b/branches/bug1734/src/ZEO/zrpc/client.py
deleted file mode 100644
index e7986a79..00000000
--- a/branches/bug1734/src/ZEO/zrpc/client.py
+++ /dev/null
@@ -1,531 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import errno
-import select
-import socket
-import sys
-import threading
-import time
-import types
-import logging
-
-import ThreadedAsync
-
-from ZODB.POSException import ReadOnlyError
-from ZODB.loglevels import BLATHER
-
-from ZEO.zrpc.log import log
-from ZEO.zrpc.trigger import trigger
-from ZEO.zrpc.connection import ManagedClientConnection
-
-class ConnectionManager(object):
-    """Keeps a connection up over time"""
-
-    def __init__(self, addrs, client, tmin=1, tmax=180):
-        self.addrlist = self._parse_addrs(addrs)
-        self.client = client
-        self.tmin = tmin
-        self.tmax = tmax
-        self.cond = threading.Condition(threading.Lock())
-        self.connection = None # Protected by self.cond
-        self.closed = 0
-        # If thread is not None, then there is a helper thread
-        # attempting to connect.
-        self.thread = None # Protected by self.cond
-        self.trigger = None
-        self.thr_async = 0
-        ThreadedAsync.register_loop_callback(self.set_async)
-
-    def __repr__(self):
-        return "<%s for %s>" % (self.__class__.__name__, self.addrlist)
-
-    def _parse_addrs(self, addrs):
-        # Return a list of (addr_type, addr) pairs.
-
-        # For backwards compatibility (and simplicity?) the
-        # constructor accepts a single address in the addrs argument --
-        # a string for a Unix domain socket or a 2-tuple with a
-        # hostname and port.  It can also accept a list of such addresses.
-
-        addr_type = self._guess_type(addrs)
-        if addr_type is not None:
-            return [(addr_type, addrs)]
-        else:
-            addrlist = []
-            for addr in addrs:
-                addr_type = self._guess_type(addr)
-                if addr_type is None:
-                    raise ValueError, (
-                        "unknown address in list: %s" % repr(addr))
-                addrlist.append((addr_type, addr))
-            return addrlist
-
-    def _guess_type(self, addr):
-        if isinstance(addr, types.StringType):
-            return socket.AF_UNIX
-
-        if (len(addr) == 2
-            and isinstance(addr[0], types.StringType)
-            and isinstance(addr[1], types.IntType)):
-            return socket.AF_INET
-
-        # not anything I know about
-        return None
-
-    def close(self):
-        """Prevent ConnectionManager from opening new connections"""
-        self.closed = 1
-        ThreadedAsync.remove_loop_callback(self.set_async)
-        self.cond.acquire()
-        try:
-            t = self.thread
-            self.thread = None
-            conn = self.connection
-        finally:
-            self.cond.release()
-        if t is not None:
-            log("CM.close(): stopping and joining thread")
-            t.stop()
-            t.join(30)
-            if t.isAlive():
-                log("CM.close(): self.thread.join() timed out",
-                    level=logging.WARNING)
-        if conn is not None:
-            # This will call close_conn() below which clears self.connection
-            conn.close()
-        if self.trigger is not None:
-            self.trigger.close()
-            self.trigger = None
-        ThreadedAsync.remove_loop_callback(self.set_async)
-
-    def set_async(self, map):
-        # This is the callback registered with ThreadedAsync.  The
-        # callback might be called multiple times, so it shouldn't
-        # create a trigger every time and should never do anything
-        # after it's closed.
-
-        # It may be that the only case where it is called multiple
-        # times is in the test suite, where ThreadedAsync's loop can
-        # be started in a child process after a fork.  Regardless,
-        # it's good to be defensive.
-
-        # We need each connection started with async==0 to have a
-        # callback.
-        log("CM.set_async(%s)" % repr(map), level=logging.DEBUG)
-        if not self.closed and self.trigger is None:
-            log("CM.set_async(): first call")
-            self.trigger = trigger()
-            self.thr_async = 1 # needs to be set on the Connection
-
-    def attempt_connect(self):
-        """Attempt a connection to the server without blocking too long.
-
-        There isn't a crisp definition for too long.  When a
-        ClientStorage is created, it attempts to connect to the
-        server.  If the server isn't immediately available, it can
-        operate from the cache.  This method will start the background
-        connection thread and wait a little while to see if it
-        finishes quickly.
-        """
-
-        # Will a single attempt take too long?
-        # Answer:  it depends -- normally, you'll connect or get a
-        # connection refused error very quickly.  Packet-eating
-        # firewalls and other mishaps may cause the connect to take a
-        # long time to time out though.  It's also possible that you
-        # connect quickly to a slow server, and the attempt includes
-        # at least one roundtrip to the server (the register() call).
-        # But that's as fast as you can expect it to be.
-        self.connect()
-        self.cond.acquire()
-        try:
-            t = self.thread
-            conn = self.connection
-        finally:
-            self.cond.release()
-        if t is not None and conn is None:
-            event = t.one_attempt
-            event.wait()
-            self.cond.acquire()
-            try:
-                conn = self.connection
-            finally:
-                self.cond.release()
-        return conn is not None
-
-    def connect(self, sync=0):
-        self.cond.acquire()
-        try:
-            if self.connection is not None:
-                return
-            t = self.thread
-            if t is None:
-                log("CM.connect(): starting ConnectThread")
-                self.thread = t = ConnectThread(self, self.client,
-                                                self.addrlist,
-                                                self.tmin, self.tmax)
-                t.setDaemon(1)
-                t.start()
-            if sync:
-                while self.connection is None:
-                    self.cond.wait(30)
-                    if self.connection is None:
-                        log("CM.connect(sync=1): still waiting...")
-        finally:
-            self.cond.release()
-        if sync:
-            assert self.connection is not None
-
-    def connect_done(self, conn, preferred):
-        # Called by ConnectWrapper.notify_client() after notifying the client
-        log("CM.connect_done(preferred=%s)" % preferred)
-        self.cond.acquire()
-        try:
-            self.connection = conn
-            if preferred:
-                self.thread = None
-            self.cond.notifyAll() # Wake up connect(sync=1)
-        finally:
-            self.cond.release()
-
-    def close_conn(self, conn):
-        # Called by the connection when it is closed
-        self.cond.acquire()
-        try:
-            if conn is not self.connection:
-                # Closing a non-current connection
-                log("CM.close_conn() non-current", level=BLATHER)
-                return
-            log("CM.close_conn()")
-            self.connection = None
-        finally:
-            self.cond.release()
-        self.client.notifyDisconnected()
-        if not self.closed:
-            self.connect()
-
-    def is_connected(self):
-        self.cond.acquire()
-        try:
-            return self.connection is not None
-        finally:
-            self.cond.release()
-
-# When trying to do a connect on a non-blocking socket, some outcomes
-# are expected.  Set _CONNECT_IN_PROGRESS to the errno value(s) expected
-# when an initial connect can't complete immediately.  Set _CONNECT_OK
-# to the errno value(s) expected if the connect succeeds *or* if it's
-# already connected (our code can attempt redundant connects).
-if hasattr(errno, "WSAEWOULDBLOCK"):    # Windows
-    # Caution:  The official Winsock docs claim that WSAEALREADY should be
-    # treated as yet another "in progress" indicator, but we've never
-    # seen this.
-    _CONNECT_IN_PROGRESS = (errno.WSAEWOULDBLOCK,)
-    # Win98: WSAEISCONN; Win2K: WSAEINVAL
-    _CONNECT_OK          = (0, errno.WSAEISCONN, errno.WSAEINVAL)
-else:                                   # Unix
-    _CONNECT_IN_PROGRESS = (errno.EINPROGRESS,)
-    _CONNECT_OK          = (0, errno.EISCONN)
-
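-# Editorial sketch (grounded in ConnectWrapper.connect_procedure below)
-# of how these outcome sets are consumed:
-#
-#     err = sock.connect_ex(addr)
-#     if err in _CONNECT_IN_PROGRESS:
-#         ...                      # keep select()ing until writable
-#     elif err in _CONNECT_OK:
-#         ...                      # connected; test the connection
-#     else:
-#         ...                      # hard failure; log and close
-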
-class ConnectThread(threading.Thread):
-    """Thread that tries to connect to server given one or more addresses.
-
-    The thread is passed a ConnectionManager and the manager's client
-    as arguments.  It calls testConnection() on the client when a
-    socket connects; that should return 1 or 0 indicating whether this
-    is a preferred or a fallback connection.  It may also raise an
-    exception, in which case the connection is abandoned.
-
-    The thread will continue to run, attempting connections, until a
-    preferred connection is seen and successfully handed over to the
-    manager and client.
-
-    As soon as testConnection() finds a preferred connection, or after
-    all sockets have been tried and at least one fallback connection
-    has been seen, notifyConnected(connection) is called on the client
-    and connect_done() on the manager.  If this was a preferred
-    connection, the thread then exits; otherwise, it keeps trying
-    until it gets a preferred connection, and then reconnects the
-    client using that connection.
-
-    """
-
-    __super_init = threading.Thread.__init__
-
-    # We don't expect clients to call any methods of this Thread other
-    # than close() and those defined by the Thread API.
-
-    def __init__(self, mgr, client, addrlist, tmin, tmax):
-        self.__super_init(name="Connect(%s)" % addrlist)
-        self.mgr = mgr
-        self.client = client
-        self.addrlist = addrlist
-        self.tmin = tmin
-        self.tmax = tmax
-        self.stopped = 0
-        self.one_attempt = threading.Event()
-        # A ConnectThread keeps track of whether it has finished a
-        # call to try_connecting().  This allows the ConnectionManager
-        # to make an attempt to connect right away, but not block for
-        # too long if the server isn't immediately available.
-
-    def stop(self):
-        self.stopped = 1
-
-    def run(self):
-        delay = self.tmin
-        success = 0
-        # Don't wait too long the first time.
-        # TODO: make timeout configurable?
-        attempt_timeout = 5
-        while not self.stopped:
-            success = self.try_connecting(attempt_timeout)
-            if not self.one_attempt.isSet():
-                self.one_attempt.set()
-                attempt_timeout = 75
-            if success > 0:
-                break
-            time.sleep(delay)
-            if self.mgr.is_connected():
-                log("CT: still trying to replace fallback connection",
-                    level=logging.INFO)
-            delay = min(delay*2, self.tmax)
-        log("CT: exiting thread: %s" % self.getName())
-
-    def try_connecting(self, timeout):
-        """Try connecting to all self.addrlist addresses.
-
-        Return 1 if a preferred connection was found; 0 if no
-        connection was found; and -1 if a fallback connection was
-        found.
-
-        If no connection is found within timeout seconds, return 0.
-        """
-        log("CT: attempting to connect on %d sockets" % len(self.addrlist))
-        deadline = time.time() + timeout
-        wrappers = self._create_wrappers()
-        for wrap in wrappers.keys():
-            if wrap.state == "notified":
-                return 1
-        try:
-            if time.time() > deadline:
-                return 0
-            r = self._connect_wrappers(wrappers, deadline)
-            if r is not None:
-                return r
-            if time.time() > deadline:
-                return 0
-            r = self._fallback_wrappers(wrappers, deadline)
-            if r is not None:
-                return r
-            # Alas, no luck.
-            assert not wrappers
-        finally:
-            for wrap in wrappers.keys():
-                wrap.close()
-            del wrappers
-        return 0
-
-    def _create_wrappers(self):
-        # Create socket wrappers
-        wrappers = {}  # keys are active wrappers
-        for domain, addr in self.addrlist:
-            wrap = ConnectWrapper(domain, addr, self.mgr, self.client)
-            wrap.connect_procedure()
-            if wrap.state == "notified":
-                for w in wrappers.keys():
-                    w.close()
-                return {wrap: wrap}
-            if wrap.state != "closed":
-                wrappers[wrap] = wrap
-        return wrappers
-
-    def _connect_wrappers(self, wrappers, deadline):
-        # Next wait until they all actually connect (or fail)
-        # The deadline is necessary, because we'd wait forever if a
-        # socket never connects or fails.
-        while wrappers:
-            if self.stopped:
-                for wrap in wrappers.keys():
-                    wrap.close()
-                return 0
-            # Select connecting wrappers
-            connecting = [wrap
-                          for wrap in wrappers.keys()
-                          if wrap.state == "connecting"]
-            if not connecting:
-                break
-            if time.time() > deadline:
-                break
-            try:
-                r, w, x = select.select([], connecting, connecting, 1.0)
-                log("CT: select() %d, %d, %d" % tuple(map(len, (r,w,x))))
-            except select.error, msg:
-                log("CT: select failed; msg=%s" % str(msg),
-                    level=logging.WARNING)
-                continue
-            # Exceptable wrappers are in trouble; close these suckers
-            for wrap in x:
-                log("CT: closing troubled socket %s" % str(wrap.addr))
-                del wrappers[wrap]
-                wrap.close()
-            # Writable sockets are connected
-            for wrap in w:
-                wrap.connect_procedure()
-                if wrap.state == "notified":
-                    del wrappers[wrap] # Don't close this one
-                    for wrap in wrappers.keys():
-                        wrap.close()
-                    return 1
-                if wrap.state == "closed":
-                    del wrappers[wrap]
-
-    def _fallback_wrappers(self, wrappers, deadline):
-        # If we've got wrappers left at this point, they're fallback
-        # connections.  Try notifying them until one succeeds.
-        for wrap in wrappers.keys():
-            assert wrap.state == "tested" and wrap.preferred == 0
-            if self.mgr.is_connected():
-                wrap.close()
-            else:
-                wrap.notify_client()
-                if wrap.state == "notified":
-                    del wrappers[wrap] # Don't close this one
-                    for wrap in wrappers.keys():
-                        wrap.close()
-                    return -1
-            assert wrap.state == "closed"
-            del wrappers[wrap]
-
-            # TODO: should check deadline
-
-
-class ConnectWrapper:
-    """An object that handles the connection procedure for one socket.
-
-    This is a little state machine with states:
-        closed
-        opened
-        connecting
-        connected
-        tested
-        notified
-    """
-
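-    # Editorial note: the normal progression is
-    #   closed -> opened -> connecting -> connected -> tested -> notified
-    # and close() returns the wrapper to "closed" from any state.
-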
-    def __init__(self, domain, addr, mgr, client):
-        """Store arguments and create non-blocking socket."""
-        self.domain = domain
-        self.addr = addr
-        self.mgr = mgr
-        self.client = client
-        # These attributes are part of the interface
-        self.state = "closed"
-        self.sock = None
-        self.conn = None
-        self.preferred = 0
-        log("CW: attempt to connect to %s" % repr(addr))
-        try:
-            self.sock = socket.socket(domain, socket.SOCK_STREAM)
-        except socket.error, err:
-            log("CW: can't create socket, domain=%s: %s" % (domain, err),
-                level=logging.ERROR)
-            self.close()
-            return
-        self.sock.setblocking(0)
-        self.state = "opened"
-
-    def connect_procedure(self):
-        """Call sock.connect_ex(addr) and interpret result."""
-        if self.state in ("opened", "connecting"):
-            try:
-                err = self.sock.connect_ex(self.addr)
-            except socket.error, msg:
-                log("CW: connect_ex(%r) failed: %s" % (self.addr, msg),
-                    level=logging.ERROR)
-                self.close()
-                return
-            log("CW: connect_ex(%s) returned %s" %
-                (self.addr, errno.errorcode.get(err) or str(err)))
-            if err in _CONNECT_IN_PROGRESS:
-                self.state = "connecting"
-                return
-            if err not in _CONNECT_OK:
-                log("CW: error connecting to %s: %s" %
-                    (self.addr, errno.errorcode.get(err) or str(err)),
-                    level=logging.WARNING)
-                self.close()
-                return
-            self.state = "connected"
-        if self.state == "connected":
-            self.test_connection()
-
-    def test_connection(self):
-        """Establish and test a connection at the zrpc level.
-
-        Call the client's testConnection(), giving the client a chance
-        to do app-level check of the connection.
-        """
-        self.conn = ManagedClientConnection(self.sock, self.addr,
-                                            self.client, self.mgr)
-        self.sock = None # The socket is now owned by the connection
-        try:
-            self.preferred = self.client.testConnection(self.conn)
-            self.state = "tested"
-        except ReadOnlyError:
-            log("CW: ReadOnlyError in testConnection (%s)" % repr(self.addr))
-            self.close()
-            return
-        except:
-            log("CW: error in testConnection (%s)" % repr(self.addr),
-                level=logging.ERROR, exc_info=True)
-            self.close()
-            return
-        if self.preferred:
-            self.notify_client()
-
-    def notify_client(self):
-        """Call the client's notifyConnected().
-
-        If this succeeds, call the manager's connect_done().
-
-        If the client is already connected, we assume it's a fallback
-        connection, and the new connection must be a preferred
-        connection.  The client will close the old connection.
-        """
-        try:
-            self.client.notifyConnected(self.conn)
-        except:
-            log("CW: error in notifyConnected (%s)" % repr(self.addr),
-                level=logging.ERROR, exc_info=True)
-            self.close()
-            return
-        self.state = "notified"
-        self.mgr.connect_done(self.conn, self.preferred)
-
-    def close(self):
-        """Close the socket and reset everything."""
-        self.state = "closed"
-        self.mgr = self.client = None
-        self.preferred = 0
-        if self.conn is not None:
-            # Closing the ZRPC connection will eventually close the
-            # socket, somewhere in asyncore.  Guido asks: Why do we care?
-            self.conn.close()
-            self.conn = None
-        if self.sock is not None:
-            self.sock.close()
-            self.sock = None
-
-    def fileno(self):
-        return self.sock.fileno()
diff --git a/branches/bug1734/src/ZEO/zrpc/connection.py b/branches/bug1734/src/ZEO/zrpc/connection.py
deleted file mode 100644
index d9c4ea89..00000000
--- a/branches/bug1734/src/ZEO/zrpc/connection.py
+++ /dev/null
@@ -1,781 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import asyncore
-import errno
-import select
-import sys
-import threading
-import types
-import logging
-
-import ThreadedAsync
-from ZEO.zrpc import smac
-from ZEO.zrpc.error import ZRPCError, DisconnectedError
-from ZEO.zrpc.marshal import Marshaller
-from ZEO.zrpc.trigger import trigger
-from ZEO.zrpc.log import short_repr, log
-from ZODB.loglevels import BLATHER, TRACE
-
-REPLY = ".reply" # message name used for replies
-ASYNC = 1
-
-class Delay:
-    """Used to delay response to client for synchronous calls.
-
-    When a synchronous call is made and the original handler returns
-    without handling the call, it returns a Delay object that prevents
-    the mainloop from sending a response.
-    """
-
-    def set_sender(self, msgid, send_reply, return_error):
-        self.msgid = msgid
-        self.send_reply = send_reply
-        self.return_error = return_error
-
-    def reply(self, obj):
-        self.send_reply(self.msgid, obj)
-
-    def error(self, exc_info):
-        log("Error raised in delayed method", logging.ERROR, exc_info=True)
-        self.return_error(self.msgid, 0, *exc_info[:2])
-
-class MTDelay(Delay):
-
-    def __init__(self):
-        self.ready = threading.Event()
-
-    def set_sender(self, msgid, send_reply, return_error):
-        Delay.set_sender(self, msgid, send_reply, return_error)
-        self.ready.set()
-
-    def reply(self, obj):
-        self.ready.wait()
-        Delay.reply(self, obj)
-
-    def error(self, exc_info):
-        self.ready.wait()
-        Delay.error(self, exc_info)
-
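-# Editorial sketch: how a server-side method might use MTDelay (names
-# hypothetical).  Returning the Delay keeps the mainloop from sending a
-# response; a worker thread later calls reply() or error():
-#
-#     def slow_method(self, arg):
-#         d = MTDelay()
-#         self.work_queue.put((d, arg))   # worker calls d.reply(result)
-#         return d
-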
-# PROTOCOL NEGOTIATION
-#
-# The code implementing protocol version 2.0.0 (which is deployed
-# in the field and cannot be changed) *only* talks to peers that
-# send a handshake indicating protocol version 2.0.0.  In that
-# version, both the client and the server immediately send out
-# their protocol handshake when a connection is established,
-# without waiting for their peer, and disconnect when a different
-# handshake is received.
-#
-# The new protocol uses this to enable new clients to talk to
-# 2.0.0 servers.  In the new protocol:
-#
-#    The server sends its protocol handshake to the client at once.
-#
-#    The client waits until it receives the server's protocol handshake
-#    before sending its own handshake.  The client sends the lower of its
-#    own protocol version and the server protocol version, allowing it to
-#    talk to servers using later protocol versions (2.0.2 and higher) as
-#    well:  the effective protocol used will be the lower of the client
-#    and server protocol.
-#
-# [Ugly details:  In order to treat the first received message (protocol
-#  handshake) differently than all later messages, both client and server
-#  start by patching their message_input() method to refer to their
-#  recv_handshake() method instead.  In addition, the client has to arrange
-#  to queue (delay) outgoing messages until it receives the server's
-#  handshake, so that the first message the client sends to the server is
-#  the client's handshake.  This multiply-special treatment of the first
-#  message is delicate, and several asyncore and thread subtleties were
-#  handled unsafely before ZODB 3.2.6.
-# ]
-#
-# The ZEO modules ClientStorage and ServerStub have backwards
-# compatibility code for dealing with the previous version of the
-# protocol.  The client accepts the old version of some messages,
-# and will not send new messages when talking to an old server.
-#
-# As long as the client hasn't sent its handshake, it can't send
-# anything else; output messages are queued during this time.
-# (Output can happen because the connection testing machinery can
-# start sending requests before the handshake is received.)
-#
-# UPGRADING FROM ZEO 2.0.0 TO NEWER VERSIONS:
-#
-# Because a new client can talk to an old server, but not vice
-# versa, all clients should be upgraded before upgrading any
-# servers.  Protocol upgrades beyond 2.0.1 will not have this
-# restriction, because clients using protocol 2.0.1 or later can
-# talk to both older and newer servers.
-#
-# No compatibility with protocol version 1 is provided.
-
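-# Editorial sketch of the client-side choice described above.  Version
-# strings such as "Z200" and "Z201" compare lexicographically, so the
-# effective protocol is simply the lower of the two:
-#
-#     def choose_protocol(client_proto, server_proto):
-#         return min(client_proto, server_proto)   # e.g. "Z200"
-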
-# Connection is abstract (it must be derived from).  ManagedServerConnection
-# and ManagedClientConnection are the concrete subclasses.  They need to
-# supply a handshake() method appropriate for their role in protocol
-# negotiation.
-
-class Connection(smac.SizedMessageAsyncConnection, object):
-    """Dispatcher for RPC on object on both sides of socket.
-
-    The connection supports synchronous calls, which expect a return,
-    and asynchronous calls, which do not.
-
-    It uses the Marshaller class to handle encoding and decoding of
-    method calls and arguments.  Marshaller uses pickle to encode
-    arbitrary Python objects.  The code here doesn't ever see the wire
-    format.
-
-    A Connection is designed for use in a multithreaded application,
-    where a synchronous call must block until a response is ready.
-
-    A socket connection between a client and a server allows either
-    side to invoke methods on the other side.  The processes on each
-    end of the socket use a Connection object to manage communication.
-
-    The Connection deals with decoded RPC messages.  They are
-    represented as four-tuples containing: msgid, flags, method name,
-    and a tuple of method arguments.
-
-    The msgid starts at zero and is incremented by one each time a
-    method call message is sent.  Each side of the connection has a
-    separate msgid state.
-
-    When one side of the connection (the client) calls a method, it
-    sends a message with a new msgid.  The other side (the server),
-    replies with a message that has the same msgid, the string
-    ".reply" (the global variable REPLY) as the method name, and the
-    actual return value in the args position.  Note that each side of
-    the Connection can initiate a call, in which case it will be the
-    client for that particular call.
-
-    The protocol also supports asynchronous calls.  The client does
-    not wait for a return value for an asynchronous call.  The only
-    defined flag is ASYNC.  If a method call message has the ASYNC
-    flag set, the server sends no reply, and raises an exception if
-    the method returns a value.
-
-    If a method call raises an Exception, the exception is propagated
-    back to the client via the REPLY message.  The client side will
-    raise any exception it receives instead of returning the value to
-    the caller.
-    """
-
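-    # Editorial sketch of the decoded four-tuples described above
-    # (method name and argument are hypothetical):
-    #   request: (12, 0, "loadObject", (oid,))
-    #   reply:   (12, 0, ".reply", result)
-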
-    __super_init = smac.SizedMessageAsyncConnection.__init__
-    __super_close = smac.SizedMessageAsyncConnection.close
-    __super_setSessionKey = smac.SizedMessageAsyncConnection.setSessionKey
-
-    # Protocol variables:
-    #
-    # oldest_protocol_version -- the oldest protocol version we support
-    # protocol_version -- the newest protocol version we support; preferred
-
-    oldest_protocol_version = "Z200"
-    protocol_version = "Z201"
-
-    # Protocol history:
-    #
-    # Z200 -- Original ZEO 2.0 protocol
-    #
-    # Z201 -- Added invalidateTransaction() to client.
-    #         Renamed several client methods.
-    #         Added several server methods:
-    #             lastTransaction()
-    #             getAuthProtocol() and scheme-specific authentication methods
-    #             getExtensionMethods().
-    #             getInvalidations().
-
-    # Client constructor passes 'C' for tag, server constructor 'S'.  This
-    # is used in log messages.
-    def __init__(self, sock, addr, obj, tag):
-        self.obj = None
-        self.marshal = Marshaller()
-        self.closed = False
-        self.peer_protocol_version = None # set in recv_handshake()
-
-        assert tag in "CS"
-        self.logger = logging.getLogger('ZEO.zrpc.Connection(%c)' % tag)
-        if isinstance(addr, types.TupleType):
-            self.log_label = "(%s:%d) " % addr
-        else:
-            self.log_label = "(%s) " % addr
-
-        # Supply our own socket map, so that we don't get registered with
-        # the asyncore socket map just yet.  The initial protocol messages
-        # are treated very specially, and we dare not get invoked by asyncore
-        # before that special-case setup is complete.  Some of that setup
-        # occurs near the end of this constructor, and the rest is done by
-        # a concrete subclass's handshake() method.  Unfortunately, because
-        # we ultimately derive from asyncore.dispatcher, it's not possible
-        # to invoke the superclass constructor without asyncore stuffing
-        # us into _some_ socket map.
-        ourmap = {}
-        self.__super_init(sock, addr, map=ourmap)
-
-        # A Connection either uses asyncore directly or relies on an
-        # asyncore mainloop running in a separate thread.  If
-        # thr_async is true, then the mainloop is running in a
-        # separate thread.  If thr_async is true, then the asyncore
-        # trigger (self.trigger) is used to notify that thread of
-        # activity on the current thread.
-        self.thr_async = False
-        self.trigger = None
-        self._prepare_async()
-
-        # The singleton dict is used in synchronous mode when a method
-        # needs to call into asyncore to try to force some I/O to occur.
-        # The singleton dict is a socket map containing only this object.
-        self._singleton = {self._fileno: self}
-
-        # msgid_lock guards access to msgid
-        self.msgid = 0
-        self.msgid_lock = threading.Lock()
-
-        # replies_cond is used to block when a synchronous call is
-        # waiting for a response
-        self.replies_cond = threading.Condition()
-        self.replies = {}
-
-        # waiting_for_reply is used internally to indicate whether
-        # a call is in progress.  Setting a session key is deferred
-        # until after the call returns.
-        self.waiting_for_reply = False
-        self.delay_sesskey = None
-        self.register_object(obj)
-
-        # The first message we see is a protocol handshake.  message_input()
-        # is temporarily replaced by recv_handshake() to treat that message
-        # specially.  revc_handshake() does "del self.message_input", which
-        # uncovers the normal message_input() method thereafter.
-        self.message_input = self.recv_handshake
-
-        # Server and client need to do different things for protocol
-        # negotiation, and handshake() is implemented differently in each.
-        self.handshake()
-
-        # Now it's safe to register with asyncore's socket map; it was not
-        # safe before message_input was replaced, or before handshake() was
-        # invoked.
-        # Obscure:  in Python 2.4, the base asyncore.dispatcher class grew
-        # a ._map attribute, which is used instead of asyncore's global
-        # socket map when ._map isn't None.  Because we passed `ourmap` to
-        # the base class constructor above, in 2.4 asyncore believes we want
-        # to use `ourmap` instead of the global socket map -- but we don't.
-        # So we have to replace our ._map with the global socket map, and
-        # update the global socket map with `ourmap`.  Replacing our ._map
-        # isn't necessary before Python 2.4, but doesn't hurt then (it just
-        # gives us an unused attribute in 2.3); updating the global socket
-        # map is necessary regardless of Python version.
-        self._map = asyncore.socket_map
-        asyncore.socket_map.update(ourmap)
-
-    def __repr__(self):
-        return "<%s %s>" % (self.__class__.__name__, self.addr)
-
-    __str__ = __repr__ # Defeat asyncore's dreaded __getattr__
-
-    def log(self, message, level=BLATHER, exc_info=False):
-        self.logger.log(level, self.log_label + message, exc_info=exc_info)
-
-    def close(self):
-        if self.closed:
-            return
-        self._singleton.clear()
-        self.closed = True
-        self.close_trigger()
-        self.__super_close()
-
-    def close_trigger(self):
-        # Overridden by ManagedClientConnection.
-        if self.trigger is not None:
-            self.trigger.close()
-
-    def register_object(self, obj):
-        """Register obj as the true object to invoke methods on."""
-        self.obj = obj
-
-    # Subclass must implement.  handshake() is called by the constructor,
-    # near its end, but before self is added to asyncore's socket map.
-    # When a connection is created the first message sent is a 4-byte
-    # protocol version.  This allows the protocol to evolve over time, and
-    # lets servers handle clients using multiple versions of the protocol.
-    # In general, the server's handshake() just needs to send the server's
-    # preferred protocol; the client's also needs to queue (delay) outgoing
-    # messages until it sees the handshake from the server.
-    def handshake(self):
-        raise NotImplementedError
-
-    # Replaces message_input() for the first message received.  Records the
-    # protocol sent by the peer in `peer_protocol_version`, restores the
-    # normal message_input() method, and raises an exception if the peer's
-    # protocol is unacceptable.  That's all the server needs to do.  The
-    # client needs to do additional work in response to the server's
-    # handshake, and extends this method.
-    def recv_handshake(self, proto):
-        # Extended by ManagedClientConnection.
-        del self.message_input  # uncover normal-case message_input()
-        self.peer_protocol_version = proto
-        if self.oldest_protocol_version <= proto <= self.protocol_version:
-            self.log("received handshake %r" % proto, level=logging.INFO)
-        else:
-            self.log("bad handshake %s" % short_repr(proto),
-                     level=logging.ERROR)
-            raise ZRPCError("bad handshake %r" % proto)
-
-    def message_input(self, message):
-        """Decode an incoming message and dispatch it"""
-        # If something goes wrong during decoding, the marshaller
-        # will raise an exception.  The exception will ultimately
-        # result in asyncore calling handle_error(), which will
-        # close the connection.
-        msgid, flags, name, args = self.marshal.decode(message)
-
-        if __debug__:
-            self.log("recv msg: %s, %s, %s, %s" % (msgid, flags, name,
-                                                   short_repr(args)),
-                     level=TRACE)
-        if name == REPLY:
-            self.handle_reply(msgid, flags, args)
-        else:
-            self.handle_request(msgid, flags, name, args)
-
-    def handle_reply(self, msgid, flags, args):
-        if __debug__:
-            self.log("recv reply: %s, %s, %s"
-                     % (msgid, flags, short_repr(args)), level=TRACE)
-        self.replies_cond.acquire()
-        try:
-            self.replies[msgid] = flags, args
-            self.replies_cond.notifyAll()
-        finally:
-            self.replies_cond.release()
-
-    def handle_request(self, msgid, flags, name, args):
-        if not self.check_method(name):
-            msg = "Invalid method name: %s on %s" % (name, repr(self.obj))
-            raise ZRPCError(msg)
-        if __debug__:
-            self.log("calling %s%s" % (name, short_repr(args)),
-                     level=logging.DEBUG)
-
-        meth = getattr(self.obj, name)
-        try:
-            self.waiting_for_reply = True
-            try:
-                ret = meth(*args)
-            finally:
-                self.waiting_for_reply = False
-        except (SystemExit, KeyboardInterrupt):
-            raise
-        except Exception, msg:
-            self.log("%s() raised exception: %s" % (name, msg), logging.INFO,
-                     exc_info=True)
-            error = sys.exc_info()[:2]
-            return self.return_error(msgid, flags, *error)
-
-        if flags & ASYNC:
-            if ret is not None:
-                raise ZRPCError("async method %s returned value %s" %
-                                (name, short_repr(ret)))
-        else:
-            if __debug__:
-                self.log("%s returns %s" % (name, short_repr(ret)),
-                         logging.DEBUG)
-            if isinstance(ret, Delay):
-                ret.set_sender(msgid, self.send_reply, self.return_error)
-            else:
-                self.send_reply(msgid, ret)
-
-        if self.delay_sesskey:
-            self.__super_setSessionKey(self.delay_sesskey)
-            self.delay_sesskey = None
-
-    def handle_error(self):
-        if sys.exc_info()[0] == SystemExit:
-            raise   # re-raise the active SystemExit
-        self.log("Error caught in asyncore",
-                 level=logging.ERROR, exc_info=True)
-        self.close()
-
-    def check_method(self, name):
-        # TODO:  This is hardly "secure".
-        if name.startswith('_'):
-            return None
-        return hasattr(self.obj, name)
-
-    def send_reply(self, msgid, ret):
-        try:
-            msg = self.marshal.encode(msgid, 0, REPLY, ret)
-        except self.marshal.errors:
-            try:
-                r = short_repr(ret)
-            except:
-                r = "<unreprable>"
-            err = ZRPCError("Couldn't pickle return %.100s" % r)
-            msg = self.marshal.encode(msgid, 0, REPLY, (ZRPCError, err))
-        self.message_output(msg)
-        self.poll()
-
-    def return_error(self, msgid, flags, err_type, err_value):
-        if flags & ASYNC:
-            self.log("Asynchronous call raised exception: %s" % self,
-                     level=logging.ERROR, exc_info=True)
-            return
-        if type(err_value) is not types.InstanceType:
-            err_value = err_type, err_value
-
-        try:
-            msg = self.marshal.encode(msgid, 0, REPLY, (err_type, err_value))
-        except self.marshal.errors:
-            try:
-                r = short_repr(err_value)
-            except:
-                r = "<unreprable>"
-            err = ZRPCError("Couldn't pickle error %.100s" % r)
-            msg = self.marshal.encode(msgid, 0, REPLY, (ZRPCError, err))
-        self.message_output(msg)
-        self.poll()
-
-    def setSessionKey(self, key):
-        if self.waiting_for_reply:
-            self.delay_sesskey = key
-        else:
-            self.__super_setSessionKey(key)
-
-    # The next two public methods (call and callAsync) are used by
-    # clients to invoke methods on remote objects
-
-    def send_call(self, method, args, flags):
-        # send a message and return its msgid
-        self.msgid_lock.acquire()
-        try:
-            msgid = self.msgid
-            self.msgid = self.msgid + 1
-        finally:
-            self.msgid_lock.release()
-        if __debug__:
-            self.log("send msg: %d, %d, %s, ..." % (msgid, flags, method),
-                     level=TRACE)
-        buf = self.marshal.encode(msgid, flags, method, args)
-        self.message_output(buf)
-        return msgid
-
-    def call(self, method, *args):
-        if self.closed:
-            raise DisconnectedError()
-        msgid = self.send_call(method, args, 0)
-        r_flags, r_args = self.wait(msgid)
-        if (isinstance(r_args, types.TupleType) and len(r_args) > 1
-            and type(r_args[0]) == types.ClassType
-            and issubclass(r_args[0], Exception)):
-            inst = r_args[1]
-            raise inst # error raised by server
-        else:
-            return r_args
-
-    # For testing purposes, it is useful to begin a synchronous call
-    # but not block waiting for its response.  Since these methods are
-    # used for testing, they can assume they are not in async mode and
-    # call asyncore.poll() directly to get the message out without
-    # also waiting for the reply.
-
-    def _deferred_call(self, method, *args):
-        if self.closed:
-            raise DisconnectedError()
-        msgid = self.send_call(method, args, 0)
-        asyncore.poll(0.01, self._singleton)
-        return msgid
-
-    def _deferred_wait(self, msgid):
-        r_flags, r_args = self.wait(msgid)
-        if (isinstance(r_args, types.TupleType) and len(r_args) > 1
-            and type(r_args[0]) == types.ClassType
-            and issubclass(r_args[0], Exception)):
-            inst = r_args[1]
-            raise inst # error raised by server
-        else:
-            return r_args
-
-    def callAsync(self, method, *args):
-        if self.closed:
-            raise DisconnectedError()
-        self.send_call(method, args, ASYNC)
-        self.poll()
-
-    def callAsyncNoPoll(self, method, *args):
-        # Like CallAsync but doesn't poll.  This exists so that we can
-        # send invalidations atomically to all clients without
-        # allowing any client to sneak in a load request.
-        if self.closed:
-            raise DisconnectedError()
-        self.send_call(method, args, ASYNC)
-
-    # handle IO, possibly in async mode
-
-    def _prepare_async(self):
-        self.thr_async = False
-        ThreadedAsync.register_loop_callback(self.set_async)
-        # TODO:  If we are not in async mode, this will cause dead
-        # Connections to be leaked.
-
-    def set_async(self, map):
-        self.trigger = trigger()
-        self.thr_async = True
-
-    def is_async(self):
-        # Overridden by ManagedConnection
-        if self.thr_async:
-            return 1
-        else:
-            return 0
-
-    def _pull_trigger(self, tryagain=10):
-        try:
-            self.trigger.pull_trigger()
-        except OSError:
-            self.trigger.close()
-            self.trigger = trigger()
-            if tryagain > 0:
-                self._pull_trigger(tryagain=tryagain-1)
-
-    def wait(self, msgid):
-        """Invoke asyncore mainloop and wait for reply."""
-        if __debug__:
-            self.log("wait(%d), async=%d" % (msgid, self.is_async()),
-                     level=TRACE)
-        if self.is_async():
-            self._pull_trigger()
-
-        # Delay used when we call asyncore.poll() directly.
-        # Start with a 1 msec delay, double until 1 sec.
-        delay = 0.001
-
-        self.replies_cond.acquire()
-        try:
-            while 1:
-                if self.closed:
-                    raise DisconnectedError()
-                reply = self.replies.get(msgid)
-                if reply is not None:
-                    del self.replies[msgid]
-                    if __debug__:
-                        self.log("wait(%d): reply=%s" %
-                                 (msgid, short_repr(reply)), level=TRACE)
-                    return reply
-                if self.is_async():
-                    self.replies_cond.wait(10.0)
-                else:
-                    self.replies_cond.release()
-                    try:
-                        try:
-                            if __debug__:
-                                self.log("wait(%d): asyncore.poll(%s)" %
-                                         (msgid, delay), level=TRACE)
-                            asyncore.poll(delay, self._singleton)
-                            if delay < 1.0:
-                                delay += delay
-                        except select.error, err:
-                            self.log("Closing.  asyncore.poll() raised %s."
-                                     % err, level=BLATHER)
-                            self.close()
-                    finally:
-                        self.replies_cond.acquire()
-        finally:
-            self.replies_cond.release()
-
-    def flush(self):
-        """Invoke poll() until the output buffer is empty."""
-        if __debug__:
-            self.log("flush")
-        while self.writable():
-            self.poll()
-
-    def poll(self):
-        """Invoke asyncore mainloop to get pending message out."""
-        if __debug__:
-            self.log("poll(), async=%d" % self.is_async(), level=TRACE)
-        if self.is_async():
-            self._pull_trigger()
-        else:
-            asyncore.poll(0.0, self._singleton)
-
-    def pending(self, timeout=0):
-        """Invoke mainloop until any pending messages are handled."""
-        if __debug__:
-            self.log("pending(), async=%d" % self.is_async(), level=TRACE)
-        if self.is_async():
-            return
-        # Inline the asyncore poll() function to know whether any input
-        # was actually read.  Repeat until no input is ready.
-
-        # Pending does reads and writes.  In the case of server
-        # startup, we may need to write out zeoVerify() messages.
-        # Always check for read status, but check for write status only
-        # if there is output to do.  Only continue in this loop as long
-        # as there is data to read.
-        r = r_in = [self._fileno]
-        x_in = []
-        while r and not self.closed:
-            if self.writable():
-                w_in = [self._fileno]
-            else:
-                w_in = []
-            try:
-                r, w, x = select.select(r_in, w_in, x_in, timeout)
-            except select.error, err:
-                if err[0] == errno.EINTR:
-                    timeout = 0
-                    continue
-                else:
-                    raise
-            else:
-                # Make sure any subsequent select does not block.  The
-                # loop is only intended to make sure all incoming data is
-                # returned.
-
-                # Insecurity:  What if the server sends a lot of
-                # invalidations, such that pending never finishes?  Seems
-                # unlikely, but possible.
-                timeout = 0
-            if r:
-                try:
-                    self.handle_read_event()
-                except asyncore.ExitNow:
-                    raise
-                except:
-                    self.handle_error()
-            if w:
-                try:
-                    self.handle_write_event()
-                except asyncore.ExitNow:
-                    raise
-                except:
-                    self.handle_error()
-
-class ManagedServerConnection(Connection):
-    """Server-side Connection subclass."""
-    __super_init = Connection.__init__
-    __super_close = Connection.close
-
-    def __init__(self, sock, addr, obj, mgr):
-        self.mgr = mgr
-        self.__super_init(sock, addr, obj, 'S')
-        self.obj.notifyConnected(self)
-
-    def handshake(self):
-        # Send the server's preferred protocol to the client.
-        self.message_output(self.protocol_version)
-
-    def close(self):
-        self.obj.notifyDisconnected()
-        self.mgr.close_conn(self)
-        self.__super_close()
-
-class ManagedClientConnection(Connection):
-    """Client-side Connection subclass."""
-    __super_init = Connection.__init__
-    __super_close = Connection.close
-    base_message_output = Connection.message_output
-
-    def __init__(self, sock, addr, obj, mgr):
-        self.mgr = mgr
-
-        # We can't use the base smac's message_output directly because the
-        # client needs to queue outgoing messages until it's seen the
-        # initial protocol handshake from the server.  So we have our own
-        # message_output() method, and support for initial queueing.  This is
-        # a delicate design, requiring an output mutex to be wholly
-        # thread-safe.
-        # Caution:  we must set this up before calling the base class
-        # constructor, because the latter registers us with asyncore;
-        # we need to guarantee that we'll queue outgoing messages before
-        # asyncore learns about us.
-        self.output_lock = threading.Lock()
-        self.queue_output = True
-        self.queued_messages = []
-
-        self.__super_init(sock, addr, obj, tag='C')
-        self.check_mgr_async()
-
-    # Our message_output() queues messages until recv_handshake() gets the
-    # protocol handshake from the server.
-    def message_output(self, message):
-        self.output_lock.acquire()
-        try:
-            if self.queue_output:
-                self.queued_messages.append(message)
-            else:
-                assert not self.queued_messages
-                self.base_message_output(message)
-        finally:
-            self.output_lock.release()
-
-    def handshake(self):
-        # The client waits to see the server's handshake.  Outgoing messages
-        # are queued for the duration.  The client will send its own
-        # handshake after the server's handshake is seen, in recv_handshake()
-        # below.  It will then send any messages queued while waiting.
-        assert self.queue_output # the constructor already set this
-
-    def recv_handshake(self, proto):
-        # The protocol to use is the older of our and the server's preferred
-        # protocols.
-        proto = min(proto, self.protocol_version)
-
-        # Restore the normal message_input method, and raise an exception
-        # if the protocol version is too old.
-        Connection.recv_handshake(self, proto)
-
-        # Tell the server the protocol in use, then send any messages that
-        # were queued while waiting to hear the server's protocol, and stop
-        # queueing messages.
-        self.output_lock.acquire()
-        try:
-            self.base_message_output(proto)
-            for message in self.queued_messages:
-                self.base_message_output(message)
-            self.queued_messages = []
-            self.queue_output = False
-        finally:
-            self.output_lock.release()
-
-    # Defer the ThreadedAsync work to the manager.
-
-    def close_trigger(self):
-        # the manager should actually close the trigger
-        del self.trigger
-
-    def set_async(self, map):
-        pass
-
-    def _prepare_async(self):
-        # Don't do the register_loop_callback that the superclass does
-        pass
-
-    def check_mgr_async(self):
-        if not self.thr_async and self.mgr.thr_async:
-            assert self.mgr.trigger is not None, \
-                   "manager (%s) has no trigger" % self.mgr
-            self.thr_async = True
-            self.trigger = self.mgr.trigger
-            return 1
-        return 0
-
-    def is_async(self):
-        # TODO: could the check_mgr_async() be avoided on each test?
-        if self.thr_async:
-            return 1
-        return self.check_mgr_async()
-
-    def close(self):
-        self.mgr.close_conn(self)
-        self.__super_close()
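
The Connection machinery above pairs every outgoing request with a
monotonically increasing msgid, and call() blocks in wait() until the reply
carrying that msgid arrives.  A minimal sketch of that bookkeeping follows;
_transport_send() is a hypothetical stand-in for the marshalling and
asyncore layers:

    import threading

    class CallPattern:
        def __init__(self):
            self.msgid = 0
            self.msgid_lock = threading.Lock()
            self.replies = {}                      # msgid -> reply
            self.replies_cond = threading.Condition()

        def send_call(self, method, args, flags):
            # Allocate the msgid under a lock so concurrent callers never
            # share one, then put the encoded message on the wire.
            with self.msgid_lock:
                msgid = self.msgid
                self.msgid += 1
            self._transport_send((msgid, flags, method, args))  # hypothetical
            return msgid

        def wait(self, msgid):
            # Block until the input handler stores a reply under msgid.
            with self.replies_cond:
                while msgid not in self.replies:
                    self.replies_cond.wait()
                return self.replies.pop(msgid)
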
diff --git a/branches/bug1734/src/ZEO/zrpc/error.py b/branches/bug1734/src/ZEO/zrpc/error.py
deleted file mode 100644
index 9de3662c..00000000
--- a/branches/bug1734/src/ZEO/zrpc/error.py
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-from ZODB import POSException
-from ZEO.Exceptions import ClientDisconnected
-
-class ZRPCError(POSException.StorageError):
-    pass
-
-class DisconnectedError(ZRPCError, ClientDisconnected):
-    """The database storage is disconnected from the storage server.
-
-    The error occurred because of a problem in the low-level RPC connection,
-    or because the connection was closed.
-    """
-
-    # This subclass is raised when zrpc catches the error.
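
Because DisconnectedError inherits from both ZRPCError and
ClientDisconnected, application code written against ZEO's public exception
still catches the lower-level zrpc failure.  A short sketch, assuming the
module above is importable:

    from ZEO.Exceptions import ClientDisconnected
    from ZEO.zrpc.error import DisconnectedError

    try:
        raise DisconnectedError("connection to storage server lost")
    except ClientDisconnected:
        pass   # caught via the public base class, not the zrpc-internal one
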
diff --git a/branches/bug1734/src/ZEO/zrpc/log.py b/branches/bug1734/src/ZEO/zrpc/log.py
deleted file mode 100644
index a3554efd..00000000
--- a/branches/bug1734/src/ZEO/zrpc/log.py
+++ /dev/null
@@ -1,77 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import os
-import threading
-import logging
-
-from ZODB.loglevels import BLATHER
-
-LOG_THREAD_ID = 0 # Set this to 1 during heavy debugging
-
-logger = logging.getLogger('ZEO.zrpc')
-
-_label = "%s" % os.getpid()
-
-def new_label():
-    global _label
-    _label = str(os.getpid())
-
-def log(message, level=BLATHER, label=None, exc_info=False):
-    label = label or _label
-    if LOG_THREAD_ID:
-        label = label + ':' + threading.currentThread().getName()
-    logger.log(level, '(%s) %s' % (label, message), exc_info=exc_info)
-
-REPR_LIMIT = 60
-
-def short_repr(obj):
-    "Return an object repr limited to REPR_LIMIT bytes."
-
-    # Some of the objects being repr'd are large strings. A lot of memory
-    # would be wasted to repr them and then truncate, so they are treated
-    # specially in this function.
-    # Also handle short repr of a tuple containing a long string.
-
-    # This strategy works well for arguments to StorageServer methods.
-    # The oid is usually first and will get included in its entirety.
-    # The pickle is near the beginning, too, and you can often fit the
-    # module name in the pickle.
-
-    if isinstance(obj, str):
-        if len(obj) > REPR_LIMIT:
-            r = repr(obj[:REPR_LIMIT])
-        else:
-            r = repr(obj)
-        if len(r) > REPR_LIMIT:
-            r = r[:REPR_LIMIT-4] + '...' + r[-1]
-        return r
-    elif isinstance(obj, (list, tuple)):
-        elts = []
-        size = 0
-        for elt in obj:
-            r = short_repr(elt)
-            elts.append(r)
-            size += len(r)
-            if size > REPR_LIMIT:
-                break
-        if isinstance(obj, tuple):
-            r = "(%s)" % (", ".join(elts))
-        else:
-            r = "[%s]" % (", ".join(elts))
-    else:
-        r = repr(obj)
-    if len(r) > REPR_LIMIT:
-        return r[:REPR_LIMIT] + '...'
-    else:
-        return r
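
short_repr() truncates long strings before repr'ing them and truncates
containers element by element, so huge pickles in call arguments never bloat
the log.  A usage sketch; the output shapes noted in the comments are
approximate:

    from ZEO.zrpc.log import short_repr

    print(short_repr("x" * 1000))            # capped near REPR_LIMIT, ends '...'
    print(short_repr(("\0" * 8, "p" * 500))) # oid kept whole, pickle truncated
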
diff --git a/branches/bug1734/src/ZEO/zrpc/marshal.py b/branches/bug1734/src/ZEO/zrpc/marshal.py
deleted file mode 100644
index 88d193ff..00000000
--- a/branches/bug1734/src/ZEO/zrpc/marshal.py
+++ /dev/null
@@ -1,79 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import cPickle
-from cStringIO import StringIO
-import types
-import logging
-
-from ZEO.zrpc.error import ZRPCError
-from ZEO.zrpc.log import log, short_repr
-
-class Marshaller:
-    """Marshal requests and replies to second across network"""
-
-    def encode(self, msgid, flags, name, args):
-        """Returns an encoded message"""
-        # (We used to have a global pickler, but that's not thread-safe. :-( )
-        # Note that args may contain very large binary pickles already; for
-        # this reason, it's important to use proto 1 (or higher) pickles here
-        # too.  For a long time, this used proto 0 pickles, and that can
-        # bloat our pickle to 4x the size (due to high-bit and control bytes
-        # being represented by \xij escapes in proto 0).
-        # Undocumented:  cPickle.Pickler accepts a lone protocol argument;
-        # pickle.py does not.
-        pickler = cPickle.Pickler(1)
-        pickler.fast = 1
-
-        # Undocumented:  pickler.dump(), for a cPickle.Pickler, takes
-        # an optional boolean argument.  When true, it returns the pickle;
-        # when false or unspecified, it returns the pickler object itself.
-        # pickle.py does none of this.
-        return pickler.dump((msgid, flags, name, args), 1)
-
-    def decode(self, msg):
-        """Decodes msg and returns its parts"""
-        unpickler = cPickle.Unpickler(StringIO(msg))
-        unpickler.find_global = find_global
-
-        try:
-            return unpickler.load() # msgid, flags, name, args
-        except:
-            log("can't decode message: %s" % short_repr(msg),
-                level=logging.ERROR)
-            raise
-
-_globals = globals()
-_silly = ('__doc__',)
-
-def find_global(module, name):
-    """Helper for message unpickler"""
-    try:
-        m = __import__(module, _globals, _globals, _silly)
-    except ImportError, msg:
-        raise ZRPCError("import error %s: %s" % (module, msg))
-
-    try:
-        r = getattr(m, name)
-    except AttributeError:
-        raise ZRPCError("module %s has no global %s" % (module, name))
-
-    safe = getattr(r, '__no_side_effects__', 0)
-    if safe:
-        return r
-
-    # TODO:  is there a better way to do this?
-    if type(r) == types.ClassType and issubclass(r, Exception):
-        return r
-
-    raise ZRPCError("Unsafe global: %s.%s" % (module, name))
diff --git a/branches/bug1734/src/ZEO/zrpc/server.py b/branches/bug1734/src/ZEO/zrpc/server.py
deleted file mode 100644
index a821b6c4..00000000
--- a/branches/bug1734/src/ZEO/zrpc/server.py
+++ /dev/null
@@ -1,59 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-import asyncore
-import socket
-import types
-
-from ZEO.zrpc.connection import Connection
-from ZEO.zrpc.log import log
-import logging
-import ThreadedAsync.LoopCallback
-
-# Export the main asyncore loop
-loop = ThreadedAsync.LoopCallback.loop
-
-class Dispatcher(asyncore.dispatcher):
-    """A server that accepts incoming RPC connections"""
-    __super_init = asyncore.dispatcher.__init__
-
-    def __init__(self, addr, factory=Connection):
-        self.__super_init()
-        self.addr = addr
-        self.factory = factory
-        self._open_socket()
-
-    def _open_socket(self):
-        if type(self.addr) == types.TupleType:
-            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
-        else:
-            self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        self.set_reuse_addr()
-        log("listening on %s" % str(self.addr), logging.INFO)
-        self.bind(self.addr)
-        self.listen(5)
-
-    def writable(self):
-        return 0
-
-    def readable(self):
-        return 1
-
-    def handle_accept(self):
-        try:
-            sock, addr = self.accept()
-        except socket.error, msg:
-            log("accepted failed: %s" % msg)
-            return
-        c = self.factory(sock, addr)
-        log("connect from %s: %s" % (repr(addr), c))
diff --git a/branches/bug1734/src/ZEO/zrpc/smac.py b/branches/bug1734/src/ZEO/zrpc/smac.py
deleted file mode 100644
index 1d668fb3..00000000
--- a/branches/bug1734/src/ZEO/zrpc/smac.py
+++ /dev/null
@@ -1,308 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Sized Message Async Connections.
-
-This class extends the basic asyncore layer with a record-marking
-layer.  The message_output() method accepts an arbitrary sized string
-as its argument.  It sends over the wire the length of the string
-encoded using struct.pack('>I') and the string itself.  The receiver
-passes the original string to message_input().
-
-This layer also supports an optional message authentication code
-(MAC).  If a session key is present, it uses HMAC-SHA-1 to generate a
-20-byte MAC.  If a MAC is present, the high-order bit of the length
-is set to 1 and the MAC immediately follows the length.
-"""
-
-import asyncore
-import errno
-try:
-    import hmac
-except ImportError:
-    import _hmac as hmac
-import sha
-import socket
-import struct
-import threading
-import logging
-from types import StringType
-
-from ZODB.loglevels import TRACE
-
-from ZEO.zrpc.log import log, short_repr
-from ZEO.zrpc.error import DisconnectedError
-
-
-# Use the dictionary to make sure we get the minimum number of errno
-# entries.   We expect that EWOULDBLOCK == EAGAIN on most systems --
-# or that only one is actually used.
-
-tmp_dict = {errno.EWOULDBLOCK: 0,
-            errno.EAGAIN: 0,
-            errno.EINTR: 0,
-            }
-expected_socket_read_errors = tuple(tmp_dict.keys())
-
-tmp_dict = {errno.EAGAIN: 0,
-            errno.EWOULDBLOCK: 0,
-            errno.ENOBUFS: 0,
-            errno.EINTR: 0,
-            }
-expected_socket_write_errors = tuple(tmp_dict.keys())
-del tmp_dict
-
-# We chose 60000 as the socket limit by looking at the largest strings
-# that we could pass to send() without blocking.
-SEND_SIZE = 60000
-
-MAC_BIT = 0x80000000L
-
-class SizedMessageAsyncConnection(asyncore.dispatcher):
-    __super_init = asyncore.dispatcher.__init__
-    __super_close = asyncore.dispatcher.close
-
-    __closed = True # Marker indicating that we're closed
-
-    socket = None # to outwit Sam's getattr
-
-    def __init__(self, sock, addr, map=None, debug=None):
-        self.addr = addr
-        if debug is not None:
-            self._debug = debug
-        elif not hasattr(self, '_debug'):
-            self._debug = __debug__
-        # __input_lock protects __inp, __input_len, __state, __msg_size
-        self.__input_lock = threading.Lock()
-        self.__inp = None # None, a single String, or a list
-        self.__input_len = 0
-        # Instance variables __state, __msg_size and __has_mac work together:
-        #   when __state == 0:
-        #     __msg_size == 4, and the next thing read is a message size;
-        #     __has_mac is set according to the MAC_BIT in the header
-        #   when __state == 1:
-        #     __msg_size is variable, and the next thing read is a message.
-        #     __has_mac indicates if we're in MAC mode or not (and
-        #               therefore, if we need to check the mac header)
-        # The next thing read is always of length __msg_size.
-        # The state alternates between 0 and 1.
-        self.__state = 0
-        self.__has_mac = 0
-        self.__msg_size = 4
-        self.__output_lock = threading.Lock() # Protects __output
-        self.__output = []
-        self.__closed = False
-        # Each side of the connection sends and receives messages.  A
-        # MAC is generated for each message and depends on each
-        # previous MAC; the state of the MAC generator depends on the
-        # history of operations it has performed.  So the MACs must be
-        # generated in the same order they are verified.
-
-        # Each side is guaranteed to receive messages in the order
-        # they are sent, but there is no ordering constraint between
-        # message sends and receives.  If the two sides are A and B
-        # and message An indicates the nth message sent by A, then
-        # A1 A2 B1 B2 and A1 B1 B2 A2 are both legitimate total
-        # orderings of the messages.
-
-        # As a result, there must be separate MAC generators for each
-        # side of the connection.  If not, the generator state would
-        # be different after A1 A2 B1 B2 than it would be after
-        # A1 B1 B2 A2; if the generator state was different, the MAC
-        # could not be verified.
-        self.__hmac_send = None
-        self.__hmac_recv = None
-
-        self.__super_init(sock, map)
-
-    def setSessionKey(self, sesskey):
-        log("set session key %r" % sesskey)
-        self.__hmac_send = hmac.HMAC(sesskey, digestmod=sha)
-        self.__hmac_recv = hmac.HMAC(sesskey, digestmod=sha)
-
-    def get_addr(self):
-        return self.addr
-
-    # TODO: avoid expensive getattr calls?  Can't remember exactly what
-    # this comment was supposed to mean, but it has something to do
-    # with the way asyncore uses getattr and uses if sock:
-    def __nonzero__(self):
-        return 1
-
-    def handle_read(self):
-        self.__input_lock.acquire()
-        try:
-            # Use a single __inp buffer and integer indexes to make this fast.
-            try:
-                d = self.recv(8192)
-            except socket.error, err:
-                if err[0] in expected_socket_read_errors:
-                    return
-                raise
-            if not d:
-                return
-
-            input_len = self.__input_len + len(d)
-            msg_size = self.__msg_size
-            state = self.__state
-            has_mac = self.__has_mac
-
-            inp = self.__inp
-            if msg_size > input_len:
-                if inp is None:
-                    self.__inp = d
-                elif type(self.__inp) is StringType:
-                    self.__inp = [self.__inp, d]
-                else:
-                    self.__inp.append(d)
-                self.__input_len = input_len
-                return # keep waiting for more input
-
-            # load all previous input and d into single string inp
-            if isinstance(inp, StringType):
-                inp = inp + d
-            elif inp is None:
-                inp = d
-            else:
-                inp.append(d)
-                inp = "".join(inp)
-
-            offset = 0
-            while (offset + msg_size) <= input_len:
-                msg = inp[offset:offset + msg_size]
-                offset = offset + msg_size
-                if not state:
-                    msg_size = struct.unpack(">I", msg)[0]
-                    has_mac = msg_size & MAC_BIT
-                    if has_mac:
-                        msg_size ^= MAC_BIT
-                        msg_size += 20
-                    elif self.__hmac_send:
-                        raise ValueError("Received message without MAC")
-                    state = 1
-                else:
-                    msg_size = 4
-                    state = 0
-                    # Obscure:  We call message_input() with __input_lock
-                    # held!!!  And message_input() may end up calling
-                    # message_output(), which has its own lock.  But
-                    # message_output() cannot call message_input(), so
-                    # the locking order is always consistent, which
-                    # prevents deadlock.  Also, message_input() may
-                    # take a long time, because it can cause an
-                    # incoming call to be handled.  During all this
-                    # time, the __input_lock is held.  That's a good
-                    # thing, because it serializes incoming calls.
-                    if has_mac:
-                        mac = msg[:20]
-                        msg = msg[20:]
-                        if self.__hmac_recv:
-                            self.__hmac_recv.update(msg)
-                            _mac = self.__hmac_recv.digest()
-                            if mac != _mac:
-                                raise ValueError("MAC failed: %r != %r"
-                                                 % (_mac, mac))
-                        else:
-                            log("Received MAC but no session key set")
-                    elif self.__hmac_send:
-                        raise ValueError("Received message without MAC")
-                    self.message_input(msg)
-
-            self.__state = state
-            self.__has_mac = has_mac
-            self.__msg_size = msg_size
-            self.__inp = inp[offset:]
-            self.__input_len = input_len - offset
-        finally:
-            self.__input_lock.release()
-
-    def readable(self):
-        return True
-
-    def writable(self):
-        if len(self.__output) == 0:
-            return False
-        else:
-            return True
-
-    def handle_write(self):
-        self.__output_lock.acquire()
-        try:
-            output = self.__output
-            while output:
-                # Accumulate output into a single string so that we avoid
-                # multiple send() calls, but avoid accumulating too much
-                # data.  If we send a very small string and have more data
-                # to send, we will likely incur delays caused by the
-                # unfortunate interaction between the Nagle algorithm and
-                # delayed acks.  If we send a very large string, only a
-                # portion of it will actually be delivered at a time.
-
-                l = 0
-                for i in range(len(output)):
-                    l += len(output[i])
-                    if l > SEND_SIZE:
-                        break
-
-                i += 1
-                # It is very unlikely that i will be 1.
-                v = "".join(output[:i])
-                del output[:i]
-
-                try:
-                    n = self.send(v)
-                except socket.error, err:
-                    if err[0] in expected_socket_write_errors:
-                        break # we couldn't write anything
-                    raise
-                if n < len(v):
-                    output.insert(0, v[n:])
-                    break # we can't write any more
-        finally:
-            self.__output_lock.release()
-
-    def handle_close(self):
-        self.close()
-
-    def message_output(self, message):
-        if __debug__:
-            if self._debug:
-                log("message_output %d bytes: %s hmac=%d" %
-                    (len(message), short_repr(message),
-                    self.__hmac_send and 1 or 0),
-                    level=TRACE)
-
-        if self.__closed:
-            raise DisconnectedError(
-                "This action is temporarily unavailable.<p>")
-        self.__output_lock.acquire()
-        try:
-            # do two separate appends to avoid copying the message string
-            if self.__hmac_send:
-                self.__output.append(struct.pack(">I", len(message) | MAC_BIT))
-                self.__hmac_send.update(message)
-                self.__output.append(self.__hmac_send.digest())
-            else:
-                self.__output.append(struct.pack(">I", len(message)))
-            if len(message) <= SEND_SIZE:
-                self.__output.append(message)
-            else:
-                for i in range(0, len(message), SEND_SIZE):
-                    self.__output.append(message[i:i+SEND_SIZE])
-        finally:
-            self.__output_lock.release()
-
-    def close(self):
-        if not self.__closed:
-            self.__closed = True
-            self.__super_close()
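
The wire format described in the smac docstring is a 4-byte big-endian
length, optionally followed by a 20-byte HMAC-SHA-1 digest when the high bit
of the length is set.  A simplified per-message sketch; note that the
deleted code instead chains one HMAC object per direction across all
messages, so real digests depend on the full message history:

    import hashlib
    import hmac
    import struct

    MAC_BIT = 0x80000000

    def frame(message, session_key=None):
        # Without a session key: plain length-prefixed record.
        if session_key is None:
            return struct.pack(">I", len(message)) + message
        # With a session key: set MAC_BIT and insert the digest between
        # the length header and the payload.
        digest = hmac.new(session_key, message, hashlib.sha1).digest()
        return struct.pack(">I", len(message) | MAC_BIT) + digest + message
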
diff --git a/branches/bug1734/src/ZEO/zrpc/trigger.py b/branches/bug1734/src/ZEO/zrpc/trigger.py
deleted file mode 100644
index 451e8123..00000000
--- a/branches/bug1734/src/ZEO/zrpc/trigger.py
+++ /dev/null
@@ -1,216 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import asyncore
-import os
-import socket
-import thread
-
-if os.name == 'posix':
-
-    class trigger(asyncore.file_dispatcher):
-
-        "Wake up a call to select() running in the main thread"
-
-        # This is useful in a context where you are using Medusa's I/O
-        # subsystem to deliver data, but the data is generated by another
-        # thread.  Normally, if Medusa is in the middle of a call to
-        # select(), new output data generated by another thread will have
-        # to sit until the call to select() either times out or returns.
-        # If the trigger is 'pulled' by another thread, it should immediately
-        # generate a READ event on the trigger object, which will force the
-        # select() invocation to return.
-
-        # A common use for this facility: letting Medusa manage I/O for a
-        # large number of connections; but routing each request through a
-        # thread chosen from a fixed-size thread pool.  When a thread is
-        # acquired, a transaction is performed, but output data is
-        # accumulated into buffers that will be emptied more efficiently
-        # by Medusa. [picture a server that can process database queries
-        # rapidly, but doesn't want to tie up threads waiting to send data
-        # to low-bandwidth connections]
-
-        # The other major feature provided by this class is the ability to
-        # move work back into the main thread: if you call pull_trigger()
-        # with a thunk argument, when select() wakes up and receives the
-        # event it will call your thunk from within that thread.  The main
-        # purpose of this is to remove the need to wrap thread locks around
-        # Medusa's data structures, which normally do not need them.  [To see
-        # why this is true, imagine this scenario: A thread tries to push some
-        # new data onto a channel's outgoing data queue at the same time that
-        # the main thread is trying to remove some]
-
-        def __init__(self):
-            r, w = self._fds = os.pipe()
-            self.trigger = w
-            asyncore.file_dispatcher.__init__(self, r)
-            self.lock = thread.allocate_lock()
-            self.thunks = []
-            self._closed = 0
-
-        # Override the asyncore close() method, because it seems that
-        # it would only close the r file descriptor and not w.  The
-        # constructor calls file_dispatcher.__init__ and passes r,
-        # which would get stored in a file_wrapper and get closed by
-        # the default close.  But that would leave w open...
-
-        def close(self):
-            if not self._closed:
-                self._closed = 1
-                self.del_channel()
-                for fd in self._fds:
-                    os.close(fd)
-                self._fds = []
-
-        def __repr__(self):
-            return '<select-trigger (pipe) at %x>' % id(self)
-
-        def readable(self):
-            return 1
-
-        def writable(self):
-            return 0
-
-        def handle_connect(self):
-            pass
-
-        def handle_close(self):
-            self.close()
-
-        def pull_trigger(self, thunk=None):
-            if thunk:
-                self.lock.acquire()
-                try:
-                    self.thunks.append(thunk)
-                finally:
-                    self.lock.release()
-            os.write(self.trigger, 'x')
-
-        def handle_read(self):
-            try:
-                self.recv(8192)
-            except socket.error:
-                return
-            self.lock.acquire()
-            try:
-                for thunk in self.thunks:
-                    try:
-                        thunk()
-                    except:
-                        nil, t, v, tbinfo = asyncore.compact_traceback()
-                        print ('exception in trigger thunk:'
-                               ' (%s:%s %s)' % (t, v, tbinfo))
-                self.thunks = []
-            finally:
-                self.lock.release()
-
-else:
-
-    # TODO:  Should define a base class that has the common methods and
-    # then put the platform-specific code in a subclass named trigger.
-
-    # win32-safe version
-
-    HOST = '127.0.0.1'
-    MINPORT = 19950
-    NPORTS = 50
-
-    class trigger(asyncore.dispatcher):
-
-        portoffset = 0
-
-        def __init__(self):
-            a = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            w = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
-            # set TCP_NODELAY to true to avoid buffering
-            w.setsockopt(socket.IPPROTO_TCP, 1, 1)
-
-            # tricky: get a pair of connected sockets
-            for i in range(NPORTS):
-                trigger.portoffset = (trigger.portoffset + 1) % NPORTS
-                port = MINPORT + trigger.portoffset
-                address = (HOST, port)
-                try:
-                    a.bind(address)
-                except socket.error:
-                    continue
-                else:
-                    break
-            else:
-                raise RuntimeError, 'Cannot bind trigger!'
-
-            a.listen(1)
-            w.setblocking(0)
-            try:
-                w.connect(address)
-            except:
-                pass
-            r, addr = a.accept()
-            a.close()
-            w.setblocking(1)
-            self.trigger = w
-
-            asyncore.dispatcher.__init__(self, r)
-            self.lock = thread.allocate_lock()
-            self.thunks = []
-            self._trigger_connected = 0
-            self._closed = 0
-
-        def close(self):
-            if not self._closed:
-                self._closed = 1
-                self.del_channel()
-                # self.socket is a, self.trigger is w from __init__
-                self.socket.close()
-                self.trigger.close()
-
-        def __repr__(self):
-            return '<select-trigger (loopback) at %x>' % id(self)
-
-        def readable(self):
-            return 1
-
-        def writable(self):
-            return 0
-
-        def handle_connect(self):
-            pass
-
-        def pull_trigger(self, thunk=None):
-            if thunk:
-                self.lock.acquire()
-                try:
-                    self.thunks.append(thunk)
-                finally:
-                    self.lock.release()
-            self.trigger.send('x')
-
-        def handle_read(self):
-            try:
-                self.recv(8192)
-            except socket.error:
-                return
-            self.lock.acquire()
-            try:
-                for thunk in self.thunks:
-                    try:
-                        thunk()
-                    except:
-                        nil, t, v, tbinfo = asyncore.compact_traceback()
-                        print ('exception in trigger thunk:'
-                               ' (%s:%s %s)' % (t, v, tbinfo))
-                self.thunks = []
-            finally:
-                self.lock.release()
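
On posix the trigger is the classic self-pipe trick: select() watches the
read end of a pipe, and any thread can wake it by writing a byte to the
write end.  A tiny standalone demonstration (posix only):

    import os
    import select
    import threading

    r, w = os.pipe()
    threading.Timer(0.1, os.write, (w, b"x")).start()  # another thread "pulls"
    ready, _, _ = select.select([r], [], [])           # blocks until the write
    os.read(r, 8192)                                   # drain the pipe
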
diff --git a/branches/bug1734/src/ZODB/ActivityMonitor.py b/branches/bug1734/src/ZODB/ActivityMonitor.py
deleted file mode 100644
index 6b993929..00000000
--- a/branches/bug1734/src/ZODB/ActivityMonitor.py
+++ /dev/null
@@ -1,108 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""ZODB transfer activity monitoring
-
-$Id$"""
-
-import time
-
-
-class ActivityMonitor:
-    """ZODB load/store activity monitor
-
-    This simple implementation just keeps a small log in memory
-    and iterates over the log when getActivityAnalysis() is called.
-
-    It assumes that log entries are added in chronological sequence,
-    which is only guaranteed because DB.py holds a lock when calling
-    the closedConnection() method.
-    """
-
-    def __init__(self, history_length=3600):
-        self.history_length = history_length  # Number of seconds
-        self.log = []                     # [(time, loads, stores)]
-
-    def closedConnection(self, conn):
-        log = self.log
-        now = time.time()
-        loads, stores = conn.getTransferCounts(1)
-        log.append((now, loads, stores))
-        self.trim(now)
-
-    def trim(self, now):
-        log = self.log
-        cutoff = now - self.history_length
-        n = 0
-        loglen = len(log)
-        while n < loglen and log[n][0] < cutoff:
-            n = n + 1
-        if n:
-            del log[:n]
-
-    def setHistoryLength(self, history_length):
-        self.history_length = history_length
-        self.trim(time.time())
-
-    def getHistoryLength(self):
-        return self.history_length
-
-    def getActivityAnalysis(self, start=0, end=0, divisions=10):
-        res = []
-        now = time.time()
-        if start == 0:
-            start = now - self.history_length
-        if end == 0:
-            end = now
-        for n in range(divisions):
-            res.append({
-                'start': start + (end - start) * n / divisions,
-                'end': start + (end - start) * (n + 1) / divisions,
-                'loads': 0,
-                'stores': 0,
-                'connections': 0,
-                })
-
-        div = res[0]
-        div_end = div['end']
-        div_index = 0
-        connections = 0
-        total_loads = 0
-        total_stores = 0
-        for t, loads, stores in self.log:
-            if t < start:
-                # We could use a binary search to find the start.
-                continue
-            elif t > end:
-                # We could use a binary search to find the end also.
-                break
-            while t > div_end:
-                div['loads'] = total_loads
-                div['stores'] = total_stores
-                div['connections'] = connections
-                total_loads = 0
-                total_stores = 0
-                connections = 0
-                div_index = div_index + 1
-                if div_index < divisions:
-                    div = res[div_index]
-                    div_end = div['end']
-            connections = connections + 1
-            total_loads = total_loads + loads
-            total_stores = total_stores + stores
-
-        div['stores'] = div['stores'] + total_stores
-        div['loads'] = div['loads'] + total_loads
-        div['connections'] = div['connections'] + connections
-
-        return res
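
getActivityAnalysis() walks the in-memory log once, spilling each
(time, loads, stores) entry into the division whose start/end window
contains it.  A small usage sketch with a hypothetical connection stand-in:

    from ZODB.ActivityMonitor import ActivityMonitor

    class FakeConnection:
        # Hypothetical stand-in; a real ZODB Connection reports and
        # resets its load/store counters via getTransferCounts(1).
        def getTransferCounts(self, clear=0):
            return 3, 1   # loads, stores

    am = ActivityMonitor(history_length=60)
    am.closedConnection(FakeConnection())
    for div in am.getActivityAnalysis(divisions=2):
        print(div["start"], div["end"], div["loads"], div["stores"])
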
diff --git a/branches/bug1734/src/ZODB/BaseStorage.py b/branches/bug1734/src/ZODB/BaseStorage.py
deleted file mode 100644
index e090e219..00000000
--- a/branches/bug1734/src/ZODB/BaseStorage.py
+++ /dev/null
@@ -1,435 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Handy standard storage machinery
-
-$Id$
-"""
-import cPickle
-import threading
-import time
-import logging
-
-from persistent.TimeStamp import TimeStamp
-
-from ZODB import POSException
-from ZODB.utils import z64, oid_repr
-from ZODB.UndoLogCompatible import UndoLogCompatible
-
-log = logging.getLogger("ZODB.BaseStorage")
-
-class BaseStorage(UndoLogCompatible):
-    """Abstract base class that supports storage implementations.
-
-    A subclass must define the following methods:
-    load()
-    close()
-    cleanup()
-    lastSerial()
-    lastTransaction()
-
-    It must override these hooks:
-    _begin()
-    _vote()
-    _abort()
-    _finish()
-    _clear_temp()
-
-    If it stores multiple revisions, it should implement
-    loadSerial()
-    loadBefore()
-    iterator()
-
-    If the subclass wants to implement undo, it should implement the
-    multiple revision methods and:
-    loadSerial()
-    undo()
-    undoInfo()
-    undoLog()
-
-    If the subclass wants to implement versions, it must implement:
-    abortVersion()
-    commitVersion()
-    modifiedInVersion()
-    versionEmpty()
-    versions()
-
-    Each storage will have two locks that are accessed via lock
-    acquire and release methods bound to the instance.  (Yuck.)
-    _lock_acquire / _lock_release (reentrant)
-    _commit_lock_acquire / _commit_lock_release
-
-    The commit lock is acquired in tpc_begin() and released in
-    tpc_abort() and tpc_finish().  It is never acquired with the other
-    lock held.
-
-    The other lock appears to protect _oid and _transaction and
-    perhaps other things.  It is always held when load() is called, so
-    presumably the load() implementation should also acquire the lock.
-    """
-    _transaction=None # Transaction that is being committed
-    _tstatus=' '      # Transaction status, used for copying data
-    _is_read_only = False
-
-    def __init__(self, name, base=None):
-        self.__name__= name
-        log.debug("create storage %s", self.__name__)
-
-        # Allocate locks:
-        l = threading.RLock()
-        self._lock_acquire = l.acquire
-        self._lock_release = l.release
-        l = threading.Lock()
-        self._commit_lock_acquire = l.acquire
-        self._commit_lock_release = l.release
-
-        t=time.time()
-        t=self._ts=TimeStamp(*(time.gmtime(t)[:5]+(t%60,)))
-        self._tid = `t`
-        if base is None:
-            self._oid=z64
-        else:
-            self._oid=base._oid
-
-    def abortVersion(self, src, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-        return self._tid, []
-
-    def commitVersion(self, src, dest, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-        return self._tid, []
-
-    def close(self):
-        pass
-
-    def cleanup(self):
-        pass
-
-    def sortKey(self):
-        """Return a string that can be used to sort storage instances.
-
-        The key must uniquely identify a storage and must be the same
-        across multiple instantiations of the same storage.
-        """
-        # name may not be sufficient, e.g. ZEO has a user-definable name.
-        return self.__name__
-
-    def getName(self):
-        return self.__name__
-
-    def getSize(self):
-        return len(self)*300 # WAG!
-
-    def history(self, oid, version, length=1, filter=None):
-        pass
-
-    def modifiedInVersion(self, oid):
-        return ''
-
-    def new_oid(self, last=None):
-        # 'last' is only for internal use, not part of the public API
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if last is None:
-            self._lock_acquire()
-            try:
-                last=self._oid
-                d=ord(last[-1])
-                if d < 255: last=last[:-1]+chr(d+1)
-                else:       last=self.new_oid(last[:-1])
-                self._oid=last
-                return last
-            finally: self._lock_release()
-        else:
-            d=ord(last[-1])
-            if d < 255: return last[:-1]+chr(d+1)+'\0'*(8-len(last))
-            else:       return self.new_oid(last[:-1])
-
-    # Update the maximum oid in use, under protection of a lock.  The
-    # maximum-in-use attribute is changed only if possible_new_max_oid is
-    # larger than its current value.
-    def set_max_oid(self, possible_new_max_oid):
-        self._lock_acquire()
-        try:
-            if possible_new_max_oid > self._oid:
-                self._oid = possible_new_max_oid
-        finally:
-            self._lock_release()
-
-    def registerDB(self, db, limit):
-        pass # we don't care
-
-    def isReadOnly(self):
-        return self._is_read_only
-
-    def supportsUndo(self):
-        return 0
-
-    def supportsVersions(self):
-        return 0
-
-    def tpc_abort(self, transaction):
-        self._lock_acquire()
-        try:
-            if transaction is not self._transaction:
-                return
-            self._abort()
-            self._clear_temp()
-            self._transaction = None
-            self._commit_lock_release()
-        finally:
-            self._lock_release()
-
-    def _abort(self):
-        """Subclasses should redefine this to supply abort actions"""
-        pass
-
-    def tpc_begin(self, transaction, tid=None, status=' '):
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        self._lock_acquire()
-        try:
-            if self._transaction is transaction:
-                return
-            self._lock_release()
-            self._commit_lock_acquire()
-            self._lock_acquire()
-            self._transaction = transaction
-            self._clear_temp()
-
-            user = transaction.user
-            desc = transaction.description
-            ext = transaction._extension
-            if ext:
-                ext = cPickle.dumps(ext, 1)
-            else:
-                ext = ""
-            self._ude = user, desc, ext
-
-            if tid is None:
-                now = time.time()
-                t = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,)))
-                self._ts = t = t.laterThan(self._ts)
-                self._tid = `t`
-            else:
-                self._ts = TimeStamp(tid)
-                self._tid = tid
-
-            self._tstatus = status
-            self._begin(self._tid, user, desc, ext)
-        finally:
-            self._lock_release()
-
-    def _begin(self, tid, u, d, e):
-        """Subclasses should redefine this to supply transaction start actions.
-        """
-        pass
-
-    def tpc_vote(self, transaction):
-        self._lock_acquire()
-        try:
-            if transaction is not self._transaction:
-                return
-            self._vote()
-        finally:
-            self._lock_release()
-
-    def _vote(self):
-        """Subclasses should redefine this to supply transaction vote actions.
-        """
-        pass
-
-    def tpc_finish(self, transaction, f=None):
-        # It's important that the storage calls the function we pass
-        # while it still has its lock.  We don't want another thread
-        # to be able to read any updated data until we've had a chance
-        # to send an invalidation message to all of the other
-        # connections!
-
-        self._lock_acquire()
-        try:
-            if transaction is not self._transaction:
-                return
-            try:
-                if f is not None:
-                    f(self._tid)
-                u, d, e = self._ude
-                self._finish(self._tid, u, d, e)
-                self._clear_temp()
-                return self._tid
-            finally:
-                self._ude = None
-                self._transaction = None
-                self._commit_lock_release()
-        finally:
-            self._lock_release()
-
-    def _finish(self, tid, u, d, e):
-        """Subclasses should redefine this to supply transaction finish actions
-        """
-        pass
-
-    def undo(self, transaction_id, txn):
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        raise POSException.UndoError, 'non-undoable transaction'
-
-    def undoLog(self, first, last, filter=None):
-        return ()
-
-    def versionEmpty(self, version):
-        return 1
-
-    def versions(self, max=None):
-        return ()
-
-    def pack(self, t, referencesf):
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-
-    def getSerial(self, oid):
-        self._lock_acquire()
-        try:
-            v = self.modifiedInVersion(oid)
-            pickledata, serial = self.load(oid, v)
-            return serial
-        finally:
-            self._lock_release()
-
-    def loadSerial(self, oid, serial):
-        raise POSException.Unsupported, (
-            "Retrieval of historical revisions is not supported")
-
-    def loadBefore(self, oid, tid):
-        """Return most recent revision of oid before tid committed."""
-
-        # Unsure: Is it okay for loadBefore() to return current data?
-        # There doesn't seem to be a good reason to forbid it, even
-        # though the typical use of this method will never find
-        # current data.  But maybe we should call it loadByTid()?
-
-        n = 2
-        start_time = None
-        end_time = None
-        while start_time is None:
-            # The history() approach is a hack, because the dict
-            # returned by history() doesn't contain a tid.  It
-            # contains a serialno, which is often the same, but isn't
-            # required to be.  We'll pretend it is for now.
-
-            # A second problem is that history() doesn't say anything
-            # about the transaction status.  If the transaction falls
-            # before the pack time, we can't honor the MVCC request.
-
-            # Note: history() returns the most recent record first.
-
-            # TODO: The filter argument to history() only appears to be
-            # supported by FileStorage.  Perhaps it shouldn't be used.
-            L = self.history(oid, "", n, lambda d: not d["version"])
-            if not L:
-                return
-            for d in L:
-                if d["serial"] < tid:
-                    start_time = d["serial"]
-                    break
-                else:
-                    end_time = d["serial"]
-            if len(L) < n:
-                break
-            n *= 2
-        if start_time is None:
-            return None
-        data = self.loadSerial(oid, start_time)
-        return data, start_time, end_time
-
-    def getExtensionMethods(self):
-        """getExtensionMethods
-
-        This returns a dictionary whose keys are names of extra methods
-        provided by this storage. Storage proxies (such as ZEO) should
-        call this method to determine the extra methods that they need
-        to proxy in addition to the standard storage methods.
-        Dictionary values should be None; this will be a handy place
-        for extra marshalling information, should we need it
-        """
-        return {}
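-
-    # A storage subclass with one extra method might (hypothetically)
-    # override this as:
-    #
-    #     def getExtensionMethods(self):
-    #         return {'somethingExtra': None}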
-
-    def copyTransactionsFrom(self, other, verbose=0):
-        """Copy transactions from another storage.
-
-        This is typically used for converting data from one storage to
-        another.  `other` must have an .iterator() method.
-        """
-        _ts = None
-        ok = 1
-        # preindex maps oid -> the serial returned by our last store() for
-        # that oid, so successive stores can pass the right previous serial.
-        preindex = {}
-        preget = preindex.get
-        # restore() is a new storage API method which has an identical
-        # signature to store() except that it does not return anything.
-        # Semantically, restore() is also identical to store() except that it
-        # doesn't do the ConflictError or VersionLockError consistency
-        # checks.  The reason to use restore() over store() in this method is
-        # that store() cannot be used to copy transactions spanning a version
-        # commit or abort, or over transactional undos.
-        #
-        # We'll use restore() if it's available, otherwise we'll fall back to
-        # using store().  However, if we use store, then
-        # copyTransactionsFrom() may fail with VersionLockError or
-        # ConflictError.
-        restoring = hasattr(self, 'restore')
-        fiter = other.iterator()
-        for transaction in fiter:
-            tid = transaction.tid
-            if _ts is None:
-                _ts = TimeStamp(tid)
-            else:
-                t = TimeStamp(tid)
-                if t <= _ts:
-                    if ok:
-                        print ('Time stamps out of order %s, %s' % (_ts, t))
-                    ok = 0
-                    _ts = t.laterThan(_ts)
-                    tid = `_ts`
-                else:
-                    _ts = t
-                    if not ok:
-                        print ('Time stamps back in order %s' % (t))
-                        ok = 1
-
-            if verbose:
-                print _ts
-
-            self.tpc_begin(transaction, tid, transaction.status)
-            for r in transaction:
-                oid = r.oid
-                if verbose:
-                    print oid_repr(oid), r.version, len(r.data)
-                if restoring:
-                    self.restore(oid, r.tid, r.data, r.version,
-                                 r.data_txn, transaction)
-                else:
-                    pre = preget(oid, None)
-                    s = self.store(oid, pre, r.data, r.version, transaction)
-                    preindex[oid] = s
-
-            self.tpc_vote(transaction)
-            self.tpc_finish(transaction)
-
-        fiter.close()
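-
-    # Hypothetical conversion sketch (names are illustrative): copy an
-    # existing FileStorage into a freshly created destination storage.
-    #
-    #     src = FileStorage('Data.fs', read_only=True)
-    #     dst = SomeOtherStorage(...)
-    #     dst.copyTransactionsFrom(src)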
-
-class TransactionRecord:
-    """Abstract base class for iterator protocol"""
-
-class DataRecord:
-    """Abstract base class for iterator protocol"""
diff --git a/branches/bug1734/src/ZODB/ConflictResolution.py b/branches/bug1734/src/ZODB/ConflictResolution.py
deleted file mode 100644
index 7b9ef63c..00000000
--- a/branches/bug1734/src/ZODB/ConflictResolution.py
+++ /dev/null
@@ -1,147 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import logging
-from cStringIO import StringIO
-from cPickle import Unpickler, Pickler
-from pickle import PicklingError
-
-from ZODB.POSException import ConflictError
-from ZODB.loglevels import BLATHER
-
-logger = logging.getLogger('ZODB.ConflictResolution')
-
-ResolvedSerial = 'rs'
-
-class BadClassName(Exception):
-    pass
-
-_class_cache = {}
-_class_cache_get = _class_cache.get
-def find_global(*args):
-    cls = _class_cache_get(args, 0)
-    if cls == 0:
-        # Not cached. Try to import
-        try:
-            module = __import__(args[0], {}, {}, ['cluck'])
-        except ImportError:
-            cls = 1
-        else:
-            cls = getattr(module, args[1], 1)
-        _class_cache[args] = cls
-
-        if cls == 1:
-            logger.log(BLATHER, "Unable to load class", exc_info=True)
-
-    if cls == 1:
-        # Not importable
-        raise BadClassName(*args)
-    return cls
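-
-# Hypothetical uses: find_global('BTrees.OOBTree', 'OOBTree') imports the
-# module and returns the OOBTree class, while an unimportable pair such as
-# find_global('no.such.module', 'X') raises BadClassName.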
-
-def state(self, oid, serial, prfactory, p=''):
-    p = p or self.loadSerial(oid, serial)
-    file = StringIO(p)
-    unpickler = Unpickler(file)
-    unpickler.find_global = find_global
-    unpickler.persistent_load = prfactory.persistent_load
-    unpickler.load() # skip the class tuple
-    return unpickler.load()
-
-class PersistentReference:
-
-    def __repr__(self):
-        return "PR(%s %s)" % (id(self), self.data)
-
-    def __getstate__(self):
-        raise PicklingError, "Can't pickle PersistentReference"
-
-class PersistentReferenceFactory:
-
-    data = None
-
-    def persistent_load(self, oid):
-        if self.data is None:
-            self.data = {}
-
-        r = self.data.get(oid, None)
-        if r is None:
-            r = PersistentReference()
-            r.data = oid
-            self.data[oid] = r
-
-        return r
-
-def persistent_id(object):
-    if getattr(object, '__class__', 0) is not PersistentReference:
-        return None
-    return object.data
-
-_unresolvable = {}
-def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
-                         committedData=''):
-    # class_tuple, old, committed, newstate = ('',''), 0, 0, 0
-    try:
-        prfactory = PersistentReferenceFactory()
-        file = StringIO(newpickle)
-        unpickler = Unpickler(file)
-        unpickler.find_global = find_global
-        unpickler.persistent_load = prfactory.persistent_load
-        meta = unpickler.load()
-        if isinstance(meta, tuple):
-            klass = meta[0]
-            newargs = meta[1] or ()
-            if isinstance(klass, tuple):
-                klass = find_global(*klass)
-        else:
-            klass = meta
-            newargs = ()
-
-        if klass in _unresolvable:
-            return None
-
-        newstate = unpickler.load()
-        inst = klass.__new__(klass, *newargs)
-
-        try:
-            resolve = inst._p_resolveConflict
-        except AttributeError:
-            _unresolvable[klass] = 1
-            return None
-
-        old = state(self, oid, oldSerial, prfactory)
-        committed = state(self, oid, committedSerial, prfactory, committedData)
-
-        resolved = resolve(old, committed, newstate)
-
-        file = StringIO()
-        pickler = Pickler(file,1)
-        pickler.persistent_id = persistent_id
-        pickler.dump(meta)
-        pickler.dump(resolved)
-        return file.getvalue(1)
-    except (ConflictError, BadClassName):
-        return None
-    except:
-        # If anything else went wrong, catch it here and avoid passing an
-        # arbitrary exception back to the client.  The error here will mask
-        # the original ConflictError.  A client can recover from a
-        # ConflictError, but not necessarily from other errors.  But log
-        # the error so that any problems can be fixed.
-        logger.error("Unexpected error", exc_info=True)
-        return None
-
-class ConflictResolvingStorage:
-    "Mix-in class that provides conflict resolution handling for storages"
-
-    tryToResolveConflict = tryToResolveConflict
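-
-# A persistent class opts into conflict resolution by defining
-# _p_resolveConflict(oldState, savedState, newState).  Hypothetical sketch
-# (not part of this module): merge concurrent increments of a counter by
-# applying both deltas to the old state.
-#
-#     class Counter(Persistent):
-#         def _p_resolveConflict(self, oldState, savedState, newState):
-#             # saved and new each differ from old by an increment; sum them
-#             oldState['count'] = (savedState['count'] + newState['count']
-#                                  - oldState['count'])
-#             return oldState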
diff --git a/branches/bug1734/src/ZODB/Connection.py b/branches/bug1734/src/ZODB/Connection.py
deleted file mode 100644
index b7c00537..00000000
--- a/branches/bug1734/src/ZODB/Connection.py
+++ /dev/null
@@ -1,918 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Database connection support
-
-$Id$"""
-
-import logging
-import sys
-import threading
-import warnings
-from time import time
-
-from persistent import PickleCache
-
-# interfaces
-from persistent.interfaces import IPersistentDataManager 
-from ZODB.interfaces import IConnection 
-from transaction.interfaces import IDataManager
-from zope.interface import implements
-
-import transaction
-
-from ZODB.ConflictResolution import ResolvedSerial
-from ZODB.ExportImport import ExportImport
-from ZODB.POSException \
-     import ConflictError, ReadConflictError, InvalidObjectReference, \
-            ConnectionStateError
-from ZODB.TmpStore import TmpStore
-from ZODB.serialize import ObjectWriter, ConnectionObjectReader, myhasattr
-from ZODB.utils import u64, oid_repr, z64, positive_id, \
-        DEPRECATED_ARGUMENT, deprecated36
-
-global_reset_counter = 0
-
-def resetCaches():
-    """Causes all connection caches to be reset as connections are reopened.
-
-    Zope's refresh feature uses this.  When you reload Python modules,
-    instances of classes continue to use the old class definitions.
-    To use the new code immediately, the refresh feature asks ZODB to
-    clear caches by calling resetCaches().  When the instances are
-    loaded by subsequent connections, they will use the new class
-    definitions.
-    """
-    global global_reset_counter
-    global_reset_counter += 1
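-
-# Hypothetical caller: module-reload machinery simply invokes resetCaches();
-# each Connection compares its _reset_counter against global_reset_counter
-# when it is next opened (see _setDB/_resetCache below) and, on a mismatch,
-# starts over with a fresh cache.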
-
-class Connection(ExportImport, object):
-    """Connection to ZODB for loading and storing objects."""
-
-    implements(IConnection, IDataManager, IPersistentDataManager)
-
-    _tmp = None
-    _code_timestamp = 0
-
-    # ZODB.IConnection
-
-    def __init__(self, version='', cache_size=400,
-                 cache_deactivate_after=None, mvcc=True, txn_mgr=None,
-                 synch=True):
-        """Create a new Connection."""
-        self._log = logging.getLogger("ZODB.Connection")
-        self._storage = None
-        self._debug_info = ()
-        self._opened = None # time.time() when DB.open() opened us
-
-        self._version = version
-        self._cache = cache = PickleCache(self, cache_size)
-        if version:
-            # Caches for versions end up empty if the version
-            # is not used for a while. Non-version caches
-            # keep their content indefinitely.
-            # Unclear:  Why do we want version caches to behave this way?
-
-            self._cache.cache_drain_resistance = 100
-        self._committed = []
-        self._added = {}
-        self._added_during_commit = None
-        self._reset_counter = global_reset_counter
-        self._load_count = 0   # Number of objects unghosted
-        self._store_count = 0  # Number of objects stored
-
-        # List of oids of modified objects (to be invalidated on an abort).
-        self._modified = []
-
-        # List of all objects (not oids) registered as modified by the
-        # persistence machinery.
-        self._registered_objects = []
-
-        # Do we need to join a txn manager?
-        self._needs_to_join = True
-
-        # If a transaction manager is passed to the constructor, use
-        # it instead of the global transaction manager.  The instance
-        # variable will hold a TM instance.
-        self._txn_mgr = txn_mgr or transaction.manager
-        # _synch is a boolean; if True, the Connection will register
-        # with the TM to receive afterCompletion() calls.
-        self._synch = synch
-
-        # _invalidated queues invalidate messages delivered from the DB
-        # _inv_lock prevents one thread from modifying the set while
-        # another is processing invalidations.  All the invalidations
-        # from a single transaction should be applied atomically, so
-        # the lock must be held when reading _invalidated.
-
-        # It sucks that we have to hold the lock to read _invalidated.
-        # Normally, _invalidated is written by calling dict.update, which
-        # will execute atomically by virtue of the GIL.  But some storage
-        # might generate oids where hash or compare invokes Python code.  In
-        # that case, the GIL can't save us.
-        self._inv_lock = threading.Lock()
-        self._invalidated = d = {}
-        self._invalid = d.has_key
-
-        # We intend to prevent committing a transaction in which
-        # ReadConflictError occurs.  _conflicts is the set of oids that
-        # experienced ReadConflictError.  Any time we raise ReadConflictError,
-        # the oid should be added to this set, and we should be sure that the
-        # object is registered.  Because it's registered, Connection.commit()
-        # will raise ReadConflictError again (because the oid is in
-        # _conflicts).
-        self._conflicts = {}
-
-        # If MVCC is enabled, then _mvcc is True and _txn_time stores
-        # the upper bound on transactions visible to this connection.
-        # That is, all object revisions must be written before _txn_time.
-        # If it is None, then the current revisions are acceptable.
-        # If the connection is in a version, mvcc will be disabled, because
-        # loadBefore() only returns non-version data.
-        self._mvcc = mvcc and not version
-        self._txn_time = None
-
-        # To support importFile(), implemented in the ExportImport base
-        # class, we need to run _importDuringCommit() from our commit()
-        # method.  If _import is not None, it is a two-tuple of arguments
-        # to pass to _importDuringCommit().
-        self._import = None
-
-        self.connections = None
-
-    def get_connection(self, database_name):
-        """Return a Connection for the named database."""
-        connection = self.connections.get(database_name)
-        if connection is None:
-            new_con = self._db.databases[database_name].open()
-            self.connections.update(new_con.connections)
-            new_con.connections = self.connections
-            connection = new_con
-        return connection
-
-    def get(self, oid):
-        """Return the persistent object with oid 'oid'."""
-        if self._storage is None:
-            raise ConnectionStateError("The database connection is closed")
-
-        obj = self._cache.get(oid, None)
-        if obj is not None:
-            return obj
-        obj = self._added.get(oid, None)
-        if obj is not None:
-            return obj
-
-        p, serial = self._storage.load(oid, self._version)
-        obj = self._reader.getGhost(p)
-
-        obj._p_oid = oid
-        obj._p_jar = self
-        obj._p_changed = None
-        obj._p_serial = serial
-
-        self._cache[oid] = obj
-        return obj
-
-    def add(self, obj):
-        """Add a new object 'obj' to the database and assign it an oid."""
-        if self._storage is None:
-            raise ConnectionStateError("The database connection is closed")
-
-        marker = object()
-        oid = getattr(obj, "_p_oid", marker)
-        if oid is marker:
-            raise TypeError("Only first-class persistent objects may be"
-                            " added to a Connection.", obj)
-        elif obj._p_jar is None:
-            assert obj._p_oid is None
-            oid = obj._p_oid = self._storage.new_oid()
-            obj._p_jar = self
-            if self._added_during_commit is not None:
-                self._added_during_commit.append(obj)
-            self._register(obj)
-            # Add to _added after calling register(), so that _added
-            # can be used as a test for whether the object has been
-            # registered with the transaction.
-            self._added[oid] = obj
-        elif obj._p_jar is not self:
-            raise InvalidObjectReference(obj, obj._p_jar)
-
-    def sortKey(self):
-        """Return a consistent sort key for this connection."""
-        return "%s:%s" % (self._storage.sortKey(), id(self))
-
-    def abort(self, transaction):
-        """Abort a transaction and forget all changes."""
-        for obj in self._registered_objects:
-            oid = obj._p_oid
-            assert oid is not None
-            if oid in self._added:
-                del self._added[oid]
-                del obj._p_jar
-                del obj._p_oid
-            else:
-                self._cache.invalidate(oid)
-
-        self._tpc_cleanup()
-
-    # TODO: we should test what happens when cacheGC is called mid-transaction.
-
-    def cacheGC(self):
-        """Reduce cache size to target size."""
-        self._cache.incrgc()
-
-    __onCloseCallbacks = None
-
-    def onCloseCallback(self, f):
-        """Register a callable, f, to be called by close()."""
-        if self.__onCloseCallbacks is None:
-            self.__onCloseCallbacks = []
-        self.__onCloseCallbacks.append(f)
-
-    def close(self):
-        """Close the Connection."""
-        if not self._needs_to_join:
-            # We're currently joined to a transaction.
-            raise ConnectionStateError("Cannot close a connection joined to "
-                                       "a transaction")
-
-        if self._tmp is not None:
-            # There are no direct modifications pending, but a subtransaction
-            # is pending.
-            raise ConnectionStateError("Cannot close a connection with a "
-                                       "pending subtransaction")
-
-        if self._cache is not None:
-            self._cache.incrgc() # This is a good time to do some GC
-
-        # Call the close callbacks.
-        if self.__onCloseCallbacks is not None:
-            for f in self.__onCloseCallbacks:
-                try:
-                    f()
-                except: # except what?
-                    f = getattr(f, 'im_self', f)
-                    self._log.error("Close callback failed for %s", f,
-                                    exc_info=sys.exc_info())
-            self.__onCloseCallbacks = None
-        self._storage = self._tmp = self.new_oid = None
-        self._debug_info = ()
-        self._opened = None
-        # Return the connection to the pool.
-        if self._db is not None:
-            if self._synch:
-                self._txn_mgr.unregisterSynch(self)
-            self._db._closeConnection(self)
-            # _closeConnection() set self._db to None.  However, we can't
-            # assert that here, because self may have been reused (by
-            # another thread) by the time we get back here.
-
-    # transaction.interfaces.IDataManager
-
-    def commit(self, transaction):
-        """Commit changes to an object"""
-        if self._import:
-            # TODO:  This code seems important for Zope, but needs docs
-            # to explain why.
-            self._importDuringCommit(transaction, *self._import)
-            self._import = None
-
-        # Just in case an object is added as a side-effect of storing
-        # a modified object.  If, for example, a __getstate__() method
-        # calls add(), the newly added objects will show up in
-        # _added_during_commit.  This sounds insane, but has actually
-        # happened.
-
-        self._added_during_commit = []
-
-        for obj in self._registered_objects:
-            oid = obj._p_oid
-            assert oid
-            if oid in self._conflicts:
-                raise ReadConflictError(object=obj)
-
-            if obj._p_jar is not self:
-                raise InvalidObjectReference(obj, obj._p_jar)
-            elif oid in self._added:
-                assert obj._p_serial == z64
-            elif obj._p_changed:
-                if oid in self._invalidated:
-                    resolve = getattr(obj, "_p_resolveConflict", None)
-                    if resolve is None:
-                        raise ConflictError(object=obj)
-                self._modified.append(oid)
-            else:
-                # Nothing to do.  It's been said that it's legal, e.g., for
-                # an object to set _p_changed to false after it's been
-                # changed and registered.
-                continue
-
-            self._store_objects(ObjectWriter(obj), transaction)
-
-        for obj in self._added_during_commit:
-            self._store_objects(ObjectWriter(obj), transaction)
-        self._added_during_commit = None
-
-    def _store_objects(self, writer, transaction):
-        for obj in writer:
-            oid = obj._p_oid
-            serial = getattr(obj, "_p_serial", z64)
-
-            if serial == z64:
-                # obj is a new object
-                self._creating.append(oid)
-                # Because obj was added, it is now in _creating, so it can
-                # be removed from _added.
-                self._added.pop(oid, None)
-            else:
-                if (oid in self._invalidated
-                    and not hasattr(obj, '_p_resolveConflict')):
-                    raise ConflictError(object=obj)
-                self._modified.append(oid)
-            p = writer.serialize(obj)  # This calls __getstate__ of obj
-            s = self._storage.store(oid, serial, p, self._version, transaction)
-            self._store_count += 1
-            # Put the object in the cache before handling the
-            # response, just in case the response contains the
-            # serial number for a newly created object
-            try:
-                self._cache[oid] = obj
-            except:
-                # Dang, I bet it's wrapped:
-                # TODO:  Deprecate, then remove, this.
-                if hasattr(obj, 'aq_base'):
-                    self._cache[oid] = obj.aq_base
-                else:
-                    raise
-
-            self._handle_serial(s, oid)
-
-    def commit_sub(self, t):
-        """Commit all changes made in subtransactions and begin 2-phase commit
-        """
-        if self._tmp is None:
-            return
-        src = self._storage
-        self._storage = self._tmp
-        self._tmp = None
-
-        self._log.debug("Commiting subtransaction of size %s", src.getSize())
-        oids = src._index.keys()
-        self._storage.tpc_begin(t)
-
-        # Copy invalidating and creating info from temporary storage:
-        self._modified.extend(oids)
-        self._creating.extend(src._creating)
-
-        for oid in oids:
-            data, serial = src.load(oid, src)
-            s = self._storage.store(oid, serial, data, self._version, t)
-            self._handle_serial(s, oid, change=False)
-
-    def abort_sub(self, t):
-        """Discard all subtransaction data."""
-        if self._tmp is None:
-            return
-        src = self._storage
-        self._storage = self._tmp
-        self._tmp = None
-
-        self._cache.invalidate(src._index.keys())
-        self._invalidate_creating(src._creating)
-
-    def _invalidate_creating(self, creating=None):
-        """Disown any objects newly saved in an uncommitted transaction."""
-        if creating is None:
-            creating = self._creating
-            self._creating = []
-
-        for oid in creating:
-            o = self._cache.get(oid)
-            if o is not None:
-                del self._cache[oid]
-                del o._p_jar
-                del o._p_oid
-
-    # The next two methods are callbacks for transaction synchronization.
-
-    def beforeCompletion(self, txn):
-        # We don't do anything before a commit starts.
-        pass
-
-    def afterCompletion(self, txn):
-        self._flush_invalidations()
-
-    def _flush_invalidations(self):
-        self._inv_lock.acquire()
-        try:
-            self._cache.invalidate(self._invalidated)
-            self._invalidated.clear()
-            self._txn_time = None
-        finally:
-            self._inv_lock.release()
-        # Now is a good time to collect some garbage
-        self._cache.incrgc()
-
-    def root(self):
-        """Return the database root object."""
-        return self.get(z64)
-
-    def db(self):
-        """Returns a handle to the database this connection belongs to."""
-        return self._db
-
-    def isReadOnly(self):
-        """Returns True if the storage for this connection is read only."""
-        if self._storage is None:
-            raise ConnectionStateError("The database connection is closed")
-        return self._storage.isReadOnly()
-
-    def invalidate(self, tid, oids):
-        """Notify the Connection that transaction 'tid' invalidated oids."""
-        self._inv_lock.acquire()
-        try:
-            if self._txn_time is None:
-                self._txn_time = tid
-            self._invalidated.update(oids)
-        finally:
-            self._inv_lock.release()
-
-    # IDataManager
-
-    def tpc_begin(self, transaction, sub=False):
-        """Begin commit of a transaction, starting the two-phase commit."""
-        self._modified = []
-
-        # _creating is a list of oids of new objects, which is used to
-        # remove them from the cache if a transaction aborts.
-        self._creating = []
-        if sub and self._tmp is None:
-            # Sub-transaction!
-            self._tmp = self._storage
-            self._storage = TmpStore(self._version, self._storage)
-
-        self._storage.tpc_begin(transaction)
-
-    def tpc_vote(self, transaction):
-        """Verify that a data manager can commit the transaction."""
-        try:
-            vote = self._storage.tpc_vote
-        except AttributeError:
-            return
-        s = vote(transaction)
-        self._handle_serial(s)
-
-    def _handle_serial(self, store_return, oid=None, change=1):
-        """Handle the returns from store() and tpc_vote() calls."""
-
-        # These calls can return different types depending on whether
-        # ZEO is used.  ZEO uses asynchronous returns that may be
-        # returned in batches by the ClientStorage.  ZEO1 can also
-        # return an exception object and expect that the Connection
-        # will raise the exception.
-
-        # When commit_sub() executes a store, there is no need to
-        # update the _p_changed flag, because the subtransaction
-        # tpc_vote() calls already did this.  The change=1 argument
-        # exists to allow commit_sub() to avoid setting the flag
-        # again.
-
-        # When conflict resolution occurs, the object state held by
-        # the connection does not match what is written to the
-        # database.  Invalidate the object here to guarantee that
-        # the new state is read the next time the object is used.
-
-        if not store_return:
-            return
-        if isinstance(store_return, str):
-            assert oid is not None
-            self._handle_one_serial(oid, store_return, change)
-        else:
-            for oid, serial in store_return:
-                self._handle_one_serial(oid, serial, change)
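-
-    # Hypothetical shapes of store_return handled above:
-    #     'aserialstring'             -- one serial for the oid passed in
-    #     [(oid1, s1), (oid2, s2)]    -- batched (oid, serial) pairs (ZEO)
-    #     [(oid1, SomeException())]   -- _handle_one_serial() raises it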
-
-    def _handle_one_serial(self, oid, serial, change):
-        if not isinstance(serial, str):
-            raise serial
-        obj = self._cache.get(oid, None)
-        if obj is None:
-            return
-        if serial == ResolvedSerial:
-            del obj._p_changed # transition from changed to ghost
-        else:
-            if change:
-                obj._p_changed = 0 # transition from changed to up-to-date
-            obj._p_serial = serial
-
-    def tpc_finish(self, transaction):
-        """Indicate confirmation that the transaction is done."""
-        if self._tmp is not None:
-            # Committing a subtransaction!
-            # There is no need to invalidate anything.
-            self._storage.tpc_finish(transaction)
-            self._storage._creating[:0] = self._creating
-            del self._creating[:]
-        else:
-            def callback(tid):
-                d = {}
-                for oid in self._modified:
-                    d[oid] = 1
-                self._db.invalidate(tid, d, self)
-            self._storage.tpc_finish(transaction, callback)
-        self._tpc_cleanup()
-
-    def tpc_abort(self, transaction):
-        """Abort a transaction."""
-        if self._import:
-            self._import = None
-        self._storage.tpc_abort(transaction)
-        self._cache.invalidate(self._modified)
-        self._invalidate_creating()
-        while self._added:
-            oid, obj = self._added.popitem()
-            del obj._p_oid
-            del obj._p_jar
-        self._tpc_cleanup()
-
-    def _tpc_cleanup(self):
-        """Performs cleanup operations to support tpc_finish and tpc_abort."""
-        self._conflicts.clear()
-        if not self._synch:
-            self._flush_invalidations()
-        self._needs_to_join = True
-        self._registered_objects = []
-
-    def sync(self):
-        """Manually update the view on the database."""
-        self._txn_mgr.get().abort()
-        sync = getattr(self._storage, 'sync', 0)
-        if sync:
-            sync()
-        self._flush_invalidations()
-
-    def getDebugInfo(self):
-        """Returns a tuple with different items for debugging the
-        connection.
-        """ 
-        return self._debug_info
-
-    def setDebugInfo(self, *args):
-        """Add the given items to the debug information of this connection."""
-        self._debug_info = self._debug_info + args
-
-    def getTransferCounts(self, clear=False):
-        """Returns the number of objects loaded and stored."""
-        res = self._load_count, self._store_count
-        if clear:
-            self._load_count = 0
-            self._store_count = 0
-        return res
-
-    ##############################################
-    # persistent.interfaces.IPersistentDataManager
-
-    def oldstate(self, obj, tid):
-        """Return copy of 'obj' that was written by transaction 'tid'."""
-        assert obj._p_jar is self
-        p = self._storage.loadSerial(obj._p_oid, tid)
-        return self._reader.getState(p)
-
-    def setstate(self, obj):
-        """Turns the ghost 'obj' into a real object by loading it's from the
-        database."""
-        oid = obj._p_oid
-
-        if self._storage is None:
-            msg = ("Shouldn't load state for %s "
-                   "when the connection is closed" % oid_repr(oid))
-            self._log.error(msg)
-            raise ConnectionStateError(msg)
-
-        try:
-            self._setstate(obj)
-        except ConflictError:
-            raise
-        except:
-            self._log.error("Couldn't load state for %s", oid_repr(oid),
-                            exc_info=sys.exc_info())
-            raise
-
-    def _setstate(self, obj):
-        # Helper for setstate(), which provides logging of failures.
-
-        # The control flow is complicated here to avoid loading an
-        # object revision that we are sure we aren't going to use.  As
-        # a result, invalidation tests occur before and after the
-        # load.  We can only be sure about invalidations after the
-        # load.
-
-        # If an object has been invalidated, there are several cases
-        # to consider:
-        # 1. Check _p_independent()
-        # 2. Try MVCC
-        # 3. Raise ConflictError.
-
-        # Does anything actually use _p_independent()?  It would simplify
-        # the code if we could drop support for it.
-
-        # There is a harmless data race with self._invalidated.  A
-        # dict update could go on in another thread, but we don't care
-        # because we have to check again after the load anyway.
-
-        if (obj._p_oid in self._invalidated
-            and not myhasattr(obj, "_p_independent")):
-            # If the object has _p_independent(), we will handle it below.
-            self._load_before_or_conflict(obj)
-            return
-
-        p, serial = self._storage.load(obj._p_oid, self._version)
-        self._load_count += 1
-
-        self._inv_lock.acquire()
-        try:
-            invalid = obj._p_oid in self._invalidated
-        finally:
-            self._inv_lock.release()
-
-        if invalid:
-            if myhasattr(obj, "_p_independent"):
-                # This call will raise a ReadConflictError if something
-                # goes wrong
-                self._handle_independent(obj)
-            else:
-                self._load_before_or_conflict(obj)
-                return
-
-        self._reader.setGhostState(obj, p)
-        obj._p_serial = serial
-
-    def _load_before_or_conflict(self, obj):
-        """Load non-current state for obj or raise ReadConflictError."""
-        if not (self._mvcc and self._setstate_noncurrent(obj)):
-            self._register(obj)
-            self._conflicts[obj._p_oid] = True
-            raise ReadConflictError(object=obj)
-
-    def _setstate_noncurrent(self, obj):
-        """Set state using non-current data.
-
-        Return True if state was available, False if not.
-        """
-        try:
-            # Load data that was current before the commit at txn_time.
-            t = self._storage.loadBefore(obj._p_oid, self._txn_time)
-        except KeyError:
-            return False
-        if t is None:
-            return False
-        data, start, end = t
-        # The non-current transaction must have been written before
-        # txn_time.  It must be current at txn_time, but could have
-        # been modified at txn_time.
-
-        assert start < self._txn_time, (u64(start), u64(self._txn_time))
-        assert end is not None
-        assert self._txn_time <= end, (u64(self._txn_time), u64(end))
-        self._reader.setGhostState(obj, data)
-        obj._p_serial = start
-        return True
-
-    def _handle_independent(self, obj):
-        # Helper method for setstate() handles possibly independent objects
-        # Call _p_independent(), if it returns True, setstate() wins.
-        # Otherwise, raise a ConflictError.
-
-        if obj._p_independent():
-            self._inv_lock.acquire()
-            try:
-                try:
-                    del self._invalidated[obj._p_oid]
-                except KeyError:
-                    pass
-            finally:
-                self._inv_lock.release()
-        else:
-            self._conflicts[obj._p_oid] = 1
-            self._register(obj)
-            raise ReadConflictError(object=obj)
-
-    def register(self, obj):
-        """Register obj with the current transaction manager.
-
-        A subclass could override this method to customize the default
-        policy of one transaction manager for each thread.
-
-        obj must be an object loaded from this Connection.
-        """
-        assert obj._p_jar is self
-        if obj._p_oid is None:
-            # There is some old Zope code that assigns _p_jar
-            # directly.  That is no longer allowed, but we need to
-            # provide support for old code that still does it.
-
-            # The actual complaint here is that an object without
-            # an oid is being registered.  I can't think of any way to
-            # achieve that without assignment to _p_jar.  If there is
-            # a way, this will be a very confusing warning.
-            deprecated36("Assigning to _p_jar is deprecated, and will be "
-                         "changed to raise an exception.")
-        elif obj._p_oid in self._added:
-            # It was registered before it was added to _added.
-            return
-        self._register(obj)
-
-    def _register(self, obj=None):
-        if obj is not None:
-            self._registered_objects.append(obj)
-        if self._needs_to_join:
-            self._txn_mgr.get().join(self)
-            self._needs_to_join = False
-
-    # PROTECTED stuff (used by e.g. ZODB.DB.DB)
-
-    def _cache_items(self):
-        # find all items on the lru list
-        items = self._cache.lru_items()
-        # find everything: some on the lru list, some not
-        everything = self._cache.cache_data
-        # remove those items that are on the lru list
-        for k,v in items:
-            del everything[k]
-        # return a list of [ghosts....not recently used.....recently used]
-        return everything.items() + items
-
-    def _setDB(self, odb, mvcc=None, txn_mgr=None, synch=None):
-        """Register odb, the DB that this Connection uses.
-
-        This method is called by the DB every time a Connection
-        is opened.  Any invalidations received while the Connection
-        was closed will be processed.
-
-        If the global module function resetCaches() was called, the
-        cache will be cleared.
-
-        Parameters:
-        odb: database that owns the Connection
-        mvcc: boolean indicating whether MVCC is enabled
-        txn_mgr: transaction manager to use.  None means
-            use the default transaction manager.
-        synch: boolean indicating whether Connection should
-            register for afterCompletion() calls.
-        """
-
-        # TODO:  Why do we go to all the trouble of setting _db and
-        # other attributes on open and clearing them on close?
-        # A Connection is only ever associated with a single DB
-        # and Storage.
-
-        self._db = odb
-        self._storage = odb._storage
-        self.new_oid = odb._storage.new_oid
-        self._opened = time()
-        if synch is not None:
-            self._synch = synch
-        if mvcc is not None:
-            self._mvcc = mvcc
-        self._txn_mgr = txn_mgr or transaction.manager
-        if self._reset_counter != global_reset_counter:
-            # New code is in place.  Start a new cache.
-            self._resetCache()
-        else:
-            self._flush_invalidations()
-        if self._synch:
-            self._txn_mgr.registerSynch(self)
-        self._reader = ConnectionObjectReader(self, self._cache,
-                                              self._db.classFactory)
-
-        # Multi-database support
-        self.connections = {self._db.database_name: self}
-
-    def _resetCache(self):
-        """Creates a new cache, discarding the old one.
-
-        See the docstring for the resetCaches() function.
-        """
-        self._reset_counter = global_reset_counter
-        self._invalidated.clear()
-        cache_size = self._cache.cache_size
-        self._cache = cache = PickleCache(self, cache_size)
-
-    # Python protocol
-
-    def __repr__(self):
-        if self._version:
-            ver = ' (in version %s)' % `self._version`
-        else:
-            ver = ''
-        return '<Connection at %08x%s>' % (positive_id(self), ver)
-
-    # DEPRECATION candidates
-
-    __getitem__ = get
-
-    def modifiedInVersion(self, oid):
-        """Returns the version the object with the given oid was modified in.
-
-        If it wasn't modified in a version, the current version of this 
-        connection is returned.
-        """
-        try:
-            return self._db.modifiedInVersion(oid)
-        except KeyError:
-            return self.getVersion()
-
-    def getVersion(self):
-        """Returns the version this connection is attached to."""
-        if self._storage is None:
-            raise ConnectionStateError("The database connection is closed")
-        return self._version
-
-    def setklassstate(self, obj):
-        # Special case code to handle ZClasses, I think.
-        # Called by the cache when an object of type type is invalidated.
-        try:
-            oid = obj._p_oid
-            p, serial = self._storage.load(oid, self._version)
-
-            # We call getGhost(), but we actually get a non-ghost back.
-            # The object is a class, which can't actually be ghosted.
-            copy = self._reader.getGhost(p)
-            obj.__dict__.clear()
-            obj.__dict__.update(copy.__dict__)
-
-            obj._p_oid = oid
-            obj._p_jar = self
-            obj._p_changed = 0
-            obj._p_serial = serial
-        except:
-            self._log.error("setklassstate failed", exc_info=sys.exc_info())
-            raise
-
-    def exchange(self, old, new):
-        # called by a ZClasses method that isn't executed by the test suite
-        oid = old._p_oid
-        new._p_oid = oid
-        new._p_jar = self
-        new._p_changed = 1
-        self._register(new)
-        self._cache[oid] = new
-
-    # DEPRECATED methods
-
-    def getTransaction(self):
-        """Get the current transaction for this connection.
-
-        :deprecated:
-
-        The transaction manager's get method works the same as this
-        method.  You can pass a transaction manager (TM) to DB.open()
-        to control which TM the Connection uses.
-        """
-        deprecated36("getTransaction() is deprecated. "
-                     "Use the txn_mgr argument to DB.open() instead.")
-        return self._txn_mgr.get()
-
-    def setLocalTransaction(self):
-        """Use a transaction bound to the connection rather than the thread.
-
-        :deprecated:
-
-        Returns the transaction manager used by the connection.  You
-        can pass a transaction manager (TM) to DB.open() to control
-        which TM the Connection uses.
-        """
-        deprecated36("setLocalTransaction() is deprecated. "
-                     "Use the txn_mgr argument to DB.open() instead.")
-        if self._txn_mgr is transaction.manager:
-            if self._synch:
-                self._txn_mgr.unregisterSynch(self)
-            self._txn_mgr = transaction.TransactionManager()
-            if self._synch:
-                self._txn_mgr.registerSynch(self)
-        return self._txn_mgr
-
-    def cacheFullSweep(self, dt=None):
-        deprecated36("cacheFullSweep is deprecated. "
-                     "Use cacheMinimize instead.")
-        if dt is None:
-            self._cache.full_sweep()
-        else:
-            self._cache.full_sweep(dt)
-
-    def cacheMinimize(self, dt=DEPRECATED_ARGUMENT):
-        """Deactivate all unmodified objects in the cache."""
-        if dt is not DEPRECATED_ARGUMENT:
-            deprecated36("cacheMinimize() dt= is ignored.")
-        self._cache.minimize()
-
diff --git a/branches/bug1734/src/ZODB/DB.py b/branches/bug1734/src/ZODB/DB.py
deleted file mode 100644
index b5fb360b..00000000
--- a/branches/bug1734/src/ZODB/DB.py
+++ /dev/null
@@ -1,785 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Database objects
-
-$Id$"""
-
-import cPickle, cStringIO, sys
-import threading
-from time import time, ctime
-import logging
-
-from ZODB.broken import find_global
-from ZODB.utils import z64
-from ZODB.Connection import Connection
-from ZODB.serialize import referencesf
-from ZODB.utils import WeakSet
-from ZODB.utils import DEPRECATED_ARGUMENT, deprecated36
-
-from zope.interface import implements
-from ZODB.interfaces import IDatabase
-
-import transaction
-
-logger = logging.getLogger('ZODB.DB')
-
-class _ConnectionPool(object):
-    """Manage a pool of connections.
-
-    CAUTION:  Methods should be called under the protection of a lock.
-    This class does no locking of its own.
-
-    There's no limit on the number of connections this can keep track of,
-    but a warning is logged if there are more than pool_size active
-    connections, and a critical problem if more than twice pool_size.
-
-    New connections are registered via push().  This will log a message if
-    "too many" connections are active.
-
-    When a connection is explicitly closed, tell the pool via repush().
-    That adds the connection to a stack of connections available for
-    reuse, and throws away the oldest stack entries if the stack is too large.
-    pop() pops this stack.
-
-    When a connection is obtained via pop(), the pool holds only a weak
-    reference to it thereafter.  It's not necessary to inform the pool
-    if the connection goes away.  A connection handed out by pop() counts
-    against pool_size only so long as it exists, and provided it isn't
-    repush()'ed.  A weak reference is retained so that DB methods like
-    connectionDebugInfo() can still gather statistics.
-    """
-
-    def __init__(self, pool_size):
-        # The largest # of connections we expect to see alive simultaneously.
-        self.pool_size = pool_size
-
-        # A weak set of all connections we've seen.  A connection vanishes
-        # from this set if pop() hands it out, it's not reregistered via
-        # repush(), and it becomes unreachable.
-        self.all = WeakSet()
-
-        # A stack of connections available to hand out.  This is a subset
-        # of self.all.  push() and repush() add to this, and may remove
-        # the oldest available connections if the pool is too large.
-        # pop() pops this stack.  There are never more than pool_size entries
-        # in this stack.
-        # In Python 2.4, a collections.deque would make more sense than
-        # a list (we push only "on the right", but may pop from both ends).
-        self.available = []
-
-    # Change our belief about the expected maximum # of live connections.
-    # If the pool_size is smaller than the current value, this may discard
-    # the oldest available connections.
-    def set_pool_size(self, pool_size):
-        self.pool_size = pool_size
-        self._reduce_size()
-
-    # Register a new available connection.  We must not know about c already.
-    # c will be pushed onto the available stack even if we're over the
-    # pool size limit.
-    def push(self, c):
-        assert c not in self.all
-        assert c not in self.available
-        self._reduce_size(strictly_less=True)
-        self.all.add(c)
-        self.available.append(c)
-        n, limit = len(self.all), self.pool_size
-        if n > limit:
-            reporter = logger.warn
-            if n > 2 * limit:
-                reporter = logger.critical
-            reporter("DB.open() has %s open connections with a pool_size "
-                     "of %s", n, limit)
-
-    # Reregister an available connection formerly obtained via pop().  This
-    # pushes it on the stack of available connections, and may discard
-    # older available connections.
-    def repush(self, c):
-        assert c in self.all
-        assert c not in self.available
-        self._reduce_size(strictly_less=True)
-        self.available.append(c)
-
-    # Throw away the oldest available connections until we're strictly
-    # under our target size (strictly_less=True) or no more than that
-    # (strictly_less=False, the default).
-    def _reduce_size(self, strictly_less=False):
-        target = self.pool_size - bool(strictly_less)
-        while len(self.available) > target:
-            c = self.available.pop(0)
-            self.all.remove(c)
-
-    # Pop an available connection and return it, or return None if none are
-    # available.  In the latter case, the caller should create a new
-    # connection, register it via push(), and call pop() again.  The
-    # caller is responsible for serializing this sequence.
-    def pop(self):
-        result = None
-        if self.available:
-            result = self.available.pop()
-            # Leave it in self.all, so we can still get at it for statistics
-            # while it's alive.
-            assert result in self.all
-        return result
-
-    # For every live connection c, invoke f(c).
-    def map(self, f):
-        self.all.map(f)
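-
-    # Sketch of the intended calling sequence (hypothetical caller; in
-    # practice DB.open() drives this while holding the DB lock):
-    #
-    #     c = pool.pop()
-    #     if c is None:
-    #         c = Connection(...)   # none available; create a new one
-    #         pool.push(c)          # register it with the pool
-    #         c = pool.pop()        # and take it back out
-    #     ...                       # hand c out for use
-    #     pool.repush(c)            # when the connection is closed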
-
-class DB(object):
-    """The Object Database
-    -------------------
-
-    The DB class coordinates the activities of multiple database
-    Connection instances.  Most of the work is done by the
-    Connections created via the open method.
-
-    The DB instance manages a pool of connections.  If a connection is
-    closed, it is returned to the pool and its object cache is
-    preserved.  A subsequent call to open() will reuse the connection.
-    There is no hard limit on the pool size.  If more than `pool_size`
-    connections are opened, a warning is logged, and if more than twice
-    that many, a critical problem is logged.
-
-    The class variable 'klass' is used by open() to create database
-    connections.  It is set to Connection, but a subclass could override
-    it to provide a different connection implementation.
-
-    The database provides a few methods intended for application code
-    -- open, close, undo, and pack -- and a large collection of
-    methods for inspecting the database and its connections' caches.
-
-    :Cvariables:
-      - `klass`: Class used by L{open} to create database connections
-
-    :Groups:
-      - `User Methods`: __init__, open, close, undo, pack, classFactory
-      - `Inspection Methods`: getName, getSize, objectCount,
-        getActivityMonitor, setActivityMonitor
-      - `Connection Pool Methods`: getPoolSize, getVersionPoolSize,
-        removeVersionPool, setPoolSize, setVersionPoolSize
-      - `Transaction Methods`: invalidate
-      - `Other Methods`: lastTransaction, connectionDebugInfo
-      - `Version Methods`: modifiedInVersion, abortVersion, commitVersion,
-        versionEmpty
-      - `Cache Inspection Methods`: cacheDetail, cacheExtremeDetail,
-        cacheFullSweep, cacheLastGCTime, cacheMinimize, cacheSize,
-        cacheDetailSize, getCacheSize, getVersionCacheSize, setCacheSize,
-        setVersionCacheSize
-      - `Deprecated Methods`: getCacheDeactivateAfter,
-        setCacheDeactivateAfter,
-        getVersionCacheDeactivateAfter, setVersionCacheDeactivateAfter
-    """
-    implements(IDatabase)
-
-    klass = Connection  # Class to use for connections
-    _activity_monitor = None
-
-    def __init__(self, storage,
-                 pool_size=7,
-                 cache_size=400,
-                 cache_deactivate_after=DEPRECATED_ARGUMENT,
-                 version_pool_size=3,
-                 version_cache_size=100,
-                 database_name='unnamed',
-                 databases=None,
-                 version_cache_deactivate_after=DEPRECATED_ARGUMENT,
-                 ):
-        """Create an object database.
-
-        :Parameters:
-          - `storage`: the storage used by the database, e.g. FileStorage
-          - `pool_size`: expected maximum number of open connections
-          - `cache_size`: target size of Connection object cache
-          - `version_pool_size`: expected maximum number of connections (per
-            version)
-          - `version_cache_size`: target size of Connection object cache for
-            version connections
-          - `cache_deactivate_after`: ignored
-          - `version_cache_deactivate_after`: ignored
-        """
-        # Allocate lock.
-        x = threading.RLock()
-        self._a = x.acquire
-        self._r = x.release
-
-        # Setup connection pools and cache info
-        # _pools maps a version string to a _ConnectionPool object.
-        self._pools = {}
-        self._pool_size = pool_size
-        self._cache_size = cache_size
-        self._version_pool_size = version_pool_size
-        self._version_cache_size = version_cache_size
-
-        # warn about use of deprecated arguments
-        if cache_deactivate_after is not DEPRECATED_ARGUMENT:
-            deprecated36("cache_deactivate_after has no effect")
-        if version_cache_deactivate_after is not DEPRECATED_ARGUMENT:
-            deprecated36("version_cache_deactivate_after has no effect")
-
-        self._miv_cache = {}
-
-        # Setup storage
-        self._storage = storage
-        storage.registerDB(self, None)
-        if not hasattr(storage, 'tpc_vote'):
-            storage.tpc_vote = lambda *args: None
-        try:
-            storage.load(z64, '')
-        except KeyError:
-            # Create the database's root in the storage if it doesn't exist
-            from persistent.mapping import PersistentMapping
-            root = PersistentMapping()
-            # Manually create a pickle for the root to put in the storage.
-            # The pickle must be in the special ZODB format.
-            file = cStringIO.StringIO()
-            p = cPickle.Pickler(file, 1)
-            p.dump((root.__class__, None))
-            p.dump(root.__getstate__())
-            t = transaction.Transaction()
-            t.description = 'initial database creation'
-            storage.tpc_begin(t)
-            storage.store(z64, None, file.getvalue(), '', t)
-            storage.tpc_vote(t)
-            storage.tpc_finish(t)
-
-        # Multi-database setup.
-        if databases is None:
-            databases = {}
-        self.databases = databases
-        self.database_name = database_name
-        if database_name in databases:
-            raise ValueError("database_name %r already in databases" %
-                             database_name)
-        databases[database_name] = self
-
-        # Pass through methods:
-        for m in ['history', 'supportsUndo', 'supportsVersions', 'undoLog',
-                  'versionEmpty', 'versions']:
-            setattr(self, m, getattr(storage, m))
-
-        if hasattr(storage, 'undoInfo'):
-            self.undoInfo = storage.undoInfo
-
-    # This is called by Connection.close().
-    def _closeConnection(self, connection):
-        """Return a connection to the pool.
-
-        connection._db must be self on entry.
-        """
-
-        self._a()
-        try:
-            assert connection._db is self
-            connection._db = None
-
-            am = self._activity_monitor
-            if am is not None:
-                am.closedConnection(connection)
-
-            version = connection._version
-            try:
-                pool = self._pools[version]
-            except KeyError:
-                # No such version. We must have deleted the pool.
-                # Just let the connection go.
-
-                # We need to break circular refs to make it really go.
-                # TODO:  Figure out exactly which objects are involved in the
-                # cycle.
-                connection.__dict__.clear()
-                return
-            pool.repush(connection)
-
-        finally:
-            self._r()
-
-    # Call f(c) for all connections c in all pools in all versions.
-    def _connectionMap(self, f):
-        self._a()
-        try:
-            for pool in self._pools.values():
-                pool.map(f)
-        finally:
-            self._r()
-
-    def abortVersion(self, version, txn=None):
-        if txn is None:
-            txn = transaction.get()
-        txn.register(AbortVersion(self, version))
-
-    def cacheDetail(self):
-        """Return information on objects in the various caches
-
-        Organized by class.
-        """
-
-        detail = {}
-        def f(con, detail=detail):
-            for oid, ob in con._cache.items():
-                module = getattr(ob.__class__, '__module__', '')
-                module = module and '%s.' % module or ''
-                c = "%s%s" % (module, ob.__class__.__name__)
-                if c in detail:
-                    detail[c] += 1
-                else:
-                    detail[c] = 1
-
-        self._connectionMap(f)
-        detail = detail.items()
-        detail.sort()
-        return detail
-
-    def cacheExtremeDetail(self):
-        detail = []
-        conn_no = [0]  # A mutable reference to a counter
-        def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no):
-            conn_no[0] += 1
-            cn = conn_no[0]
-            for oid, ob in con._cache_items():
-                id = ''
-                if hasattr(ob, '__dict__'):
-                    d = ob.__dict__
-                    if d.has_key('id'):
-                        id = d['id']
-                    elif d.has_key('__name__'):
-                        id = d['__name__']
-
-                module = getattr(ob.__class__, '__module__', '')
-                module = module and ('%s.' % module) or ''
-
-                # What refcount ('rc') should we return?  The intent is
-                # that we return the true Python refcount, but as if the
-                # cache didn't exist.  While this routine runs, ob carries
-                # 3 extra references:  1 for the binding to name 'ob',
-                # another because ob lives in the con._cache_items() list
-                # we're iterating over, and 1 more because calling
-                # sys.getrefcount(ob) boosts ob's count itself.  So the
-                # true refcount is 3 less than sys.getrefcount(ob)
-                # returns.  But, in addition to that, the cache holds an
-                # extra reference on non-ghost objects, and we also want
-                # to pretend that doesn't exist.
-                detail.append({
-                    'conn_no': cn,
-                    'oid': oid,
-                    'id': id,
-                    'klass': "%s%s" % (module, ob.__class__.__name__),
-                    'rc': rc(ob) - 3 - (ob._p_changed is not None),
-                    'state': ob._p_changed,
-                    #'references': con.references(oid),
-                    })
-
-        self._connectionMap(f)
-        return detail
-
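-    # A CPython-specific illustration (not from the original module) of the
-    # refcount arithmetic used in cacheExtremeDetail() above:
-    #
-    #   import sys
-    #   ob = object()
-    #   sys.getrefcount(ob)   # 2: the 'ob' binding plus the call argument
-    #   items = [ob]
-    #   sys.getrefcount(ob)   # 3: list membership adds one more
-    #
-    # Inside f(), ob is bound to a name, lives in the _cache_items() list,
-    # and is passed to rc(), hence the constant 3 subtracted there.
-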
-    def cacheFullSweep(self):
-        self._connectionMap(lambda c: c._cache.full_sweep())
-
-    def cacheLastGCTime(self):
-        m = [0]
-        def f(con, m=m):
-            t = con._cache.cache_last_gc_time
-            if t > m[0]:
-                m[0] = t
-
-        self._connectionMap(f)
-        return m[0]
-
-    def cacheMinimize(self):
-        self._connectionMap(lambda c: c._cache.minimize())
-
-    def cacheSize(self):
-        m = [0]
-        def f(con, m=m):
-            m[0] += con._cache.cache_non_ghost_count
-
-        self._connectionMap(f)
-        return m[0]
-
-    def cacheDetailSize(self):
-        m = []
-        def f(con, m=m):
-            m.append({'connection': repr(con),
-                      'ngsize': con._cache.cache_non_ghost_count,
-                      'size': len(con._cache)})
-        self._connectionMap(f)
-        m.sort()
-        return m
-
-    def close(self):
-        """Close the database and its underlying storage.
-
-        It is important to close the database, because the storage may
-        flush in-memory data structures to disk when it is closed.
-        Leaving the storage open when the process exits can cause the
-        next open to be slow.
-
-        What effect does closing the database have on existing
-        connections?  Technically, they remain open, but their storage
-        is closed, so they stop behaving usefully.  Perhaps close()
-        should also close all the Connections.
-        """
-        self._storage.close()
-
-    def commitVersion(self, source, destination='', txn=None):
-        if txn is None:
-            txn = transaction.get()
-        txn.register(CommitVersion(self, source, destination))
-
-    def getCacheSize(self):
-        return self._cache_size
-
-    def lastTransaction(self):
-        return self._storage.lastTransaction()
-
-    def getName(self):
-        return self._storage.getName()
-
-    def getPoolSize(self):
-        return self._pool_size
-
-    def getSize(self):
-        return self._storage.getSize()
-
-    def getVersionCacheSize(self):
-        return self._version_cache_size
-
-    def getVersionPoolSize(self):
-        return self._version_pool_size
-
-    def invalidate(self, tid, oids, connection=None, version=''):
-        """Invalidate references to a given oid.
-
-        This is used to indicate that one of the connections has committed a
-        change to the object.  The connection committing the change should be
-        passed in to prevent useless (but harmless) messages to the
-        connection.
-        """
-        if connection is not None:
-            version = connection._version
-        # Update modified in version cache
-        for oid in oids.keys():
-            h = hash(oid) % 131
-            o = self._miv_cache.get(h, None)
-            if o is not None and o[0]==oid:
-                del self._miv_cache[h]
-
-        # Notify connections.
-        def inval(c):
-            if (c is not connection and
-                  (not version or c._version == version)):
-                c.invalidate(tid, oids)
-        self._connectionMap(inval)
-
-    def modifiedInVersion(self, oid):
-        h = hash(oid) % 131
-        cache = self._miv_cache
-        o = cache.get(h, None)
-        if o and o[0] == oid:
-            return o[1]
-        v = self._storage.modifiedInVersion(oid)
-        cache[h] = oid, v
-        return v
-
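-    # A sketch (not from the original source) of how the 131-slot
-    # modified-in-version cache used above behaves.  It is a direct-mapped
-    # cache: two oids whose hashes collide modulo 131 simply evict each
-    # other.
-    #
-    #   slot = hash(oid) % 131           # fixed number of buckets
-    #   self._miv_cache[slot] = oid, v   # overwrites any colliding entry
-    #
-    # Correctness is preserved because invalidate() clears a slot whenever
-    # the cached oid is written, and a miss just falls back to the storage.
-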
-    def objectCount(self):
-        return len(self._storage)
-
-    def open(self, version='',
-             transaction=DEPRECATED_ARGUMENT, temporary=DEPRECATED_ARGUMENT,
-             force=DEPRECATED_ARGUMENT, waitflag=DEPRECATED_ARGUMENT,
-             mvcc=True, txn_mgr=None, synch=True):
-        """Return a database Connection for use by application code.
-
-        The optional `version` argument can be used to specify that a
-        version connection is desired.
-
-        Note that the connection pool is managed as a stack, to
-        increase the likelihood that the connection's cache will
-        include useful objects.
-
-        :Parameters:
-          - `version`: the "version" that all changes will be made
-             in, defaults to no version.
-          - `mvcc`: boolean indicating whether MVCC is enabled
-          - `txn_mgr`: transaction manager to use.  None means
-             use the default transaction manager.
-          - `synch`: boolean indicating whether Connection should
-             register for afterCompletion() calls.
-        """
-
-        if temporary is not DEPRECATED_ARGUMENT:
-            deprecated36("DB.open() temporary= ignored. "
-                         "open() no longer blocks.")
-
-        if force is not DEPRECATED_ARGUMENT:
-            deprecated36("DB.open() force= ignored. "
-                         "open() no longer blocks.")
-
-        if waitflag is not DEPRECATED_ARGUMENT:
-            deprecated36("DB.open() waitflag= ignored. "
-                         "open() no longer blocks.")
-
-        if transaction is not DEPRECATED_ARGUMENT:
-            deprecated36("DB.open() transaction= ignored.")
-
-        self._a()
-        try:
-            # pool <- the _ConnectionPool for this version
-            pool = self._pools.get(version)
-            if pool is None:
-                if version:
-                    size = self._version_pool_size
-                else:
-                    size = self._pool_size
-                self._pools[version] = pool = _ConnectionPool(size)
-            assert pool is not None
-
-            # result <- a connection
-            result = pool.pop()
-            if result is None:
-                if version:
-                    size = self._version_cache_size
-                else:
-                    size = self._cache_size
-                c = self.klass(version=version, cache_size=size,
-                               mvcc=mvcc, txn_mgr=txn_mgr)
-                pool.push(c)
-                result = pool.pop()
-            assert result is not None
-
-            # Tell the connection it belongs to self.
-            result._setDB(self, mvcc=mvcc, txn_mgr=txn_mgr, synch=synch)
-
-            # A good time to do some cache cleanup.
-            self._connectionMap(lambda c: c.cacheGC())
-
-            return result
-
-        finally:
-            self._r()
-
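-    # A minimal usage sketch (illustrative only; the path 'Data.fs' is
-    # hypothetical):
-    #
-    #   from ZODB.FileStorage import FileStorage
-    #   from ZODB.DB import DB
-    #   db = DB(FileStorage('Data.fs'))
-    #   conn = db.open()    # pops a pooled connection, or creates one
-    #   root = conn.root()
-    #   conn.close()        # returns the connection to its pool
-    #   db.close()
-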
-    def removeVersionPool(self, version):
-        try:
-            del self._pools[version]
-        except KeyError:
-            pass
-
-    def connectionDebugInfo(self):
-        result = []
-        t = time()
-
-        def get_info(c):
-            # `result`, `t` and `version` are lexically inherited.
-            o = c._opened
-            d = c.getDebugInfo()
-            if d:
-                if len(d) == 1:
-                    d = d[0]
-            else:
-                d = ''
-            d = "%s (%s)" % (d, len(c._cache))
-
-            result.append({
-                'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
-                'info': d,
-                'version': version,
-                })
-
-        for version, pool in self._pools.items():
-            pool.map(get_info)
-        return result
-
-    def getActivityMonitor(self):
-        return self._activity_monitor
-
-    def pack(self, t=None, days=0):
-        """Pack the storage, deleting unused object revisions.
-
-        A pack is always performed relative to a particular time, by
-        default the current time.  All object revisions that are not
-        reachable as of the pack time are deleted from the storage.
-
-        The cost of this operation varies by storage, but it is
-        usually an expensive operation.
-
-        There are two optional arguments that can be used to set the
-        pack time: t, pack time in seconds since the epoch, and days,
-        the number of days to subtract from t or from the current
-        time if t is not specified.
-        """
-        if t is None:
-            t = time()
-        t -= days * 86400
-        try:
-            self._storage.pack(t, referencesf)
-        except:
-            logger.error("packing", exc_info=True)
-            raise
-
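-    # For example (a sketch, assuming db is an open DB instance), packing
-    # away everything unreachable as of a week ago can be spelled two ways:
-    #
-    #   import time
-    #   db.pack(days=7)
-    #   db.pack(t=time.time() - 7 * 86400)   # equivalent
-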
-    def setActivityMonitor(self, am):
-        self._activity_monitor = am
-
-    def classFactory(self, connection, modulename, globalname):
-        # Zope will rebind this method to arbitrary user code at runtime.
-        return find_global(modulename, globalname)
-
-    def setCacheSize(self, size):
-        self._a()
-        try:
-            self._cache_size = size
-            pool = self._pools.get('')
-            if pool is not None:
-                def setsize(c):
-                    c._cache.cache_size = size
-                pool.map(setsize)
-        finally:
-            self._r()
-
-    def setVersionCacheSize(self, size):
-        self._a()
-        try:
-            self._version_cache_size = size
-            def setsize(c):
-                c._cache.cache_size = size
-            for version, pool in self._pools.items():
-                if version:
-                    pool.map(setsize)
-        finally:
-            self._r()
-
-    def setPoolSize(self, size):
-        self._pool_size = size
-        self._reset_pool_sizes(size, for_versions=False)
-
-    def setVersionPoolSize(self, size):
-        self._version_pool_size = size
-        self._reset_pool_sizes(size, for_versions=True)
-
-    def _reset_pool_sizes(self, size, for_versions=False):
-        self._a()
-        try:
-            for version, pool in self._pools.items():
-                if (version != '') == for_versions:
-                    pool.set_pool_size(size)
-        finally:
-            self._r()
-
-    def undo(self, id, txn=None):
-        """Undo a transaction identified by id.
-
-        A transaction can be undone if all of the objects involved in
-        the transaction were not modified subsequently, if any
-        modifications can be resolved by conflict resolution, or if
-        subsequent changes resulted in the same object state.
-
-        The value of id should be generated by calling undoLog()
-        or undoInfo().  The value of id is not the same as a
-        transaction id used by other methods; it is unique to undo().
-
-        :Parameters:
-          - `id`: a storage-specific transaction identifier
-          - `txn`: transaction context to use for undo().
-            By default, uses the current transaction.
-        """
-        if txn is None:
-            txn = transaction.get()
-        txn.register(TransactionalUndo(self, id))
-
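-    # A sketch of the intended calling pattern (illustrative only): fetch an
-    # undo id from undoLog(), register the undo, then commit.
-    #
-    #   import transaction
-    #   log = db.undoLog(0, 20)    # up to 20 of the most recent txns
-    #   db.undo(log[0]['id'])      # joins the current transaction
-    #   transaction.commit()       # the undo itself happens here
-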
-    def versionEmpty(self, version):
-        return self._storage.versionEmpty(version)
-
-    # The following methods are deprecated and have no effect
-
-    def getCacheDeactivateAfter(self):
-        """Deprecated"""
-        deprecated36("getCacheDeactivateAfter has no effect")
-
-    def getVersionCacheDeactivateAfter(self):
-        """Deprecated"""
-        deprecated36("getVersionCacheDeactivateAfter has no effect")
-
-    def setCacheDeactivateAfter(self, v):
-        """Deprecated"""
-        deprecated36("setCacheDeactivateAfter has no effect")
-
-    def setVersionCacheDeactivateAfter(self, v):
-        """Deprecated"""
-        deprecated36("setVersionCacheDeactivateAfter has no effect")
-
-class ResourceManager(object):
-    """Transaction participation for a version or undo resource."""
-
-    def __init__(self, db):
-        self._db = db
-        # Delegate the actual 2PC methods to the storage
-        self.tpc_vote = self._db._storage.tpc_vote
-        self.tpc_finish = self._db._storage.tpc_finish
-        self.tpc_abort = self._db._storage.tpc_abort
-
-    def sortKey(self):
-        return "%s:%s" % (self._db._storage.sortKey(), id(self))
-
-    def tpc_begin(self, txn, sub=False):
-        if sub:
-            raise ValueError("doesn't support sub-transactions")
-        self._db._storage.tpc_begin(txn)
-
-    # The object registers itself with the txn manager, so the ob
-    # argument to the methods below is self.
-
-    def abort(self, obj, txn):
-        pass
-
-    def commit(self, obj, txn):
-        pass
-
-class CommitVersion(ResourceManager):
-
-    def __init__(self, db, version, dest=''):
-        super(CommitVersion, self).__init__(db)
-        self._version = version
-        self._dest = dest
-
-    def commit(self, ob, t):
-        dest = self._dest
-        tid, oids = self._db._storage.commitVersion(self._version,
-                                                    self._dest,
-                                                    t)
-        oids = dict.fromkeys(oids, 1)
-        self._db.invalidate(tid, oids, version=self._dest)
-        if self._dest:
-            # the code above just invalidated the dest version.
-            # now we need to invalidate the source!
-            self._db.invalidate(tid, oids, version=self._version)
-
-class AbortVersion(ResourceManager):
-
-    def __init__(self, db, version):
-        super(AbortVersion, self).__init__(db)
-        self._version = version
-
-    def commit(self, ob, t):
-        tid, oids = self._db._storage.abortVersion(self._version, t)
-        self._db.invalidate(tid,
-                            dict.fromkeys(oids, 1),
-                            version=self._version)
-
-class TransactionalUndo(ResourceManager):
-
-    def __init__(self, db, tid):
-        super(TransactionalUndo, self).__init__(db)
-        self._tid = tid
-
-    def commit(self, ob, t):
-        tid, oids = self._db._storage.undo(self._tid, t)
-        self._db.invalidate(tid, dict.fromkeys(oids, 1))
diff --git a/branches/bug1734/src/ZODB/DEPENDENCIES.cfg b/branches/bug1734/src/ZODB/DEPENDENCIES.cfg
deleted file mode 100644
index 5fb9f34e..00000000
--- a/branches/bug1734/src/ZODB/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-BTrees
-ZConfig
-persistent
-transaction
-# referenced by ZODB.config and related tests
-ZEO
diff --git a/branches/bug1734/src/ZODB/DemoStorage.py b/branches/bug1734/src/ZODB/DemoStorage.py
deleted file mode 100644
index 87bdbd5f..00000000
--- a/branches/bug1734/src/ZODB/DemoStorage.py
+++ /dev/null
@@ -1,539 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Demo ZODB storage
-
-The Demo storage serves two purposes:
-
-  - Provide an example implementation of a full storage without
-    distracting storage details,
-
-  - Provide a volatile storage that is useful for giving demonstrations.
-
-The demo storage can have a "base" storage that is used in a
-read-only fashion. The base storage must not contain version
-data.
-
-There are three main data structures:
-
-  _data -- Transaction logging information necessary for undo
-
-      This is a mapping from transaction id to transaction, where
-      a transaction is simply a 5-tuple:
-
-        packed, user, description, extension_data, records
-
-      where extension_data is a dictionary or None and records are the
-      actual records in chronological order. Packed is a flag
-      indicating whether the transaction has been packed or not.
-
-  _index -- A mapping from oid to record
-
-  _vindex -- A mapping from version name to version data
-
-      where version data is a mapping from oid to record
-
-A record is a tuple:
-
-  oid, pre, vdata, p, tid
-
-where:
-
-     oid -- object id
-
-     pre -- The previous record for this object (or None)
-
-     vdata -- version data
-
-        None if not a version, otherwise:
-           version, non-version-record
-
-     p -- the pickle data or None
-
-     tid -- the transaction id that wrote the record
-
-The pickle data will be None for a record for an object created in
-an aborted version.
-
-It is instructive to watch what happens to the internal data structures
-as changes are made.  For example, in Zope, you can create an external
-method::
-
-  import Zope2
-
-  def info(RESPONSE):
-      RESPONSE['Content-type']= 'text/plain'
-
-      return Zope2.DB._storage._splat()
-
-and call it to monitor the storage.
-
-"""
-
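-# Illustrative record values (not from the original file), following the
-# layout described above.  A plain store of an object produces
-#
-#   [oid, previous_record_or_None, None, pickle_data, tid]
-#
-# while a store into version 'v' produces
-#
-#   [oid, previous_record_or_None, ('v', non_version_record), pickle_data, tid]
-#
-# where non_version_record holds the object's non-version state, or is None
-# if the object was created inside the version.
-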
-import base64, time
-from ZODB import POSException, BaseStorage
-from ZODB.utils import z64, oid_repr
-from persistent.TimeStamp import TimeStamp
-from cPickle import loads
-from BTrees import OOBTree
-
-class DemoStorage(BaseStorage.BaseStorage):
-
-    def __init__(self, name='Demo Storage', base=None, quota=None):
-        BaseStorage.BaseStorage.__init__(self, name, base)
-
-        # We use a BTree because the items are sorted!
-        self._data = OOBTree.OOBTree()
-        self._index = {}
-        self._vindex = {}
-        self._base = base
-        self._size = 0
-        self._quota = quota
-        self._ltid = None
-        self._clear_temp()
-        if base is not None and base.versions():
-            raise POSException.StorageError, (
-                "Demo base storage has version data")
-
-
-    def __len__(self):
-        base=self._base
-        return (base and len(base) or 0) + len(self._index)
-
-    def getSize(self):
-        s=100
-        for tid, (p, u, d, e, t) in self._data.items():
-            s=s+16+24+12+4+16+len(u)+16+len(d)+16+len(e)+16
-            for oid, pre, vdata, p, tid in t:
-                s=s+16+24+24+4+4+(p and (16+len(p)) or 4)
-                if vdata: s=s+12+16+len(vdata[0])+4
-
-        s=s+16*len(self._index)
-
-        for v in self._vindex.values():
-            s=s+32+16*len(v)
-
-        self._size=s
-        return s
-
-    def abortVersion(self, src, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-        if not src:
-            raise POSException.VersionCommitError("Invalid version")
-
-        self._lock_acquire()
-        try:
-            v = self._vindex.get(src, None)
-            if not v:
-                return
-
-            oids = []
-            for r in v.values():
-                oid, pre, (version, nv), p, tid = r
-                oids.append(oid)
-                if nv:
-                    oid, pre, vdata, p, tid = nv
-                    self._tindex.append([oid, r, None, p, self._tid])
-                else:
-                    # effectively, delete the thing
-                    self._tindex.append([oid, r, None, None, self._tid])
-
-            return self._tid, oids
-
-        finally: self._lock_release()
-
-    def commitVersion(self, src, dest, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        if not src:
-            raise POSException.VersionCommitError("Invalid source version")
-        if src == dest:
-            raise POSException.VersionCommitError(
-                "Can't commit to same version: %s" % repr(src))
-
-        self._lock_acquire()
-        try:
-            v = self._vindex.get(src)
-            if v is None:
-                return
-
-            newserial = self._tid
-            tindex = self._tindex
-            oids = []
-            for r in v.values():
-                oid, pre, vdata, p, tid = r
-                assert vdata is not None
-                oids.append(oid)
-                if dest:
-                    new_vdata = dest, vdata[1]
-                else:
-                    new_vdata = None
-                tindex.append([oid, r, new_vdata, p, self._tid])
-
-            return self._tid, oids
-
-        finally:
-            self._lock_release()
-
-    def loadEx(self, oid, version):
-        self._lock_acquire()
-        try:
-            try:
-                oid, pre, vdata, p, tid = self._index[oid]
-            except KeyError:
-                if self._base:
-                    return self._base.load(oid, '')
-                raise KeyError, oid
-
-            ver = ""
-            if vdata:
-                oversion, nv = vdata
-                if oversion != version:
-                    if nv:
-                        # Return the current txn's tid with the non-version
-                        # data.
-                        oid, pre, vdata, p, skiptid = nv
-                    else:
-                        raise KeyError, oid
-                ver = oversion
-
-            if p is None:
-                raise KeyError, oid
-
-            return p, tid, ver
-        finally: self._lock_release()
-
-    def load(self, oid, version):
-        return self.loadEx(oid, version)[:2]
-
-    def modifiedInVersion(self, oid):
-        self._lock_acquire()
-        try:
-            try:
-                oid, pre, vdata, p, tid = self._index[oid]
-                if vdata: return vdata[0]
-                return ''
-            except: return ''
-        finally: self._lock_release()
-
-    def store(self, oid, serial, data, version, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        self._lock_acquire()
-        try:
-            old = self._index.get(oid, None)
-            if old is None:
-                # Hm, nothing here, check the base version:
-                if self._base:
-                    try:
-                        p, tid = self._base.load(oid, '')
-                    except KeyError:
-                        pass
-                    else:
-                        old = oid, None, None, p, tid
-
-            nv=None
-            if old:
-                oid, pre, vdata, p, tid = old
-
-                if vdata:
-                    if vdata[0] != version:
-                        raise POSException.VersionLockError, oid
-
-                    nv=vdata[1]
-                else:
-                    nv=old
-
-                if serial != tid:
-                    raise POSException.ConflictError(
-                        oid=oid, serials=(tid, serial), data=data)
-
-            r = [oid, old, version and (version, nv) or None, data, self._tid]
-            self._tindex.append(r)
-
-            s=self._tsize
-            s=s+72+(data and (16+len(data)) or 4)
-            if version: s=s+32+len(version)
-
-            if self._quota is not None and s > self._quota:
-                raise POSException.StorageError, (
-                    '''<b>Quota Exceeded</b><br>
-                    The maximum quota for this demonstration storage
-                    has been exceeded.<br>Have a nice day.''')
-
-        finally: self._lock_release()
-        return self._tid
-
-    def supportsVersions(self):
-        return 1
-
-    def _clear_temp(self):
-        self._tindex = []
-        self._tsize = self._size + 160
-
-    def lastTransaction(self):
-        return self._ltid
-
-    def _begin(self, tid, u, d, e):
-        self._tsize = self._size + 120 + len(u) + len(d) + len(e)
-
-    def _finish(self, tid, user, desc, ext):
-        self._size = self._tsize
-
-        self._data[tid] = None, user, desc, ext, tuple(self._tindex)
-        for r in self._tindex:
-            oid, pre, vdata, p, tid = r
-            old = self._index.get(oid)
-            # If the object had version data, remove the version data.
-            if old is not None:
-                oldvdata = old[2]
-                if oldvdata:
-                    v = self._vindex[oldvdata[0]]
-                    del v[oid]
-                    if not v:
-                        # If the version info is now empty, remove it.
-                        del self._vindex[oldvdata[0]]
-
-            self._index[oid] = r
-
-            # If there is version data, then update self._vindex, too.
-            if vdata:
-                version = vdata[0]
-                v = self._vindex.get(version)
-                if v is None:
-                    v = self._vindex[version] = {}
-                v[oid] = r
-        self._ltid = self._tid
-
-    def undoLog(self, first, last, filter=None):
-        if last < 0:
-            last = first - last + 1
-        self._lock_acquire()
-        try:
-            # Unsure:  shouldn't we sort this?
-            transactions = self._data.items()
-            pos = len(transactions)
-            r = []
-            i = 0
-            while i < last and pos:
-                pos = pos - 1
-                if i < first:
-                    i = i + 1
-                    continue
-                tid, (p, u, d, e, t) = transactions[pos]
-                if p:
-                    continue
-                d = {'id': base64.encodestring(tid)[:-1],
-                     'time': TimeStamp(tid).timeTime(),
-                     'user_name': u, 'description': d}
-                if e:
-                    d.update(loads(e))
-
-                if filter is None or filter(d):
-                    r.append(d)
-                    i = i + 1
-            return r
-        finally:
-            self._lock_release()
-
-    def versionEmpty(self, version):
-        return not self._vindex.get(version, None)
-
-    def versions(self, max=None):
-        r = []
-        for v in self._vindex.keys():
-            if self.versionEmpty(v):
-                continue
-            r.append(v)
-            if max is not None and len(r) >= max:
-                break
-        return r
-
-    def _build_indexes(self, stop='\377\377\377\377\377\377\377\377'):
-        # Rebuild index structures from transaction data
-        index = {}
-        vindex = {}
-        for tid, (p, u, d, e, t) in self._data.items():
-            if tid >= stop:
-                break
-            for r in t:
-                oid, pre, vdata, p, tid = r
-                old=index.get(oid, None)
-
-                if old is not None:
-                    oldvdata=old[2]
-                    if oldvdata:
-                        v=vindex[oldvdata[0]]
-                        del v[oid]
-                        if not v: del vindex[oldvdata[0]]
-
-                index[oid]=r
-
-                if vdata:
-                    version=vdata[0]
-                    v=vindex.get(version, None)
-                    if v is None: v=vindex[version]={}
-                    vindex[vdata[0]][oid]=r
-
-        return index, vindex
-
-    def pack(self, t, referencesf):
-        # Packing is hard, at least when undo is supported.
-        # Even for a simple storage like this one, packing
-        # is pretty complex.
-
-        self._lock_acquire()
-        try:
-
-            stop=`TimeStamp(*time.gmtime(t)[:5]+(t%60,))`
-
-            # Build indexes up to the pack time:
-            index, vindex = self._build_indexes(stop)
-
-
-            # TODO:  This packing algorithm is flawed. It ignores
-            # references from non-current records after the pack
-            # time.
-
-            # Now build an index of *only* those objects reachable
-            # from the root.
-            rootl = [z64]
-            pindex = {}
-            while rootl:
-                oid = rootl.pop()
-                if oid in pindex:
-                    continue
-
-                # Scan non-version pickle for references
-                r = index.get(oid, None)
-                if r is None:
-                    if self._base:
-                        p, s = self._base.load(oid, '')
-                        referencesf(p, rootl)
-                else:
-                    pindex[oid] = r
-                    oid, pre, vdata, p, tid = r
-                    referencesf(p, rootl)
-                    if vdata:
-                        nv = vdata[1]
-                        if nv:
-                            oid, pre, vdata, p, tid = nv
-                            referencesf(p, rootl)
-
-            # Now we're ready to do the actual packing.
-            # We'll simply edit the transaction data in place.
-            # We'll defer deleting transactions till the end
-            # to avoid messing up the BTree items.
-            deleted = []
-            for tid, (p, u, d, e, records) in self._data.items():
-                if tid >= stop:
-                    break
-                o = []
-                for r in records:
-                    c = pindex.get(r[0])
-                    if c is None:
-                        # GC this record, no longer referenced
-                        continue
-                    if c == r:
-                        # This is the most recent revision.
-                        o.append(r)
-                    else:
-                        # This record is not the indexed record,
-                        # so it may not be current. Let's see.
-                        vdata = r[2]  # vdata is the record's third field
-                        if vdata:
-                            # Version records are current *only* if they
-                            # are indexed.
-                            continue
-                        else:
-                            # OK, this isn't a version record, so it may be the
-                            # non-version record for the indexed record.
-                            vdata = c[2]  # the indexed record's vdata
-                            if vdata:
-                                if vdata[1] != r:
-                                    # This record is not the non-version
-                                    # record for the indexed record
-                                    continue
-                            else:
-                                # The indexed record is not a version record,
-                                # so this record can not be the non-version
-                                # record for it.
-                                continue
-                        o.append(r)
-
-                if o:
-                    if len(o) != len(records):
-                        self._data[tid] = 1, u, d, e, tuple(o) # Reset data
-                else:
-                    deleted.append(tid)
-
-            # Now delete empty transactions
-            for tid in deleted:
-                del self._data[tid]
-
-            # Now reset previous pointers for "current" records:
-            for r in pindex.values():
-                r[1] = None # Previous record
-                if r[2] and r[2][1]: # vdata
-                    # If this record contains version data and
-                    # non-version data, then clear it out.
-                    r[2][1][2] = None
-
-            # Finally, rebuild indexes from transaction data:
-            self._index, self._vindex = self._build_indexes()
-
-        finally:
-            self._lock_release()
-        self.getSize()
-
-    def _splat(self):
-        """Spit out a string showing state.
-        """
-        o=[]
-
-        o.append('Transactions:')
-        for tid, (p, u, d, e, t) in self._data.items():
-            o.append("  %s %s" % (TimeStamp(tid), p))
-            for r in t:
-                oid, pre, vdata, p, tid = r
-                oid = oid_repr(oid)
-                tid = oid_repr(tid)
-##                if serial is not None: serial=str(TimeStamp(serial))
-                pre=id(pre)
-                if vdata and vdata[1]: vdata=vdata[0], id(vdata[1])
-                if p: p=''
-                o.append('    %s: %s' %
-                         (id(r), `(oid, pre, vdata, p, tid)`))
-
-        o.append('\nIndex:')
-        items=self._index.items()
-        items.sort()
-        for oid, r in items:
-            if r: r=id(r)
-            o.append('  %s: %s' % (oid_repr(oid), r))
-
-        o.append('\nVersion Index:')
-        items=self._vindex.items()
-        items.sort()
-        for version, v in items:
-            o.append('  '+version)
-            vitems=v.items()
-            vitems.sort()
-            for oid, r in vitems:
-                if r: r=id(r)
-                o.append('    %s: %s' % (oid_repr(oid), r))
-
-        return '\n'.join(o)
diff --git a/branches/bug1734/src/ZODB/ExportImport.py b/branches/bug1734/src/ZODB/ExportImport.py
deleted file mode 100644
index 1786b5a6..00000000
--- a/branches/bug1734/src/ZODB/ExportImport.py
+++ /dev/null
@@ -1,155 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Support for database export and import."""
-
-from cStringIO import StringIO
-from cPickle import Pickler, Unpickler
-from tempfile import TemporaryFile
-import logging
-
-from ZODB.POSException import ExportError
-from ZODB.utils import p64, u64
-from ZODB.serialize import referencesf
-
-logger = logging.getLogger('ZODB.ExportImport')
-
-class ExportImport:
-
-    def exportFile(self, oid, f=None):
-        if f is None:
-            f = TemporaryFile()
-        elif isinstance(f, str):
-            f = open(f,'w+b')
-        f.write('ZEXP')
-        oids = [oid]
-        done_oids = {}
-        load = self._storage.load
-        while oids:
-            oid = oids.pop(0)
-            if oid in done_oids:
-                continue
-            done_oids[oid] = True
-            try:
-                p, serial = load(oid, self._version)
-            except:
-                logger.debug("broken reference for oid %s", repr(oid),
-                             exc_info=True)
-            else:
-                referencesf(p, oids)
-                f.writelines([oid, p64(len(p)), p])
-        f.write(export_end_marker)
-        return f
-
-    def importFile(self, f, clue='', customImporters=None):
-        # This is tricky, because we need to work in a transaction!
-
-        if isinstance(f, str):
-            f = open(f,'rb')
-
-        magic = f.read(4)
-        if magic != 'ZEXP':
-            if customImporters and customImporters.has_key(magic):
-                f.seek(0)
-                return customImporters[magic](self, f, clue)
-            raise ExportError("Invalid export header")
-
-        t = self._txn_mgr.get()
-        if clue:
-            t.note(clue)
-
-        return_oid_list = []
-        self._import = f, return_oid_list
-        self._register()
-        t.commit(1)
-        # Return the root imported object.
-        if return_oid_list:
-            return self.get(return_oid_list[0])
-        else:
-            return None
-
-    def _importDuringCommit(self, transaction, f, return_oid_list):
-        """Import data during two-phase commit.
-
-        Invoked by the transaction manager mid commit.
-        Appends one item, the OID of the first object created,
-        to return_oid_list.
-        """
-        oids = {}
-
-        def persistent_load(ooid):
-            """Remap a persistent id to a new ID and create a ghost for it."""
-
-            klass = None
-            if isinstance(ooid, tuple):
-                ooid, klass = ooid
-
-            if ooid in oids:
-                oid = oids[ooid]
-            else:
-                if klass is None:
-                    oid = self._storage.new_oid()
-                else:
-                    oid = self._storage.new_oid(), klass
-                oids[ooid] = oid
-
-            return Ghost(oid)
-
-        version = self._version
-
-        while 1:
-            h = f.read(16)
-            if h == export_end_marker:
-                break
-            if len(h) != 16:
-                raise ExportError("Truncated export file")
-            l = u64(h[8:16])
-            p = f.read(l)
-            if len(p) != l:
-                raise ExportError("Truncated export file")
-
-            ooid = h[:8]
-            if oids:
-                oid = oids[ooid]
-                if isinstance(oid, tuple):
-                    oid = oid[0]
-            else:
-                oids[ooid] = oid = self._storage.new_oid()
-                return_oid_list.append(oid)
-
-            pfile = StringIO(p)
-            unpickler = Unpickler(pfile)
-            unpickler.persistent_load = persistent_load
-
-            newp = StringIO()
-            pickler = Pickler(newp, 1)
-            pickler.persistent_id = persistent_id
-
-            pickler.dump(unpickler.load())
-            pickler.dump(unpickler.load())
-            p = newp.getvalue()
-
-            self._storage.store(oid, None, p, version, transaction)
-
-
-export_end_marker = '\377'*16
-
-class Ghost(object):
-    __slots__ = ("oid",)
-    def __init__(self, oid):
-        self.oid = oid
-
-def persistent_id(obj):
-    if isinstance(obj, Ghost):
-        return obj.oid
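-
-# A sketch (not part of the original module) of the export file layout that
-# exportFile() writes and _importDuringCommit() reads:
-#
-#   'ZEXP' + repeated (8-byte oid)(8-byte big-endian length)(pickle bytes)
-#          + 16 bytes of '\377' as the end marker
-#
-# A minimal reader under those assumptions:
-#
-#   import struct
-#   def iter_export_records(f):
-#       assert f.read(4) == 'ZEXP'
-#       while 1:
-#           h = f.read(16)
-#           if h == '\377' * 16:
-#               break
-#           oid, length = h[:8], struct.unpack(">Q", h[8:])[0]
-#           yield oid, f.read(length)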
diff --git a/branches/bug1734/src/ZODB/FileStorage/FileStorage.py b/branches/bug1734/src/ZODB/FileStorage/FileStorage.py
deleted file mode 100644
index 54b0d3a9..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/FileStorage.py
+++ /dev/null
@@ -1,2082 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Storage implementation using a log written to a single file.
-
-$Revision: 1.16 $
-"""
-
-import base64
-from cPickle import Pickler, Unpickler, loads
-import errno
-import os
-import sys
-import time
-import logging
-from types import StringType
-from struct import pack, unpack
-
-# Not all platforms have fsync
-fsync = getattr(os, "fsync", None)
-
-from ZODB import BaseStorage, ConflictResolution, POSException
-from ZODB.POSException \
-     import UndoError, POSKeyError, MultipleUndoErrors, VersionLockError
-from persistent.TimeStamp import TimeStamp
-from ZODB.lock_file import LockFile
-from ZODB.utils import p64, u64, cp, z64
-from ZODB.FileStorage.fspack import FileStoragePacker
-from ZODB.FileStorage.format \
-     import FileStorageFormatter, DataHeader, TxnHeader, DATA_HDR, \
-     DATA_HDR_LEN, TRANS_HDR, TRANS_HDR_LEN, CorruptedDataError
-from ZODB.loglevels import BLATHER
-from ZODB.fsIndex import fsIndex
-
-t32 = 1L << 32
-
-packed_version = "FS21"
-
-logger = logging.getLogger('ZODB.FileStorage')
-
-
-def panic(message, *data):
-    logger.critical(message, *data)
-    raise CorruptedTransactionError(message)
-
-class FileStorageError(POSException.StorageError):
-    pass
-
-class PackError(FileStorageError):
-    pass
-
-class FileStorageFormatError(FileStorageError):
-    """Invalid file format
-
-    The format of the given file is not valid.
-    """
-
-class CorruptedFileStorageError(FileStorageError,
-                                POSException.StorageSystemError):
-    """Corrupted file storage."""
-
-class CorruptedTransactionError(CorruptedFileStorageError):
-    pass
-
-class FileStorageQuotaError(FileStorageError,
-                            POSException.StorageSystemError):
-    """File storage quota exceeded."""
-
-# Intended to be raised only in fspack.py, and ignored here.
-class RedundantPackWarning(FileStorageError):
-    pass
-
-class TempFormatter(FileStorageFormatter):
-    """Helper class used to read formatted FileStorage data."""
-
-    def __init__(self, afile):
-        self._file = afile
-
-class FileStorage(BaseStorage.BaseStorage,
-                  ConflictResolution.ConflictResolvingStorage,
-                  FileStorageFormatter):
-
-    # Set True while a pack is in progress; undo is blocked for the duration.
-    _pack_is_in_progress = False
-
-    _records_before_save = 10000
-
-    def __init__(self, file_name, create=False, read_only=False, stop=None,
-                 quota=None):
-
-        if read_only:
-            self._is_read_only = True
-            if create:
-                raise ValueError("can't create a read-only file")
-        elif stop is not None:
-            raise ValueError("time-travel only supported in read-only mode")
-
-        if stop is None:
-            stop='\377'*8
-
-        # Lock the database and set up the temp file.
-        if not read_only:
-            # Create the lock file
-            self._lock_file = LockFile(file_name + '.lock')
-            self._tfile = open(file_name + '.tmp', 'w+b')
-            self._tfmt = TempFormatter(self._tfile)
-        else:
-            self._tfile = None
-
-        self._file_name = file_name
-
-        BaseStorage.BaseStorage.__init__(self, file_name)
-
-        (index, vindex, tindex, tvindex,
-         oid2tid, toid2tid, toid2tid_delete) = self._newIndexes()
-        self._initIndex(index, vindex, tindex, tvindex,
-                        oid2tid, toid2tid, toid2tid_delete)
-
-        # Now open the file
-
-        self._file = None
-        if not create:
-            try:
-                self._file = open(file_name, read_only and 'rb' or 'r+b')
-            except IOError, exc:
-                if exc.errno == errno.EFBIG:
-                    # The file is too big to open.  Fail visibly.
-                    raise
-                if exc.errno == errno.ENOENT:
-                    # The file doesn't exist.  Create it.
-                    create = 1
-                # If something else went wrong, it's hard to guess
-                # what the problem was.  If the file does not exist,
-                # create it.  Otherwise, fail.
-                if os.path.exists(file_name):
-                    raise
-                else:
-                    create = 1
-
-        if self._file is None and create:
-            if os.path.exists(file_name):
-                os.remove(file_name)
-            self._file = open(file_name, 'w+b')
-            self._file.write(packed_version)
-
-        r = self._restore_index()
-        if r is not None:
-            self._used_index = 1 # Marker for testing
-            index, vindex, start, ltid = r
-
-            self._initIndex(index, vindex, tindex, tvindex,
-                            oid2tid, toid2tid, toid2tid_delete)
-            self._pos, self._oid, tid = read_index(
-                self._file, file_name, index, vindex, tindex, stop,
-                ltid=ltid, start=start, read_only=read_only,
-                )
-        else:
-            self._used_index = 0 # Marker for testing
-            self._pos, self._oid, tid = read_index(
-                self._file, file_name, index, vindex, tindex, stop,
-                read_only=read_only,
-                )
-            self._save_index()
-
-        self._records_before_save = max(self._records_before_save,
-                                        len(self._index))
-        self._ltid = tid
-
-        # self._pos should always point just past the last
-        # transaction.  During 2PC, data is written after _pos.
-        # The invariant is restored at tpc_abort() or tpc_finish().
-
-        self._ts = tid = TimeStamp(tid)
-        t = time.time()
-        t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
-        if tid > t:
-            seconds = tid.timeTime() - t.timeTime()
-            complainer = logger.warning
-            if seconds > 30 * 60:   # 30 minutes -- way screwed up
-                complainer = logger.critical
-            complainer("%s Database records %d seconds in the future",
-                       file_name, seconds)
-
-        self._quota = quota
-
-        # tid cache statistics.
-        self._oid2tid_nlookups = self._oid2tid_nhits = 0
-
-    def _initIndex(self, index, vindex, tindex, tvindex,
-                   oid2tid, toid2tid, toid2tid_delete):
-        self._index=index
-        self._vindex=vindex
-        self._tindex=tindex
-        self._tvindex=tvindex
-        self._index_get=index.get
-        self._vindex_get=vindex.get
-
-        # .store() needs to compare the passed-in serial to the
-        # current tid in the database.  _oid2tid caches the oid ->
-        # current tid mapping for non-version data (if the current
-        # record for oid is version data, the oid is not a key in
-        # _oid2tid).  The point is that otherwise seeking into the
-        # storage is needed to extract the current tid, and that's
-        # an expensive operation.  For example, if a transaction
-        # stores 4000 objects, and each random seek + read takes 7ms
-        # (that was approximately true on Linux and Windows tests in
-        # mid-2003), that's 28 seconds just to find the old tids.
-        # TODO:  Probably better to junk this and redefine _index as mapping
-        # oid to (offset, tid) pair, via a new memory-efficient BTree type.
-        self._oid2tid = oid2tid
-        # oid->tid map to transactionally add to _oid2tid.
-        self._toid2tid = toid2tid
-        # Set of oids to transactionally delete from _oid2tid (e.g.,
-        # oids reverted by undo, or for which the most recent record
-        # becomes version data).
-        self._toid2tid_delete = toid2tid_delete
-
-    def __len__(self):
-        return len(self._index)
-
-    def _newIndexes(self):
-        # hook to use something other than builtin dict
-        return fsIndex(), {}, {}, {}, {}, {}, {}
-
-    _saved = 0
-    def _save_index(self):
-        """Write the database index to a file to support quick startup."""
-
-        if self._is_read_only:
-            return
-
-        index_name = self.__name__ + '.index'
-        tmp_name = index_name + '.index_tmp'
-
-        f=open(tmp_name,'wb')
-        p=Pickler(f,1)
-
-        # Note:  starting with ZODB 3.2.6, the 'oid' value stored is ignored
-        # by the code that reads the index.  We still write it, so that
-        # .index files can still be read by older ZODBs.
-        info={'index': self._index, 'pos': self._pos,
-              'oid': self._oid, 'vindex': self._vindex}
-
-        p.dump(info)
-        f.flush()
-        f.close()
-
-        try:
-            try:
-                os.remove(index_name)
-            except OSError:
-                pass
-            os.rename(tmp_name, index_name)
-        except: pass
-
-        self._saved += 1
-
-    def _clear_index(self):
-        index_name = self.__name__ + '.index'
-        if os.path.exists(index_name):
-            try:
-                os.remove(index_name)
-            except OSError:
-                pass
-
-    def _sane(self, index, pos):
-        """Sanity check saved index data by reading the last undone trans
-
-        Basically, we read the last not undone transaction and
-        check to see that the included records are consistent
-        with the index.  Any invalid record records or inconsistent
-        object positions cause zero to be returned.
-        """
-        r = self._check_sanity(index, pos)
-        if not r:
-            logger.warning("Ignoring index for %s", self._file_name)
-        return r
-
-    def _check_sanity(self, index, pos):
-
-        if pos < 100:
-            return 0 # insane
-        self._file.seek(0, 2)
-        if self._file.tell() < pos:
-            return 0 # insane
-        ltid = None
-
-        max_checked = 5
-        checked = 0
-
-        while checked < max_checked:
-            self._file.seek(pos - 8)
-            rstl = self._file.read(8)
-            tl = u64(rstl)
-            pos = pos - tl - 8
-            if pos < 4:
-                return 0 # insane
-            h = self._read_txn_header(pos)
-            if not ltid:
-                ltid = h.tid
-            if h.tlen != tl:
-                return 0 # inconsistent lengths
-            if h.status == 'u':
-                continue # undone trans, search back
-            if h.status not in ' p':
-                return 0 # insane
-            if tl < h.headerlen():
-                return 0 # insane
-            tend = pos + tl
-            opos = pos + h.headerlen()
-            if opos == tend:
-                continue # empty trans
-
-            while opos < tend and checked < max_checked:
-                # Read the data records for this transaction
-                h = self._read_data_header(opos)
-
-                if opos + h.recordlen() > tend or h.tloc != pos:
-                    return 0
-
-                if index.get(h.oid, 0) != opos:
-                    return 0 # insane
-
-                checked += 1
-
-                opos = opos + h.recordlen()
-
-            return ltid
-
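-    # A sketch (illustrative, not original code) of the on-disk framing that
-    # makes the backward scan above possible: every transaction record is
-    # followed by a redundant 8-byte copy of its length, so
-    #
-    #   self._file.seek(pos - 8)
-    #   tl = u64(self._file.read(8))   # length of the preceding transaction
-    #   pos = pos - tl - 8             # jump to that transaction's header
-    #
-    # walks the file from its tail toward its head.
-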
-    def _restore_index(self):
-        """Load database index to support quick startup."""
-        # Returns (index, vindex, pos, tid), or None in case of
-        # error.
-        # Starting with ZODB 3.2.6, the 'oid' value stored in the index
-        # is ignored.
-        # The index returned is always an instance of fsIndex.  If the
-        # index cached in the file is a Python dict, it's converted to
-        # fsIndex here, and, if we're not in read-only mode, the .index
-        # file is rewritten with the converted fsIndex so we don't need to
-        # convert it again the next time.
-        file_name=self.__name__
-        index_name=file_name+'.index'
-
-        try:
-            f = open(index_name, 'rb')
-        except:
-            return None
-
-        p=Unpickler(f)
-
-        try:
-            info=p.load()
-        except:
-            exc, err = sys.exc_info()[:2]
-            logger.warning("Failed to load database index: %s: %s", exc, err)
-            return None
-        index = info.get('index')
-        pos = info.get('pos')
-        vindex = info.get('vindex')
-        if index is None or pos is None or vindex is None:
-            return None
-        pos = long(pos)
-
-        if (isinstance(index, dict) or
-                (isinstance(index, fsIndex) and
-                 isinstance(index._data, dict))):
-            # Convert dictionary indexes to fsIndexes *or* convert fsIndexes
-            # which have a dict `_data` attribute to a new fsIndex (newer
-            # fsIndexes have an OOBTree as `_data`).
-            newindex = fsIndex()
-            newindex.update(index)
-            index = newindex
-            if not self._is_read_only:
-                # Save the converted index.
-                f = open(index_name, 'wb')
-                p = Pickler(f, 1)
-                info['index'] = index
-                p.dump(info)
-                f.close()
-                # Now call this method again to get the new data.
-                return self._restore_index()
-
-        tid = self._sane(index, pos)
-        if not tid:
-            return None
-
-        return index, vindex, pos, tid
-
-    def close(self):
-        self._file.close()
-        if hasattr(self,'_lock_file'):
-            self._lock_file.close()
-        if self._tfile:
-            self._tfile.close()
-        try:
-            self._save_index()
-        except:
-            # Log the error and continue
-            logger.error("Error saving index on close()", exc_info=True)
-
-    # Return tid of most recent record for oid if that's in the
-    # _oid2tid cache.  Else return None.  It's important to use this
-    # instead of indexing _oid2tid directly so that cache statistics
-    # can be logged.
-    def _get_cached_tid(self, oid):
-        self._oid2tid_nlookups += 1
-        result = self._oid2tid.get(oid)
-        if result is not None:
-            self._oid2tid_nhits += 1
-
-        # Log a msg every 8192 lookups (when the low 13 bits are zero).
-        if self._oid2tid_nlookups & 0x1fff == 0:
-            logger.log(BLATHER,
-                    "_oid2tid size %s lookups %s hits %s rate %.1f%%",
-                    len(self._oid2tid),
-                    self._oid2tid_nlookups,
-                    self._oid2tid_nhits,
-                    100.0 * self._oid2tid_nhits / self._oid2tid_nlookups)
-
-        return result
-
-    def abortVersion(self, src, transaction):
-        return self.commitVersion(src, '', transaction, abort=True)
-
-    def commitVersion(self, src, dest, transaction, abort=False):
-        # We are going to commit by simply storing back pointers.
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if not (src and isinstance(src, StringType)
-                and isinstance(dest, StringType)):
-            raise POSException.VersionCommitError('Invalid source version')
-
-        if src == dest:
-            raise POSException.VersionCommitError(
-                "Can't commit to same version: %s" % repr(src))
-
-        if dest and abort:
-            raise POSException.VersionCommitError(
-                "Internal error, can't abort to a version")
-
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        self._lock_acquire()
-        try:
-            return self._commitVersion(src, dest, transaction, abort)
-        finally:
-            self._lock_release()
-
-    def _commitVersion(self, src, dest, transaction, abort=False):
-        # call after checking arguments and acquiring lock
-        srcpos = self._vindex_get(src, 0)
-        spos = p64(srcpos)
-        # middle holds bytes 16:34 of a data record:
-        #    pos of transaction, len of version name, data length
-        #    commit version never writes data, so data length is always 0
-        middle = pack(">8sH8s", p64(self._pos), len(dest), z64)
-
-        if dest:
-            sd = p64(self._vindex_get(dest, 0))
-            heredelta = 66 + len(dest)
-        else:
-            sd = ''
-            heredelta = 50
-
-        here = self._pos + (self._tfile.tell() + self._thl)
-        oids = []
-        current_oids = {}
-
-        while srcpos:
-            h = self._read_data_header(srcpos)
-
-            if self._index.get(h.oid) == srcpos:
-                # This is a current record!
-                self._tindex[h.oid] = here
-                oids.append(h.oid)
-                self._tfile.write(h.oid + self._tid + spos + middle)
-                if dest:
-                    self._tvindex[dest] = here
-                    self._tfile.write(p64(h.pnv) + sd + dest)
-                    sd = p64(here)
-
-                self._tfile.write(abort and p64(h.pnv) or spos)
-                # data backpointer to src data
-                here += heredelta
-
-                current_oids[h.oid] = 1
-            else:
-                # Hm.  This is a non-current record.  Is there a
-                # current record for this oid?
-                if not current_oids.has_key(h.oid):
-                    break
-
-            srcpos = h.vprev
-            spos = p64(srcpos)
-        self._toid2tid_delete.update(current_oids)
-        return self._tid, oids
-
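-    # A minimal sketch (hypothetical helper) of the "middle" layout
-    # built in _commitVersion() above: ">8sH8s" packs the 8-byte
-    # transaction position, the 2-byte version-name length, and the
-    # 8-byte data length (always zero for a version commit) into bytes
-    # 24:42 of a data record.
-    def _sketch_middle_layout(self, tpos, dest):
-        middle = pack(">8sH8s", p64(tpos), len(dest), z64)
-        assert len(middle) == 18  # 8 + 2 + 8 bytes
-        return middle
-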
-    def getSize(self):
-        return self._pos
-
-    def _lookup_pos(self, oid):
-        try:
-            return self._index[oid]
-        except KeyError:
-            raise POSKeyError(oid)
-        except TypeError:
-            raise TypeError("invalid oid %r" % (oid,))
-
-    def loadEx(self, oid, version):
-        # A variant of load() that also returns a transaction id.
-        # ZEO wants this for managing its cache.
-        self._lock_acquire()
-        try:
-            pos = self._lookup_pos(oid)
-            h = self._read_data_header(pos, oid)
-            if h.version and h.version != version:
-                # Return data and tid from pnv (non-version data).
-
-                # If we return the old record's transaction id, then
-                # it will look to the cache like old data is current.
-                # The tid for the current data must always be greater
-                # than any non-current data.
-                data = self._loadBack_impl(oid, h.pnv)[0]
-                return data, h.tid, ""
-            if h.plen:
-                data = self._file.read(h.plen)
-                return data, h.tid, h.version
-            else:
-                # Get the data from the backpointer, but tid from
-                # current txn.
-                data, _, _, _ = self._loadBack_impl(oid, h.back)
-                th = self._read_txn_header(h.tloc)
-                return data, h.tid, h.version
-        finally:
-            self._lock_release()
-
-    def load(self, oid, version):
-        """Return pickle data and serial number."""
-        self._lock_acquire()
-        try:
-            pos = self._lookup_pos(oid)
-            h = self._read_data_header(pos, oid)
-            if h.version and h.version != version:
-                data = self._loadBack_impl(oid, h.pnv)[0]
-                return data, h.tid
-            if h.plen:
-                return self._file.read(h.plen), h.tid
-            else:
-                data = self._loadBack_impl(oid, h.back)[0]
-                return data, h.tid
-        finally:
-            self._lock_release()
-
-    def loadSerial(self, oid, serial):
-        # loadSerial must always return non-version data, because it
-        # is used by conflict resolution.
-        self._lock_acquire()
-        try:
-            pos = self._lookup_pos(oid)
-            while 1:
-                h = self._read_data_header(pos, oid)
-                if h.tid == serial:
-                    break
-                pos = h.prev
-                if not pos:
-                    raise POSKeyError(oid)
-            if h.version:
-                return self._loadBack_impl(oid, h.pnv)[0]
-            if h.plen:
-                return self._file.read(h.plen)
-            else:
-                return self._loadBack_impl(oid, h.back)[0]
-        finally:
-            self._lock_release()
-
-    def loadBefore(self, oid, tid):
-        self._lock_acquire()
-        try:
-            pos = self._lookup_pos(oid)
-            end_tid = None
-            while True:
-                h = self._read_data_header(pos, oid)
-                if h.version:
-                    # Just follow the pnv pointer to the previous
-                    # non-version data.
-                    if not h.pnv:
-                        # Object was created in version.  There is no
-                        # before data to find.
-                        return None
-                    pos = h.pnv
-                    # The end_tid for the non-version data is not affected
-                    # by versioned data records.
-                    continue
-
-                if h.tid < tid:
-                    break
-
-                pos = h.prev
-                end_tid = h.tid
-                if not pos:
-                    return None
-
-            if h.back:
-                data, _, _, _ = self._loadBack_impl(oid, h.back)
-                return data, h.tid, end_tid
-            else:
-                return self._file.read(h.plen), h.tid, end_tid
-
-        finally:
-            self._lock_release()
-
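-    # A minimal sketch (hypothetical helper) of the interval loadBefore()
-    # reports: the revision returned is visible to a reader at at_tid
-    # exactly when it was written before at_tid and not superseded until
-    # end_tid (None means it is still the current revision).
-    def _sketch_revision_visible(self, tid, end_tid, at_tid):
-        return tid < at_tid and (end_tid is None or at_tid <= end_tid)
-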
-    def modifiedInVersion(self, oid):
-        self._lock_acquire()
-        try:
-            pos = self._lookup_pos(oid)
-            h = self._read_data_header(pos, oid)
-            return h.version
-        finally:
-            self._lock_release()
-
-    def store(self, oid, serial, data, version, transaction):
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        self._lock_acquire()
-        try:
-            if oid > self._oid:
-                self.set_max_oid(oid)
-            old = self._index_get(oid, 0)
-            cached_tid = None
-            pnv = None
-            if old:
-                cached_tid = self._get_cached_tid(oid)
-                if cached_tid is None:
-                    h = self._read_data_header(old, oid)
-                    if h.version:
-                        if h.version != version:
-                            raise VersionLockError(oid, h.version)
-                        pnv = h.pnv
-                    cached_tid = h.tid
-
-                if serial != cached_tid:
-                    rdata = self.tryToResolveConflict(oid, cached_tid,
-                                                     serial, data)
-                    if rdata is None:
-                        raise POSException.ConflictError(
-                            oid=oid, serials=(cached_tid, serial), data=data)
-                    else:
-                        data = rdata
-
-            pos = self._pos
-            here = pos + self._tfile.tell() + self._thl
-            self._tindex[oid] = here
-            new = DataHeader(oid, self._tid, old, pos, len(version),
-                             len(data))
-
-            if version:
-                # Link to last record for this version:
-                pv = (self._tvindex.get(version, 0)
-                      or self._vindex.get(version, 0))
-                if pnv is None:
-                    pnv = old
-                new.setVersion(version, pnv, pv)
-                self._tvindex[version] = here
-                self._toid2tid_delete[oid] = 1
-            else:
-                self._toid2tid[oid] = self._tid
-
-            self._tfile.write(new.asString())
-            self._tfile.write(data)
-
-            # Check quota
-            if self._quota is not None and here > self._quota:
-                raise FileStorageQuotaError(
-                    "The storage quota has been exceeded.")
-
-            if old and serial != cached_tid:
-                return ConflictResolution.ResolvedSerial
-            else:
-                return self._tid
-
-        finally:
-            self._lock_release()
-
-    def _data_find(self, tpos, oid, data):
-        # Return backpointer for oid.  Must call with the lock held.
-        # This is a file offset to oid's data record if found, else 0.
-        # The data records in the transaction at tpos are searched for oid.
-        # If a data record for oid isn't found, returns 0.
-        # Else if oid's data record contains a backpointer, that
-        # backpointer is returned.
-        # Else oid's data record contains the data, and the file offset of
-        # oid's data record is returned.  This data record should contain
-        # a pickle identical to the 'data' argument.
-
-        # Unclear:  If the length of the stored data doesn't match len(data),
-        # an exception is raised.  If the lengths match but the data isn't
-        # the same, 0 is returned.  Why the discrepancy?
-        self._file.seek(tpos)
-        h = self._file.read(TRANS_HDR_LEN)
-        tid, tl, status, ul, dl, el = unpack(TRANS_HDR, h)
-        self._file.read(ul + dl + el)
-        tend = tpos + tl + 8
-        pos = self._file.tell()
-        while pos < tend:
-            h = self._read_data_header(pos)
-            if h.oid == oid:
-                # Make sure this looks like the right data record
-                if h.plen == 0:
-                    # This is also a backpointer.  Gotta trust it.
-                    return pos
-                if h.plen != len(data):
-                    # The expected data doesn't match what's in the
-                    # backpointer.  Something is wrong.
-                    logger.error("Mismatch between data and"
-                                 " backpointer at %d", pos)
-                    return 0
-                _data = self._file.read(h.plen)
-                if data != _data:
-                    return 0
-                return pos
-            pos += h.recordlen()
-            self._file.seek(pos)
-        return 0
-
-    def restore(self, oid, serial, data, version, prev_txn, transaction):
-        # A lot like store() but without all the consistency checks.  This
-        # should only be used when we /know/ the data is good, hence the
-        # method name.  While the signature looks like store() there are some
-        # differences:
-        #
-        # - serial is the serial number of /this/ revision, not of the
-        #   previous revision.  It is used instead of self._tid, which is
-        #   ignored.
-        #
-        # - Nothing is returned
-        #
-        # - data can be None, which indicates a George Bailey object
-        #   (i.e. one whose creation has been transactionally undone).
-        #
-        # prev_txn is a backpointer.  In the original database, it's possible
-        # that the data was actually living in a previous transaction.  This
-        # can happen for transactional undo and other operations, and is used
-        # as a space saving optimization.  Under some circumstances the
-        # prev_txn may not actually exist in the target database (i.e. self)
-        # for example, if it's been packed away.  In that case, the prev_txn
-        # should be considered just a hint, and is ignored if the transaction
-        # doesn't exist.
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        self._lock_acquire()
-        try:
-            if oid > self._oid:
-                self.set_max_oid(oid)
-            prev_pos = 0
-            if prev_txn is not None:
-                prev_txn_pos = self._txn_find(prev_txn, 0)
-                if prev_txn_pos:
-                    prev_pos = self._data_find(prev_txn_pos, oid, data)
-            old = self._index_get(oid, 0)
-            # Calculate the file position in the temporary file
-            here = self._pos + self._tfile.tell() + self._thl
-            # And update the temp file index
-            self._tindex[oid] = here
-            if prev_pos:
-                # If there is a valid prev_pos, don't write data.
-                data = None
-            if data is None:
-                dlen = 0
-            else:
-                dlen = len(data)
-
-            # Write the recovery data record
-            new = DataHeader(oid, serial, old, self._pos, len(version), dlen)
-            if version:
-                pnv = self._restore_pnv(oid, old, version, prev_pos) or old
-                vprev = self._tvindex.get(version, 0)
-                if not vprev:
-                    vprev = self._vindex.get(version, 0)
-                new.setVersion(version, pnv, vprev)
-                self._tvindex[version] = here
-                self._toid2tid_delete[oid] = 1
-            else:
-                self._toid2tid[oid] = serial
-
-            self._tfile.write(new.asString())
-
-            # Finally, write the data or a backpointer.
-            if data is None:
-                if prev_pos:
-                    self._tfile.write(p64(prev_pos))
-                else:
-                    # Write a zero backpointer, which indicates an
-                    # un-creation transaction.
-                    self._tfile.write(z64)
-            else:
-                self._tfile.write(data)
-        finally:
-            self._lock_release()
-
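-    # A minimal sketch (hypothetical helper) of how restore() encodes a
-    # "George Bailey" record: a zero data length means the record holds
-    # a backpointer instead of data, and a zero backpointer then marks
-    # a transactionally undone creation.
-    def _sketch_is_uncreation(self, h):
-        return h.plen == 0 and h.back == 0
-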
-    def _restore_pnv(self, oid, prev, version, bp):
-        # Find a valid pnv (previous non-version) pointer for this version.
-
-        # If there is no previous record, there can't be a pnv.
-        if not prev:
-            return None
-
-        # Load the record pointed to by prev.
-        h = self._read_data_header(prev, oid)
-        if h.version:
-            return h.pnv
-        if h.back:
-            # TODO:  Not sure the following is always true:
-            # The previous record is not for this version, yet we
-            # have a backpointer to it.  The current record must
-            # be an undo of an abort or commit, so the backpointer
-            # must be to a version record with a pnv.
-            h2 = self._read_data_header(h.back, oid)
-            if h2.version:
-                return h2.pnv
-
-        return None
-
-    def supportsUndo(self):
-        return 1
-
-    def supportsVersions(self):
-        return 1
-
-    def _clear_temp(self):
-        self._tindex.clear()
-        self._tvindex.clear()
-        self._toid2tid.clear()
-        self._toid2tid_delete.clear()
-        if self._tfile is not None:
-            self._tfile.seek(0)
-
-    def _begin(self, tid, u, d, e):
-        self._nextpos = 0
-        self._thl = TRANS_HDR_LEN + len(u) + len(d) + len(e)
-        if self._thl > 65535:
-            # one of u, d, or e may be > 65535
-            # We have to check lengths here because struct.pack
-            # doesn't raise an exception on overflow!
-            if len(u) > 65535:
-                raise FileStorageError('user name too long')
-            if len(d) > 65535:
-                raise FileStorageError('description too long')
-            if len(e) > 65535:
-                raise FileStorageError('too much extension data')
-
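-    # A minimal sketch (hypothetical helper) of the guard in _begin():
-    # the user name, description, and extension lengths each go into a
-    # 2-byte header field, so anything over 65535 must be rejected up
-    # front rather than silently truncated by struct.pack.
-    def _sketch_check_metadata(self, u, d, e):
-        for value, what in ((u, 'user name'), (d, 'description'),
-                            (e, 'extension data')):
-            if len(value) > 65535:
-                raise FileStorageError('%s too long' % what)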
-
-    def tpc_vote(self, transaction):
-        self._lock_acquire()
-        try:
-            if transaction is not self._transaction:
-                return
-            dlen = self._tfile.tell()
-            if not dlen:
-                return # No data in this trans
-            self._tfile.seek(0)
-            user, descr, ext = self._ude
-
-            self._file.seek(self._pos)
-            tl = self._thl + dlen
-
-            try:
-                h = TxnHeader(self._tid, tl, "c", len(user),
-                              len(descr), len(ext))
-                h.user = user
-                h.descr = descr
-                h.ext = ext
-                self._file.write(h.asString())
-                cp(self._tfile, self._file, dlen)
-                self._file.write(p64(tl))
-                self._file.flush()
-            except:
-                # Hm, an error occurred writing out the data. Maybe the
-                # disk is full. We don't want any turd at the end.
-                self._file.truncate(self._pos)
-                raise
-            self._nextpos = self._pos + (tl + 8)
-        finally:
-            self._lock_release()
-
-    # Keep track of the number of records that we've written
-    _records_written = 0
-
-    def _finish(self, tid, u, d, e):
-        nextpos=self._nextpos
-        if nextpos:
-            file=self._file
-
-            # Clear the checkpoint flag
-            file.seek(self._pos+16)
-            file.write(self._tstatus)
-            file.flush()
-
-            if fsync is not None: fsync(file.fileno())
-
-            self._pos = nextpos
-
-            self._index.update(self._tindex)
-            self._vindex.update(self._tvindex)
-            self._oid2tid.update(self._toid2tid)
-            for oid in self._toid2tid_delete.keys():
-                try:
-                    del self._oid2tid[oid]
-                except KeyError:
-                    pass
-
-            # Update the number of records that we've written
-            # +1 for the transaction record
-            self._records_written += len(self._tindex) + 1
-            if self._records_written >= self._records_before_save:
-                self._save_index()
-                self._records_written = 0
-                self._records_before_save = max(self._records_before_save,
-                                                len(self._index))
-
-        self._ltid = tid
-
-    def _abort(self):
-        if self._nextpos:
-            self._file.truncate(self._pos)
-            self._nextpos=0
-
-    def supportsTransactionalUndo(self):
-        return 1
-
-    def _undoDataInfo(self, oid, pos, tpos):
-        """Return the tid, data pointer, data, and version for the oid
-        record at pos"""
-        if tpos:
-            pos = tpos - self._pos - self._thl
-            tpos = self._tfile.tell()
-            h = self._tfmt._read_data_header(pos, oid)
-            afile = self._tfile
-        else:
-            h = self._read_data_header(pos, oid)
-            afile = self._file
-        if h.oid != oid:
-            raise UndoError("Invalid undo transaction id", oid)
-
-        if h.plen:
-            data = afile.read(h.plen)
-        else:
-            data = ''
-            pos = h.back
-
-        if tpos:
-            self._tfile.seek(tpos) # Restore temp file to end
-
-        return h.tid, pos, data, h.version
-
-    def getTid(self, oid):
-        self._lock_acquire()
-        try:
-            result = self._get_cached_tid(oid)
-            if result is None:
-                pos = self._lookup_pos(oid)
-                result = self._getTid(oid, pos)
-            return result
-        finally:
-            self._lock_release()
-
-    def _getTid(self, oid, pos):
-        self._file.seek(pos)
-        h = self._file.read(16)
-        assert oid == h[:8]
-        return h[8:]
-
-    def _getVersion(self, oid, pos):
-        h = self._read_data_header(pos, oid)
-        if h.version:
-            return h.version, h.pnv
-        else:
-            return "", None
-
-    def _transactionalUndoRecord(self, oid, pos, tid, pre, version):
-        """Get the indo information for a data record
-
-        Return a 5-tuple consisting of a pickle, data pointer,
-        version, packed non-version data pointer, and current
-        position.  If the pickle is true, then the data pointer must
-        be 0, but the pickle can be empty *and* the pointer 0.
-        """
-
-        copy = 1 # Can we just copy a data pointer?
-
-        # First check if it is possible to undo this record.
-        tpos = self._tindex.get(oid, 0)
-        ipos = self._index.get(oid, 0)
-        tipos = tpos or ipos
-
-        if tipos != pos:
-            # Eek, a later transaction modified the data, but,
-            # maybe it is pointing at the same data we are.
-            ctid, cdataptr, cdata, cver = self._undoDataInfo(oid, ipos, tpos)
-            # Versions of undone record and current record *must* match!
-            if cver != version:
-                raise UndoError('Current and undone versions differ', oid)
-
-            if cdataptr != pos:
-                # We aren't sure if we are talking about the same data
-                try:
-                    if (
-                        # The current record wrote a new pickle
-                        cdataptr == tipos
-                        or
-                        # Backpointers are different
-                        self._loadBackPOS(oid, pos) !=
-                        self._loadBackPOS(oid, cdataptr)
-                        ):
-                        if pre and not tpos:
-                            copy = 0 # we'll try to do conflict resolution
-                        else:
-                            # We bail if we don't have a previous
-                            # record, which should be impossible.
-                            raise UndoError("no previous record", oid)
-                except KeyError:
-                    # LoadBack gave us a key error. Bail.
-                    raise UndoError("_loadBack() failed", oid)
-
-        # Return the data that should be written in the undo record.
-        if not pre:
-            # There is no previous revision, because the object creation
-            # is being undone.
-            return "", 0, "", "", ipos
-
-        version, snv = self._getVersion(oid, pre)
-        if copy:
-            # we can just copy our previous-record pointer forward
-            return "", pre, version, snv, ipos
-
-        try:
-            bdata = self._loadBack_impl(oid, pre)[0]
-        except KeyError:
-            # couldn't find oid; what's the real explanation for this?
-            raise UndoError("_loadBack() failed for %s", oid)
-        data = self.tryToResolveConflict(oid, ctid, tid, bdata, cdata)
-
-        if data:
-            return data, 0, version, snv, ipos
-
-        raise UndoError("Some data were modified by a later transaction", oid)
-
-    # undoLog() returns a description dict that includes an id entry.
-    # The id is opaque to the client, but contains the transaction id.
-    # The transactionalUndo() implementation does a simple linear
-    # search through the file (from the end) to find the transaction.
-
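-    # A minimal sketch (hypothetical helper) of the round trip: the
-    # opaque id is just the 8-byte tid, base64-encoded (see _txn_undo()
-    # and UndoSearch._readnext()).
-    def _sketch_undo_id_roundtrip(self, tid):
-        undo_id = base64.encodestring(tid).rstrip()
-        assert base64.decodestring(undo_id + '\n') == tid
-        return undo_id
-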
-    def undoLog(self, first=0, last=-20, filter=None):
-        if last < 0:
-            last = first - last + 1
-        self._lock_acquire()
-        try:
-            if self._pack_is_in_progress:
-                raise UndoError(
-                    'Undo is currently disabled for database maintenance.<p>')
-            us = UndoSearch(self._file, self._pos, first, last, filter)
-            while not us.finished():
-                # Hold lock for batches of 20 searches, so default search
-                # parameters will finish without letting another thread run.
-                for i in range(20):
-                    if us.finished():
-                        break
-                    us.search()
-                # Give another thread a chance, so that a long undoLog()
-                # operation doesn't block all other activity.
-                self._lock_release()
-                self._lock_acquire()
-            return us.results
-        finally:
-            self._lock_release()
-
-    def undo(self, transaction_id, transaction):
-        """Undo a transaction, given by transaction_id.
-
-        Do so by writing new data that reverses the action taken by
-        the transaction.
-
-        Usually, we can get by with just copying a data pointer, by
-        writing a file position rather than a pickle. Sometimes, we
-        may do conflict resolution, in which case we actually copy
-        new data that results from resolution.
-        """
-
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        self._lock_acquire()
-        try:
-            return self._txn_undo(transaction_id)
-        finally:
-            self._lock_release()
-
-    def _txn_undo(self, transaction_id):
-        # Find the right transaction to undo and call _txn_undo_write().
-        tid = base64.decodestring(transaction_id + '\n')
-        assert len(tid) == 8
-        tpos = self._txn_find(tid, 1)
-        tindex = self._txn_undo_write(tpos)
-        self._tindex.update(tindex)
-        # Arrange to clear the affected oids from the oid2tid cache.
-        # It's too painful to try to update them to correct current
-        # values instead.
-        self._toid2tid_delete.update(tindex)
-        return self._tid, tindex.keys()
-
-    def _txn_find(self, tid, stop_at_pack):
-        pos = self._pos
-        while pos > 39:
-            self._file.seek(pos - 8)
-            pos = pos - u64(self._file.read(8)) - 8
-            self._file.seek(pos)
-            h = self._file.read(TRANS_HDR_LEN)
-            _tid = h[:8]
-            if _tid == tid:
-                return pos
-            if stop_at_pack:
-                # check the status field of the transaction header
-                if h[16] == 'p':
-                    break
-        raise UndoError("Invalid transaction id")
-
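-    # A minimal sketch (hypothetical helper) of the backward step used
-    # by _txn_find() above: every transaction record is followed by a
-    # redundant 8-byte copy of its length, so the scan can hop from the
-    # end of one record to the start of the one before it.
-    def _sketch_prev_txn_pos(self, pos):
-        # pos is the position just past a transaction record; the 8
-        # bytes before it hold that record's length.
-        self._file.seek(pos - 8)
-        return pos - u64(self._file.read(8)) - 8
-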
-    def _txn_undo_write(self, tpos):
-        # a helper function to write the data records for transactional undo
-
-        otloc = self._pos
-        here = self._pos + self._tfile.tell() + self._thl
-        base = here - self._tfile.tell()
-        # Let's move the file pointer back to the start of the txn record.
-        th = self._read_txn_header(tpos)
-        if th.status != " ":
-            raise UndoError('non-undoable transaction')
-        tend = tpos + th.tlen
-        pos = tpos + th.headerlen()
-        tindex = {}
-
-        # keep track of failures, because we may succeed later
-        failures = {}
-        # Read the data records for this transaction
-        while pos < tend:
-            h = self._read_data_header(pos)
-            if h.oid in failures:
-                del failures[h.oid] # second chance!
-
-            assert base + self._tfile.tell() == here, (here, base,
-                                                       self._tfile.tell())
-            try:
-                p, prev, v, snv, ipos = self._transactionalUndoRecord(
-                    h.oid, pos, h.tid, h.prev, h.version)
-            except UndoError, v:
-                # Don't fail right away. We may be redeemed later!
-                failures[h.oid] = v
-            else:
-                new = DataHeader(h.oid, self._tid, ipos, otloc, len(v),
-                                 len(p))
-                if v:
-                    vprev = self._tvindex.get(v, 0) or self._vindex.get(v, 0)
-                    new.setVersion(v, snv, vprev)
-                    self._tvindex[v] = here
-
-                # TODO:  This seek shouldn't be necessary, but some other
-                # bit of code is messing with the file pointer.
-                assert self._tfile.tell() == here - base, (here, base,
-                                                           self._tfile.tell())
-                self._tfile.write(new.asString())
-                if p:
-                    self._tfile.write(p)
-                else:
-                    self._tfile.write(p64(prev))
-                tindex[h.oid] = here
-                here += new.recordlen()
-
-            pos += h.recordlen()
-            if pos > tend:
-                raise UndoError("non-undoable transaction")
-
-        if failures:
-            raise MultipleUndoErrors(failures.items())
-
-        return tindex
-
-
-    def versionEmpty(self, version):
-        if not version:
-            # The interface is silent on this case. I think that this should
-            # be an error, but Barry thinks this should return 1 if we have
-            # any non-version data. This would be excruciatingly painful to
-            # test, so I must be right. ;)
-            raise POSException.VersionError(
-                'The version must be a non-empty string')
-        self._lock_acquire()
-        try:
-            index=self._index
-            file=self._file
-            seek=file.seek
-            read=file.read
-            srcpos=self._vindex_get(version, 0)
-            t=tstatus=None
-            while srcpos:
-                seek(srcpos)
-                oid=read(8)
-                if index[oid]==srcpos: return 0
-                h=read(50) # serial, prev(oid), tloc, vlen, plen, pnv, pv
-                tloc=h[16:24]
-                if t != tloc:
-                    # We haven't checked this transaction before,
-                    # get its status.
-                    t=tloc
-                    seek(u64(t)+16)
-                    tstatus=read(1)
-
-                if tstatus != 'u': return 1
-
-                spos=h[-8:]
-                srcpos=u64(spos)
-
-            return 1
-        finally: self._lock_release()
-
-    def versions(self, max=None):
-        r=[]
-        a=r.append
-        keys=self._vindex.keys()
-        if max is not None: keys=keys[:max]
-        for version in keys:
-            if self.versionEmpty(version): continue
-            a(version)
-            if max and len(r) >= max: return r
-
-        return r
-
-    def history(self, oid, version=None, size=1, filter=None):
-        self._lock_acquire()
-        try:
-            r = []
-            pos = self._lookup_pos(oid)
-            wantver = version
-
-            while 1:
-                if len(r) >= size: return r
-                h = self._read_data_header(pos)
-
-                if h.version:
-                    if wantver is not None and h.version != wantver:
-                        if h.prev:
-                            pos = h.prev
-                            continue
-                        else:
-                            return r
-                else:
-                    version = ""
-                    wantver = None
-
-                th = self._read_txn_header(h.tloc)
-                if th.ext:
-                    d = loads(th.ext)
-                else:
-                    d = {}
-
-                d.update({"time": TimeStamp(h.tid).timeTime(),
-                          "user_name": th.user,
-                          "description": th.descr,
-                          "tid": h.tid,
-                          "version": h.version,
-                          "size": h.plen,
-                          })
-
-                if filter is None or filter(d):
-                    r.append(d)
-
-                if h.prev:
-                    pos = h.prev
-                else:
-                    return r
-        finally:
-            self._lock_release()
-
-    def _redundant_pack(self, file, pos):
-        assert pos > 8, pos
-        file.seek(pos - 8)
-        p = u64(file.read(8))
-        file.seek(pos - p + 8)
-        return file.read(1) not in ' u'
-
-    def pack(self, t, referencesf):
-        """Copy data from the current database file to a packed file
-
-        Non-current records from transactions with time stamps earlier
-        than the pack time are omitted, as are all undone records.
-
-        Also, data back pointers that point before the pack time are
-        resolved and the associated data are copied, since the old
-        records are not copied.
-        """
-        if self._is_read_only:
-            raise POSException.ReadOnlyError()
-
-        stop=`TimeStamp(*time.gmtime(t)[:5]+(t%60,))`
-        if stop==z64: raise FileStorageError, 'Invalid pack time'
-
-        # If the storage is empty, there's nothing to do.
-        if not self._index:
-            return
-
-        self._lock_acquire()
-        try:
-            if self._pack_is_in_progress:
-                raise FileStorageError, 'Already packing'
-            self._pack_is_in_progress = True
-            current_size = self.getSize()
-        finally:
-            self._lock_release()
-
-        p = FileStoragePacker(self._file_name, stop,
-                              self._lock_acquire, self._lock_release,
-                              self._commit_lock_acquire,
-                              self._commit_lock_release,
-                              current_size)
-        try:
-            opos = None
-            try:
-                opos = p.pack()
-            except RedundantPackWarning, detail:
-                logger.info(str(detail))
-            if opos is None:
-                return
-            oldpath = self._file_name + ".old"
-            self._lock_acquire()
-            try:
-                self._file.close()
-                try:
-                    if os.path.exists(oldpath):
-                        os.remove(oldpath)
-                    os.rename(self._file_name, oldpath)
-                except Exception:
-                    self._file = open(self._file_name, 'r+b')
-                    raise
-
-                # OK, we're beyond the point of no return
-                os.rename(self._file_name + '.pack', self._file_name)
-                self._file = open(self._file_name, 'r+b')
-                self._initIndex(p.index, p.vindex, p.tindex, p.tvindex,
-                                p.oid2tid, p.toid2tid,
-                                p.toid2tid_delete)
-                self._pos = opos
-                self._save_index()
-            finally:
-                self._lock_release()
-        finally:
-            if p.locked:
-                self._commit_lock_release()
-            self._lock_acquire()
-            self._pack_is_in_progress = False
-            self._lock_release()
-
-    def iterator(self, start=None, stop=None):
-        return FileIterator(self._file_name, start, stop)
-
-    def lastTransaction(self):
-        """Return transaction id for last committed transaction"""
-        return self._ltid
-
-    def lastTid(self, oid):
-        """Return last serialno committed for object oid.
-
-        If there is no serialno for this oid -- which can only occur
-        if it is a new object -- return None.
-        """
-        try:
-            return self.getTid(oid)
-        except KeyError:
-            return None
-
-    def cleanup(self):
-        """Remove all files created by this storage."""
-        for ext in '', '.old', '.tmp', '.lock', '.index', '.pack':
-            try:
-                os.remove(self._file_name + ext)
-            except OSError, e:
-                if e.errno != errno.ENOENT:
-                    raise
-
-    def record_iternext(self, next=None):
-        index = self._index
-        oid = index.minKey(next)
-
-        try:
-            next_oid = index.minKey(self.new_oid(oid))
-        except ValueError: # "empty tree" error
-            next_oid = None
-
-        data, tid = self.load(oid, "") # ignore versions
-        return oid, tid, data, next_oid
-
-
-
-def shift_transactions_forward(index, vindex, tindex, file, pos, opos):
-    """Copy transactions forward in the data file
-
-    This might be done as part of a recovery effort
-    """
-
-    # Cache a bunch of methods
-    seek=file.seek
-    read=file.read
-    write=file.write
-
-    index_get=index.get
-    vindex_get=vindex.get
-
-    # Initialize.
-    pv=z64
-    p1=opos
-    p2=pos
-    offset=p2-p1
-
-    # Copy the data in two stages.  In the packing stage,
-    # we skip records that are non-current or that are for
-    # unreferenced objects. We also skip undone transactions.
-    #
-    # After the packing stage, we copy everything but undone
-    # transactions, however, we have to update various back pointers.
-    # We have to have the storage lock in the second phase to keep
-    # data from being changed while we're copying.
-    pnv=None
-    while 1:
-
-        # Read the transaction record
-        seek(pos)
-        h=read(TRANS_HDR_LEN)
-        if len(h) < TRANS_HDR_LEN: break
-        tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h)
-        if status=='c': break # Oops, we found a checkpoint flag.
-        tl=u64(stl)
-        tpos=pos
-        tend=tpos+tl
-
-        otpos=opos # start pos of output trans
-
-        thl=ul+dl+el
-        h2=read(thl)
-        if len(h2) != thl:
-            raise PackError(opos)
-
-        # write out the transaction record
-        seek(opos)
-        write(h)
-        write(h2)
-
-        thl=TRANS_HDR_LEN+thl
-        pos=tpos+thl
-        opos=otpos+thl
-
-        while pos < tend:
-            # Read the data records for this transaction
-            seek(pos)
-            h=read(DATA_HDR_LEN)
-            oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
-            plen=u64(splen)
-            dlen=DATA_HDR_LEN+(plen or 8)
-
-            if vlen:
-                dlen=dlen+(16+vlen)
-                pnv=u64(read(8))
-                # skip position of previous version record
-                seek(8,1)
-                version=read(vlen)
-                pv=p64(vindex_get(version, 0))
-                if status != 'u': vindex[version]=opos
-
-            tindex[oid]=opos
-
-            if plen: p=read(plen)
-            else:
-                p=read(8)
-                p=u64(p)
-                if p >= p2: p=p-offset
-                elif p >= p1:
-                    # Ick, we're in trouble. Let's bail
-                    # to the index and hope for the best
-                    p=index_get(oid, 0)
-                p=p64(p)
-
-            # WRITE
-            seek(opos)
-            sprev=p64(index_get(oid, 0))
-            write(pack(DATA_HDR,
-                       oid,serial,sprev,p64(otpos),vlen,splen))
-            if vlen:
-                if not pnv: write(z64)
-                else:
-                    if pnv >= p2: pnv=pnv-offset
-                    elif pnv >= p1:
-                        pnv=index_get(oid, 0)
-
-                    write(p64(pnv))
-                write(pv)
-                write(version)
-
-            write(p)
-
-            opos=opos+dlen
-            pos=pos+dlen
-
-        # skip the (intentionally redundant) transaction length
-        pos=pos+8
-
-        if status != 'u':
-            index.update(tindex) # Record the position
-
-        tindex.clear()
-
-        write(stl)
-        opos=opos+8
-
-    return opos
-
-def search_back(file, pos):
-    seek=file.seek
-    read=file.read
-    seek(0,2)
-    s=p=file.tell()
-    while p > pos:
-        seek(p-8)
-        l=u64(read(8))
-        if l <= 0: break
-        p=p-l-8
-
-    return p, s
-
-def recover(file_name):
-    file=open(file_name, 'r+b')
-    index={}
-    vindex={}
-    tindex={}
-
-    pos, oid, tid = read_index(
-        file, file_name, index, vindex, tindex, recover=1)
-    if oid is not None:
-        print "Nothing to recover"
-        return
-
-    opos=pos
-    pos, sz = search_back(file, pos)
-    if pos < sz:
-        npos = shift_transactions_forward(
-            index, vindex, tindex, file, pos, opos,
-            )
-    else:
-        # Nothing to shift; truncate right after the last valid
-        # transaction found by read_index().
-        npos = opos
-
-    file.truncate(npos)
-
-    print "Recovered file, lost %s, ended up with %s bytes" % (
-        pos-opos, npos)
-
-
-
-def read_index(file, name, index, vindex, tindex, stop='\377'*8,
-               ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0):
-    """Scan the file storage and update the index.
-
-    Returns file position, max oid, and last transaction id.  It also
-    stores index information in the three dictionary arguments.
-
-    Arguments:
-    file -- a file object (the Data.fs)
-    name -- the name of the file (presumably file.name)
-    index -- fsIndex, oid -> data record file offset
-    vindex -- dictionary, oid -> data record offset for version data
-    tindex -- dictionary, oid -> data record offset
-              tindex is cleared before return
-
-    There are several default arguments that affect the scan or the
-    return values.  TODO:  document them.
-
-    start -- the file position at which to start scanning for oids added
-             beyond the ones the passed-in indices know about.  The .index
-             file caches the highest ._pos FileStorage knew about when the
-             .index file was last saved, and that's the intended value
-             to pass in for start; accept the default (and pass empty
-             indices) to recreate the index from scratch
-    maxoid -- ignored (it meant something prior to ZODB 3.2.6; the argument
-              still exists just so the signature of read_index() stayed the
-              same)
-
-    The file position returned is the position just after the last
-    valid transaction record.  The oid returned is the maximum object
-    id in `index`, or z64 if the index is empty.  The transaction id is the
-    tid of the last transaction, or ltid if the index is empty.
-    """
-
-    read = file.read
-    seek = file.seek
-    seek(0, 2)
-    file_size=file.tell()
-    fmt = TempFormatter(file)
-
-    if file_size:
-        if file_size < start:
-            raise FileStorageFormatError, name
-        seek(0)
-        if read(4) != packed_version:
-            raise FileStorageFormatError, name
-    else:
-        if not read_only:
-            file.write(packed_version)
-        return 4L, z64, ltid
-
-    index_get=index.get
-
-    pos=start
-    seek(start)
-    tid='\0'*7+'\1'
-
-    while 1:
-        # Read the transaction record
-        h=read(TRANS_HDR_LEN)
-        if not h: break
-        if len(h) != TRANS_HDR_LEN:
-            if not read_only:
-                logger.warning('%s truncated at %s', name, pos)
-                seek(pos)
-                file.truncate()
-            break
-
-        tid, tl, status, ul, dl, el = unpack(TRANS_HDR,h)
-        if el < 0: el=t32-el
-
-        if tid <= ltid:
-            logger.warning("%s time-stamp reduction at %s", name, pos)
-        ltid = tid
-
-        if pos+(tl+8) > file_size or status=='c':
-            # Hm, the data were truncated or the checkpoint flag wasn't
-            # cleared.  They may also be corrupted,
-            # in which case, we don't want to totally lose the data.
-            if not read_only:
-                logger.warning("%s truncated, possibly due to damaged"
-                               " records at %s", name, pos)
-                _truncate(file, name, pos)
-            break
-
-        if status not in ' up':
-            logger.warning('%s has invalid status, %s, at %s',
-                           name, status, pos)
-
-        if tl < (TRANS_HDR_LEN+ul+dl+el):
-            # We're in trouble. Find out if this is bad data in the
-            # middle of the file, or just a turd that Win 9x dropped
-            # at the end when the system crashed.
-            # Skip to the end and read what should be the transaction length
-            # of the last transaction.
-            seek(-8, 2)
-            rtl=u64(read(8))
-            # Now check to see if the redundant transaction length is
-            # reasonable:
-            if file_size - rtl < pos or rtl < TRANS_HDR_LEN:
-                logger.critical('%s has invalid transaction header at %s',
-                                name, pos)
-                if not read_only:
-                    logger.warning(
-                         "It appears that there is invalid data at the end of "
-                         "the file, possibly due to a system crash.  %s "
-                         "truncated to recover from bad data at end." % name)
-                    _truncate(file, name, pos)
-                break
-            else:
-                if recover: return pos, None, None
-                panic('%s has invalid transaction header at %s', name, pos)
-
-        if tid >= stop:
-            break
-
-        tpos = pos
-        tend = tpos + tl
-
-        if status=='u':
-            # Undone transaction, skip it
-            seek(tend)
-            h = u64(read(8))
-            if h != tl:
-                if recover: return tpos, None, None
-                panic('%s has inconsistent transaction length at %s',
-                      name, pos)
-            pos = tend + 8
-            continue
-
-        pos = tpos+ TRANS_HDR_LEN + ul + dl + el
-        while pos < tend:
-            # Read the data records for this transaction
-            h = fmt._read_data_header(pos)
-            dlen = h.recordlen()
-            tindex[h.oid] = pos
-
-            if h.version:
-                vindex[h.version] = pos
-
-            if pos + dlen > tend or h.tloc != tpos:
-                if recover:
-                    return tpos, None, None
-                panic("%s data record exceeds transaction record at %s",
-                      name, pos)
-
-            if index_get(h.oid, 0) != h.prev:
-                if h.prev:
-                    if recover: return tpos, None, None
-                    logger.error("%s incorrect previous pointer at %s",
-                                 name, pos)
-                else:
-                    logger.warning("%s incorrect previous pointer at %s",
-                                   name, pos)
-
-            pos += dlen
-
-        if pos != tend:
-            if recover:
-                return tpos, None, None
-            panic("%s data records don't add up at %s",name,tpos)
-
-        # Read the (intentionally redundant) transaction length
-        seek(pos)
-        h = u64(read(8))
-        if h != tl:
-            if recover:
-                return tpos, None, None
-            panic("%s redundant transaction length check failed at %s",
-                  name, pos)
-        pos += 8
-
-        index.update(tindex)
-        tindex.clear()
-
-    # Caution:  fsIndex doesn't have an efficient __nonzero__ or __len__.
-    # That's why we do try/except instead.  fsIndex.maxKey() is fast.
-    try:
-        maxoid = index.maxKey()
-    except ValueError:
-        # The index is empty.
-        maxoid = z64
-
-    return pos, maxoid, ltid
-
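-# A minimal sketch (hypothetical, illustration only) of rebuilding the
-# in-memory indices from scratch with read_index(), the way FileStorage
-# does when the .index file is missing or stale: pass empty indices and
-# accept the default start position.
-def _sketch_rebuild_index(file_name):
-    from ZODB.fsIndex import fsIndex
-    f = open(file_name, 'rb')
-    index, vindex, tindex = fsIndex(), {}, {}
-    pos, maxoid, ltid = read_index(f, file_name, index, vindex, tindex,
-                                   read_only=1)
-    return index, pos, maxoid, ltid
-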
-
-def _truncate(file, name, pos):
-    file.seek(0, 2)
-    file_size = file.tell()
-    try:
-        i = 0
-        while 1:
-            oname='%s.tr%s' % (name, i)
-            if os.path.exists(oname):
-                i += 1
-            else:
-                logger.warning("Writing truncated data from %s to %s",
-                               name, oname)
-                o = open(oname,'wb')
-                file.seek(pos)
-                cp(file, o, file_size-pos)
-                o.close()
-                break
-    except:
-        logger.error("couldn\'t write truncated data for %s", name,
-              exc_info=True)
-        raise POSException.StorageSystemError, (
-            "Couldn't save truncated data")
-
-    file.seek(pos)
-    file.truncate()
-
-class Iterator:
-    """A General simple iterator that uses the Python for-loop index protocol
-    """
-    __index=-1
-    __current=None
-
-    def __getitem__(self, i):
-        __index=self.__index
-        while i > __index:
-            __index=__index+1
-            self.__current=self.next(__index)
-
-        self.__index=__index
-        return self.__current
-
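-# A minimal sketch (hypothetical, illustration only) of the legacy
-# protocol Iterator relies on: a for-loop calls __getitem__(0),
-# __getitem__(1), ... until IndexError escapes, so subclasses implement
-# next(index) and raise IndexError when exhausted.
-class _SketchCountdown(Iterator):
-    def __init__(self, n):
-        self._n = n
-    def next(self, index=0):
-        if index >= self._n:
-            raise IndexError, index
-        return self._n - index
-
-# list(_SketchCountdown(3)) == [3, 2, 1]
-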
-
-class FileIterator(Iterator, FileStorageFormatter):
-    """Iterate over the transactions in a FileStorage file.
-    """
-    _ltid = z64
-    _file = None
-
-    def __init__(self, file, start=None, stop=None):
-        if isinstance(file, str):
-            file = open(file, 'rb')
-        self._file = file
-        if file.read(4) != packed_version:
-            raise FileStorageFormatError, file.name
-        file.seek(0,2)
-        self._file_size = file.tell()
-        self._pos = 4L
-        assert start is None or isinstance(start, str)
-        assert stop is None or isinstance(stop, str)
-        if start:
-            self._skip_to_start(start)
-        self._stop = stop
-
-    def __len__(self):
-        # Define a bogus __len__() to make the iterator work
-        # with code like builtin list() and tuple() in Python 2.1.
-        # There's a lot of C code that expects a sequence to have
-        # an __len__() but can cope with any sort of mistake in its
-        # implementation.  So just return 0.
-        return 0
-
-    # This allows us to pass an iterator as the `other' argument to
-    # copyTransactionsFrom() in BaseStorage.  The advantage here is that we
-    # can create the iterator manually, e.g. setting start and stop, and then
-    # just let copyTransactionsFrom() do its thing.
-    def iterator(self):
-        return self
-
-    def close(self):
-        file = self._file
-        if file is not None:
-            self._file = None
-            file.close()
-
-    def _skip_to_start(self, start):
-        # Scan through the transaction records doing almost no sanity
-        # checks.
-        file = self._file
-        read = file.read
-        seek = file.seek
-        while 1:
-            seek(self._pos)
-            h = read(16)
-            if len(h) < 16:
-                return
-            tid, stl = unpack(">8s8s", h)
-            if tid >= start:
-                return
-            tl = u64(stl)
-            try:
-                self._pos += tl + 8
-            except OverflowError:
-                self._pos = long(self._pos) + tl + 8
-            if __debug__:
-                # Sanity check
-                seek(self._pos - 8, 0)
-                rtl = read(8)
-                if rtl != stl:
-                    pos = file.tell() - 8
-                    panic("%s has inconsistent transaction length at %s "
-                          "(%s != %s)", file.name, pos, u64(rtl), u64(stl))
-
-    def next(self, index=0):
-        if self._file is None:
-            # A closed iterator.  Is IOError the best we can do?  For
-            # now, mimic a read on a closed file.
-            raise IOError, 'iterator is closed'
-
-        pos = self._pos
-        while 1:
-            # Read the transaction record
-            try:
-                h = self._read_txn_header(pos)
-            except CorruptedDataError, err:
-                # If buf is empty, we've reached EOF.
-                if not err.buf:
-                    break
-                raise
-
-            if h.tid <= self._ltid:
-                logger.warning("%s time-stamp reduction at %s",
-                               self._file.name, pos)
-            self._ltid = h.tid
-
-            if self._stop is not None and h.tid > self._stop:
-                raise IndexError, index
-
-            if h.status == "c":
-                # Assume we've hit the last, in-progress transaction
-                raise IndexError, index
-
-            if pos + h.tlen + 8 > self._file_size:
-                # Hm, the data were truncated or the checkpoint flag wasn't
-                # cleared.  They may also be corrupted,
-                # in which case, we don't want to totally lose the data.
-                logger.warning("%s truncated, possibly due to"
-                               " damaged records at %s", self._file.name, pos)
-                break
-
-            if h.status not in " up":
-                logger.warning('%s has invalid status,'
-                               ' %s, at %s', self._file.name, h.status, pos)
-
-            if h.tlen < h.headerlen():
-                # We're in trouble. Find out if this is bad data in
-                # the middle of the file, or just a turd that Win 9x
-                # dropped at the end when the system crashed.  Skip to
-                # the end and read what should be the transaction
-                # length of the last transaction.
-                self._file.seek(-8, 2)
-                rtl = u64(self._file.read(8))
-                # Now check to see if the redundant transaction length is
-                # reasonable:
-                if self._file_size - rtl < pos or rtl < TRANS_HDR_LEN:
-                    logger.critical("%s has invalid transaction header at %s",
-                                    self._file.name, pos)
-                    logger.warning(
-                         "It appears that there is invalid data at the end of "
-                         "the file, possibly due to a system crash.  %s "
-                         "truncated to recover from bad data at end."
-                         % self._file.name)
-                    break
-                else:
-                    logger.warning("%s has invalid transaction header at %s",
-                                   self._file.name, pos)
-                    break
-
-            tpos = pos
-            tend = tpos + h.tlen
-
-            if h.status != "u":
-                pos = tpos + h.headerlen()
-                e = {}
-                if h.elen:
-                    try:
-                        e = loads(h.ext)
-                    except:
-                        pass
-
-                result = RecordIterator(h.tid, h.status, h.user, h.descr,
-                                        e, pos, tend, self._file, tpos)
-
-            # Read the (intentionally redundant) transaction length
-            self._file.seek(tend)
-            rtl = u64(self._file.read(8))
-            if rtl != h.tlen:
-                logger.warning("%s redundant transaction length check"
-                               " failed at %s", self._file.name, tend)
-                break
-            self._pos = tend + 8
-
-            return result
-
-        raise IndexError, index
-
-class RecordIterator(Iterator, BaseStorage.TransactionRecord,
-                     FileStorageFormatter):
-    """Iterate over the transactions in a FileStorage file."""
-    def __init__(self, tid, status, user, desc, ext, pos, tend, file, tpos):
-        self.tid = tid
-        self.status = status
-        self.user = user
-        self.description = desc
-        self._extension = ext
-        self._pos = pos
-        self._tend = tend
-        self._file = file
-        self._tpos = tpos
-
-    def next(self, index=0):
-        pos = self._pos
-        while pos < self._tend:
-            # Read the data records for this transaction
-            h = self._read_data_header(pos)
-            dlen = h.recordlen()
-
-            if pos + dlen > self._tend or h.tloc != self._tpos:
-                logger.warning("%s data record exceeds transaction"
-                               " record at %s", file.name, pos)
-                break
-
-            self._pos = pos + dlen
-            prev_txn = None
-            if h.plen:
-                data = self._file.read(h.plen)
-            else:
-                if h.back == 0:
-                    # If the backpointer is 0, then this transaction
-                    # undoes the object creation.  It either aborts
-                    # the version that created the object or undid the
-                    # transaction that created it.  Return None
-                    # instead of a pickle to indicate this.
-                    data = None
-                else:
-                    data, tid = self._loadBackTxn(h.oid, h.back, False)
-                    # Caution:  looks like this only goes one link back.
-                    # Should it go to the original data like BDBFullStorage?
-                    prev_txn = self.getTxnFromData(h.oid, h.back)
-
-            r = Record(h.oid, h.tid, h.version, data, prev_txn, pos)
-            return r
-
-        raise IndexError, index
-
-class Record(BaseStorage.DataRecord):
-    """An abstract database record."""
-    def __init__(self, oid, tid, version, data, prev, pos):
-        self.oid = oid
-        self.tid = tid
-        self.version = version
-        self.data = data
-        self.data_txn = prev
-        self.pos = pos
-
-class UndoSearch:
-
-    def __init__(self, file, pos, first, last, filter=None):
-        self.file = file
-        self.pos = pos
-        self.first = first
-        self.last = last
-        self.filter = filter
-        self.i = 0
-        self.results = []
-        self.stop = 0
-
-    def finished(self):
-        """Return True if UndoSearch has found enough records."""
-        # BAW: Why 39 please?  This makes no sense (see also below).
-        return self.i >= self.last or self.pos < 39 or self.stop
-
-    def search(self):
-        """Search for another record."""
-        dict = self._readnext()
-        if dict is not None and (self.filter is None or self.filter(dict)):
-            if self.i >= self.first:
-                self.results.append(dict)
-            self.i += 1
-
-    def _readnext(self):
-        """Read the next record from the storage."""
-        self.file.seek(self.pos - 8)
-        self.pos -= u64(self.file.read(8)) + 8
-        self.file.seek(self.pos)
-        h = self.file.read(TRANS_HDR_LEN)
-        tid, tl, status, ul, dl, el = unpack(TRANS_HDR, h)
-        if status == 'p':
-            self.stop = 1
-            return None
-        if status != ' ':
-            return None
-        d = u = ''
-        if ul:
-            u = self.file.read(ul)
-        if dl:
-            d = self.file.read(dl)
-        e = {}
-        if el:
-            try:
-                e = loads(self.file.read(el))
-            except:
-                pass
-        d = {'id': base64.encodestring(tid).rstrip(),
-             'time': TimeStamp(tid).timeTime(),
-             'user_name': u,
-             'description': d}
-        d.update(e)
-        return d
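
For orientation, the UndoSearch protocol above is driven incrementally by the storage's undo machinery; a minimal calling loop, assuming `f` is an open storage file and `pos` sits just past the last complete transaction, could look like:

    s = UndoSearch(f, pos, first=0, last=20)
    while not s.finished():
        s.search()            # appends matching transaction dicts to s.results
    for d in s.results:
        print d['id'], d['user_name'], d['description']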
diff --git a/branches/bug1734/src/ZODB/FileStorage/__init__.py b/branches/bug1734/src/ZODB/FileStorage/__init__.py
deleted file mode 100644
index 9f438d38..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# this is a package
-
-from ZODB.FileStorage.FileStorage \
-     import FileStorage, RecordIterator, FileIterator, Record, packed_version
diff --git a/branches/bug1734/src/ZODB/FileStorage/format.py b/branches/bug1734/src/ZODB/FileStorage/format.py
deleted file mode 100644
index 5f65d983..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/format.py
+++ /dev/null
@@ -1,354 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-#
-#  File-based ZODB storage
-#
-# Files are arranged as follows.
-#
-#   - The first 4 bytes are a file identifier.
-#
-#   - The rest of the file consists of a sequence of transaction
-#     "records".
-#
-# A transaction record consists of:
-#
-#   - 8-byte transaction id, which is also a time stamp.
-#
-#   - 8-byte transaction record length - 8.
-#
-#   - 1-byte status code
-#     ' '  (a blank) completed transaction that hasn't been packed
-#     'p'  completed transaction that has been packed
-#     'c'  checkpoint -- a transaction in progress, at the end of the file;
-#          it's been thru vote() but not finish(); if finish() completes
-#          normally, it will be overwritten with a blank; if finish() dies
-#          (e.g., out of disk space), cleanup code will try to truncate
-#          the file to chop off this incomplete transaction
-#     'u'  uncertain; no longer used; was previously used to record something
-#          about non-transactional undo
-#
-#   - 2-byte length of user name
-#
-#   - 2-byte length of description
-#
-#   - 2-byte length of extension attributes
-#
-#   -   user name
-#
-#   -   description
-#
-#   -   extension attributes
-#
-#   * A sequence of data records
-#
-#   - 8-byte redundant transaction record length - 8
-#
-# A data record consists of
-#
-#   - 8-byte oid.
-#
-#   - 8-byte tid, which matches the transaction id in the transaction record.
-#
-#   - 8-byte previous-record file-position.
-#
-#   - 8-byte beginning of transaction record file position.
-#
-#   - 2-byte version length
-#
-#   - 8-byte data length
-#
-#   ? 8-byte position of non-version data record
-#     (if version length > 0)
-#
-#   ? 8-byte position of previous record in this version
-#     (if version length > 0)
-#
-#   ? version string
-#     (if version length > 0)
-#
-#   ? data
-#     (if data length > 0)
-#
-#   ? 8-byte position of data record containing data
-#     (if data length == 0)
-#
-# Note that the lengths and positions are all big-endian.
-# Also, the object ids and time stamps are big-endian, so comparisons
-# are meaningful.
-#
-# Version handling
-#
-#   There isn't a separate store for versions.  Each record has a
-#   version field, indicating what version it is in.  The records in a
-#   version form a linked list.  Each record that has a non-empty
-#   version string has a pointer to the previous record in the version.
-#   Version back pointers are retained *even* when versions are
-#   committed or aborted or when transactions are undone.
-#
-#   There is a notion of "current" version records, which are the
-#   records in a version that are the current records for their
-#   respective objects.  When a version is committed, the current records
-#   are committed to the destination version.  When a version is
-#   aborted, the current records are aborted.
-#
-#   When committing or aborting, we search backward through the linked
-#   list until we find a record for an object that does not have a
-#   current record in the version.  If we find a record for which the
-#   non-version pointer is the same as the previous pointer, then we
-#   forget that the corresponding object had a current record in the
-#   version. This strategy allows us to avoid searching backward through
-#   previously committed or aborted version records.
-#
-#   Of course, we ignore records in undone transactions when committing
-#   or aborting.
-#
-# Backpointers
-#
-#   When we commit or abort a version, we don't copy (or delete)
-#   any data.  Instead, we write records with back pointers.
-#
-#   A version record *never* has a back pointer to a non-version
-#   record, because we never abort to a version.  A non-version record
-#   may have a back pointer to a version record or to a non-version
-#   record.
-
-import struct
-import logging
-
-from ZODB.POSException import POSKeyError
-from ZODB.utils import u64, oid_repr, t32
-
-
-class CorruptedError(Exception):
-    pass
-
-class CorruptedDataError(CorruptedError):
-
-    def __init__(self, oid=None, buf=None, pos=None):
-        self.oid = oid
-        self.buf = buf
-        self.pos = pos
-
-    def __str__(self):
-        if self.oid:
-            msg = "Error reading oid %s.  Found %r" % (oid_repr(self.oid),
-                                                       self.buf)
-        else:
-            msg = "Error reading unknown oid.  Found %r" % self.buf
-        if self.pos:
-            msg += " at %d" % self.pos
-        return msg
-
-# the struct formats for the headers
-TRANS_HDR = ">8sQcHHH"
-DATA_HDR = ">8s8sQQHQ"
-# constants to support various header sizes
-TRANS_HDR_LEN = 23
-DATA_HDR_LEN = 42
-DATA_VERSION_HDR_LEN = 58
-assert struct.calcsize(TRANS_HDR) == TRANS_HDR_LEN
-assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
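
For orientation, these formats line up with the layout comment above; a minimal sketch of reading the first transaction header by hand, assuming a storage file named Data.fs:

    import struct

    TRANS_HDR, TRANS_HDR_LEN = ">8sQcHHH", 23
    f = open("Data.fs", "rb")
    f.seek(4)                          # skip the 4-byte file identifier
    tid, tlen, status, ulen, dlen, elen = struct.unpack(
        TRANS_HDR, f.read(TRANS_HDR_LEN))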
-
-logger = logging.getLogger('ZODB.FileStorage.format')
-
-class FileStorageFormatter(object):
-    """Mixin class that can read and write the low-level format."""
-
-    # subclasses must provide _file
-
-    _metadata_size = 4L
-    _format_version = "21"
-
-    def _read_num(self, pos):
-        """Read an 8-byte number."""
-        self._file.seek(pos)
-        return u64(self._file.read(8))
-
-    def _read_data_header(self, pos, oid=None):
-        """Return a DataHeader object for data record at pos.
-
-        If oid is not None, raise CorruptedDataError if the oid passed
-        does not match the oid in the file.
-
-        If there is version data, reads the version part of the header.
-        If there is no pickle data, reads the back pointer.
-        """
-        self._file.seek(pos)
-        s = self._file.read(DATA_HDR_LEN)
-        if len(s) != DATA_HDR_LEN:
-            raise CorruptedDataError(oid, s, pos)
-        h = DataHeaderFromString(s)
-        if oid is not None and oid != h.oid:
-            raise CorruptedDataError(oid, s, pos)
-        if h.vlen:
-            s = self._file.read(16 + h.vlen)
-            h.parseVersion(s)
-        if not h.plen:
-            h.back = u64(self._file.read(8))
-        return h
-
-    def _write_version_header(self, file, pnv, vprev, version):
-        s = struct.pack(">8s8s", pnv, vprev)
-        file.write(s + version)
-
-    def _read_txn_header(self, pos, tid=None):
-        self._file.seek(pos)
-        s = self._file.read(TRANS_HDR_LEN)
-        if len(s) != TRANS_HDR_LEN:
-            raise CorruptedDataError(tid, s, pos)
-        h = TxnHeaderFromString(s)
-        if tid is not None and tid != h.tid:
-            raise CorruptedDataError(tid, s, pos)
-        h.user = self._file.read(h.ulen)
-        h.descr = self._file.read(h.dlen)
-        h.ext = self._file.read(h.elen)
-        return h
-
-    def _loadBack_impl(self, oid, back, fail=True):
-        # shared implementation used by various _loadBack methods
-        #
-        # If the backpointer ultimately resolves to 0:
-        # If fail is True, raise KeyError for zero backpointer.
-        # If fail is False, return the empty data from the record
-        # with no backpointer.
-        while 1:
-            if not back:
-                # If backpointer is 0, object does not currently exist.
-                raise POSKeyError(oid)
-            h = self._read_data_header(back)
-            if h.plen:
-                return self._file.read(h.plen), h.tid, back, h.tloc
-            if h.back == 0 and not fail:
-                return None, h.tid, back, h.tloc
-            back = h.back
-
-    def _loadBackTxn(self, oid, back, fail=True):
-        """Return data and txn id for backpointer."""
-        return self._loadBack_impl(oid, back, fail)[:2]
-
-    def _loadBackPOS(self, oid, back):
-        return self._loadBack_impl(oid, back)[2]
-
-    def getTxnFromData(self, oid, back):
-        """Return transaction id for data at back."""
-        h = self._read_data_header(back, oid)
-        return h.tid
-
-    def fail(self, pos, msg, *args):
-        s = ("%s:%s:" + msg) % ((self._name, pos) + args)
-        logger.error(s)
-        raise CorruptedError(s)
-
-    def checkTxn(self, th, pos):
-        if th.tid <= self.ltid:
-            self.fail(pos, "time-stamp reduction: %s <= %s",
-                      oid_repr(th.tid), oid_repr(self.ltid))
-        self.ltid = th.tid
-        if th.status == "c":
-            self.fail(pos, "transaction with checkpoint flag set")
-        if th.status not in " pu": # recognize " ", "p", and "u" as valid
-            self.fail(pos, "invalid transaction status: %r", th.status)
-        if th.tlen < th.headerlen():
-            self.fail(pos, "invalid transaction header: "
-                      "txnlen (%d) < headerlen(%d)", th.tlen, th.headerlen())
-
-    def checkData(self, th, tpos, dh, pos):
-        if dh.tloc != tpos:
-            self.fail(pos, "data record does not point to transaction header"
-                      ": %d != %d", dh.tloc, tpos)
-        if pos + dh.recordlen() > tpos + th.tlen:
-            self.fail(pos, "data record size exceeds transaction size: "
-                      "%d > %d", pos + dh.recordlen(), tpos + th.tlen)
-        if dh.prev >= pos:
-            self.fail(pos, "invalid previous pointer: %d", dh.prev)
-        if dh.back:
-            if dh.back >= pos:
-                self.fail(pos, "invalid back pointer: %d", dh.prev)
-            if dh.plen:
-                self.fail(pos, "data record has back pointer and data")
-
-def DataHeaderFromString(s):
-    return DataHeader(*struct.unpack(DATA_HDR, s))
-
-class DataHeader(object):
-    """Header for a data record."""
-
-    __slots__ = (
-        "oid", "tid", "prev", "tloc", "vlen", "plen", "back",
-        # These three attributes are only defined when vlen > 0
-        "pnv", "vprev", "version")
-
-    def __init__(self, oid, tid, prev, tloc, vlen, plen):
-        self.back = 0 # default
-        self.version = "" # default
-        self.oid = oid
-        self.tid = tid
-        self.prev = prev
-        self.tloc = tloc
-        self.vlen = vlen
-        self.plen = plen
-
-    def asString(self):
-        s = struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
-                        self.tloc, self.vlen, self.plen)
-        if self.version:
-            v = struct.pack(">QQ", self.pnv, self.vprev)
-            return s + v + self.version
-        else:
-            return s
-
-    def setVersion(self, version, pnv, vprev):
-        self.version = version
-        self.vlen = len(version)
-        self.pnv = pnv
-        self.vprev = vprev
-
-    def parseVersion(self, buf):
-        pnv, vprev = struct.unpack(">QQ", buf[:16])
-        self.pnv = pnv
-        self.vprev = vprev
-        self.version = buf[16:]
-
-    def recordlen(self):
-        rlen = DATA_HDR_LEN + (self.plen or 8)
-        if self.version:
-            rlen += 16 + self.vlen
-        return rlen
-
-def TxnHeaderFromString(s):
-    return TxnHeader(*struct.unpack(TRANS_HDR, s))
-
-class TxnHeader(object):
-    """Header for a transaction record."""
-
-    __slots__ = ("tid", "tlen", "status", "user", "descr", "ext",
-                 "ulen", "dlen", "elen")
-
-    def __init__(self, tid, tlen, status, ulen, dlen, elen):
-        self.tid = tid
-        self.tlen = tlen
-        self.status = status
-        self.ulen = ulen
-        self.dlen = dlen
-        self.elen = elen
-        if elen < 0:
-            self.elen = t32 - elen
-
-    def asString(self):
-        s = struct.pack(TRANS_HDR, self.tid, self.tlen, self.status,
-                        self.ulen, self.dlen, self.elen)
-        return "".join(map(str, [s, self.user, self.descr, self.ext]))
-
-    def headerlen(self):
-        return TRANS_HDR_LEN + self.ulen + self.dlen + self.elen
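
A small round-trip sketch, assuming the names defined in this module are importable; it checks that the fixed-size portion of a data header survives asString()/DataHeaderFromString() and that recordlen() accounts for the pickle length:

    from ZODB.utils import z64

    h = DataHeader(z64, z64, 0, 4, 0, 42)      # oid, tid, prev, tloc, vlen, plen
    h2 = DataHeaderFromString(h.asString())
    assert (h2.oid, h2.plen) == (z64, 42)
    assert h.recordlen() == DATA_HDR_LEN + 42  # no version data, plen > 0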
diff --git a/branches/bug1734/src/ZODB/FileStorage/fsdump.py b/branches/bug1734/src/ZODB/FileStorage/fsdump.py
deleted file mode 100644
index 09138022..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/fsdump.py
+++ /dev/null
@@ -1,132 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import struct
-
-from ZODB.FileStorage import FileIterator
-from ZODB.FileStorage.format \
-     import TRANS_HDR, TRANS_HDR_LEN, DATA_HDR, DATA_HDR_LEN
-from ZODB.TimeStamp import TimeStamp
-from ZODB.utils import u64, get_pickle_metadata
-from ZODB.tests.StorageTestBase import zodb_unpickle
-
-def fsdump(path, file=None, with_offset=1):
-    iter = FileIterator(path)
-    for i, trans in enumerate(iter):
-        if with_offset:
-            print >> file, "Trans #%05d tid=%016x time=%s offset=%d" % \
-                  (i, u64(trans.tid), TimeStamp(trans.tid), trans._pos)
-        else:
-            print >> file, "Trans #%05d tid=%016x time=%s" % \
-                  (i, u64(trans.tid), TimeStamp(trans.tid))
-        print >> file, "    status=%r user=%r description=%r" % \
-              (trans.status, trans.user, trans.description)
-
-        for j, rec in enumerate(trans):
-            if rec.data is None:
-                fullclass = "undo or abort of object creation"
-                size = ""
-            else:
-                modname, classname = get_pickle_metadata(rec.data)
-                size = " size=%d" % len(rec.data)
-                fullclass = "%s.%s" % (modname, classname)
-
-            if rec.version:
-                version = " version=%r" % rec.version
-            else:
-                version = ""
-
-            if rec.data_txn:
-                # It would be nice to print the transaction number
-                # (i) but it would be expensive to keep track of.
-                bp = " bp=%016x" % u64(rec.data_txn)
-            else:
-                bp = ""
-
-            print >> file, "  data #%05d oid=%016x%s%s class=%s%s" % \
-                  (j, u64(rec.oid), version, size, fullclass, bp)
-    iter.close()
-
-def fmt(p64):
-    # Return a nicely formatted string for a packed 64-bit value
-    return "%016x" % u64(p64)
-
-class Dumper:
-    """A very verbose dumper for debuggin FileStorage problems."""
-
-    # TODO:  Should revise this class to use FileStorageFormatter.
-
-    def __init__(self, path, dest=None):
-        self.file = open(path, "rb")
-        self.dest = dest
-
-    def dump(self):
-        fid = self.file.read(4)
-        print >> self.dest, "*" * 60
-        print >> self.dest, "file identifier: %r" % fid
-        while self.dump_txn():
-            pass
-
-    def dump_txn(self):
-        pos = self.file.tell()
-        h = self.file.read(TRANS_HDR_LEN)
-        if not h:
-            return False
-        tid, tlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
-        end = pos + tlen
-        print >> self.dest, "=" * 60
-        print >> self.dest, "offset: %d" % pos
-        print >> self.dest, "end pos: %d" % end
-        print >> self.dest, "transaction id: %s" % fmt(tid)
-        print >> self.dest, "trec len: %d" % tlen
-        print >> self.dest, "status: %r" % status
-        user = descr = extra = ""
-        if ul:
-            user = self.file.read(ul)
-        if dl:
-            descr = self.file.read(dl)
-        if el:
-            extra = self.file.read(el)
-        print >> self.dest, "user: %r" % user
-        print >> self.dest, "description: %r" % descr
-        print >> self.dest, "len(extra): %d" % el
-        while self.file.tell() < end:
-            self.dump_data(pos)
-        stlen = self.file.read(8)
-        print >> self.dest, "redundant trec len: %d" % u64(stlen)
-        return 1
-
-    def dump_data(self, tloc):
-        pos = self.file.tell()
-        h = self.file.read(DATA_HDR_LEN)
-        assert len(h) == DATA_HDR_LEN
-        oid, revid, prev, tloc, vlen, dlen = struct.unpack(DATA_HDR, h)
-        print >> self.dest, "-" * 60
-        print >> self.dest, "offset: %d" % pos
-        print >> self.dest, "oid: %s" % fmt(oid)
-        print >> self.dest, "revid: %s" % fmt(revid)
-        print >> self.dest, "previous record offset: %d" % prev
-        print >> self.dest, "transaction offset: %d" % tloc
-        if vlen:
-            pnv = self.file.read(8)
-            sprevdata = self.file.read(8)
-            version = self.file.read(vlen)
-            print >> self.dest, "version: %r" % version
-            print >> self.dest, "non-version data offset: %d" % u64(pnv)
-            print >> self.dest, \
-                  "previous version data offset: %d" % u64(sprevdata)
-        print >> self.dest, "len(data): %d" % dlen
-        self.file.read(dlen)
-        if not dlen:
-            sbp = self.file.read(8)
-            print >> self.dest, "backpointer: %d" % u64(sbp)
diff --git a/branches/bug1734/src/ZODB/FileStorage/fsoids.py b/branches/bug1734/src/ZODB/FileStorage/fsoids.py
deleted file mode 100644
index e3f86541..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/fsoids.py
+++ /dev/null
@@ -1,200 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import ZODB.FileStorage
-from ZODB.utils import get_pickle_metadata
-from ZODB.utils import p64, oid_repr, tid_repr, get_refs
-from ZODB.TimeStamp import TimeStamp
-
-# Extract module.class string from pickle.
-def get_class(pickle):
-    return "%s.%s" % get_pickle_metadata(pickle)
-
-# Shorten a string for display.
-def shorten(s, size=50):
-    if len(s) <= size:
-        return s
-    # Stick ... in the middle.
-    navail = size - 5
-    nleading = navail // 2
-    ntrailing = navail - nleading
-    return s[:nleading] + " ... " + s[-ntrailing:]
-
-class Tracer(object):
-    """Trace all occurrences of a set of oids in a FileStorage.
-
-    Create one by passing the path of an existing FileStorage.
-    Call register_oids(oid, ...) one or more times to specify which oids to
-    investigate.
-    Call run() to do the analysis.  This isn't swift -- it has to read
-    every byte in the database, in order to find all references.
-    Call report() to display the results.
-    """
-
-    def __init__(self, path):
-        import os
-        if not os.path.isfile(path):
-            raise ValueError("must specify an existing FileStorage")
-        self.path = path
-        # Map an interesting tid to (status, user, description, pos).
-        self.tid2info = {}
-        # List of messages.  Each is a tuple of the form
-        #     (oid, tid, string)
-        # The order in the tuple is important, because it defines the
-        # sort order for grouping.
-        self.msgs = []
-        # The set of interesting oids, specified by register_oid() calls.
-        # Maps oid to # of revisions.
-        self.oids = {}
-        # Maps interesting oid to its module.class name.  If a creation
-        # record for an interesting oid is never seen, it won't appear
-        # in this mapping.
-        self.oid2name = {}
-
-    def register_oids(self, *oids):
-        """
-        Declare that oids (0 or more) are "interesting".
-
-        An oid can be given as a native 8-byte string, or as an
-        integer.
-
-        Info will be gathered about all appearances of this oid in the
-        entire database, including references.
-        """
-        for oid in oids:
-            if isinstance(oid, str):
-                assert len(oid) == 8
-            else:
-                oid = p64(oid)
-            self.oids[oid] = 0  # 0 revisions seen so far
-
-    def _msg(self, oid, tid, *args):
-        args = map(str, args)
-        self.msgs.append( (oid, tid, ' '.join(args)) )
-        self._produced_msg = True
-
-    def report(self):
-        """Show all msgs, grouped by oid and sub-grouped by tid."""
-
-        msgs = self.msgs
-        oids = self.oids
-        oid2name = self.oid2name
-        # First determine which oids weren't seen at all, and synthesize msgs
-        # for them.
-        NOT_SEEN = "this oid was not defined (no data record for it found)"
-        for oid in oids:
-            if oid not in oid2name:
-                msgs.append( (oid, None, NOT_SEEN) )
-
-        msgs.sort() # oids are primary key, tids secondary
-        current_oid = current_tid = None
-        for oid, tid, msg in msgs:
-            if oid != current_oid:
-                nrev = oids[oid]
-                revision = "revision" + (nrev != 1 and 's' or '')
-                name = oid2name.get(oid, "<unknown>")
-                print "oid", oid_repr(oid), name, nrev, revision
-                current_oid = oid
-                current_tid = None
-                if msg is NOT_SEEN:
-                    assert tid is None
-                    print "   ", msg
-                    continue
-            if tid != current_tid:
-                current_tid = tid
-                status, user, description, pos = self.tid2info[tid]
-                print "    tid %s offset=%d %s" % (tid_repr(tid),
-                                                   pos,
-                                                   TimeStamp(tid))
-                print "        tid user=%r" % shorten(user)
-                print "        tid description=%r" % shorten(description)
-            print "       ", msg
-
-    # Do the analysis.
-    def run(self):
-        """Find all occurrences of the registered oids in the database."""
-
-        # Maps oid of a reference to its module.class name.
-        self._ref2name = {}
-        for txn in ZODB.FileStorage.FileIterator(self.path):
-            self._check_trec(txn)
-
-    # Process next transaction record.
-    def _check_trec(self, txn):
-        # txn has members tid, status, user, description,
-        # _extension, _pos, _tend, _file, _tpos
-        self._produced_msg = False
-        # Map and list used to save data records for the current transaction.
-        self._records_map = {}
-        self._records = []
-        for drec in txn:
-            self._save_references(drec)
-        for drec in self._records:
-            self._check_drec(drec)
-        if self._produced_msg:
-            # Copy txn info for later output.
-            self.tid2info[txn.tid] = (txn.status, txn.user, txn.description,
-                                      txn._tpos)
-
-    def _save_references(self, drec):
-        # drec has members oid, tid, version, data, data_txn
-        tid, oid, pick, pos = drec.tid, drec.oid, drec.data, drec.pos
-        if pick:
-            if oid in self.oids:
-                klass = get_class(pick)
-                self._msg(oid, tid, "new revision", klass, "at", pos)
-                self.oids[oid] += 1
-                self.oid2name[oid] = self._ref2name[oid] = klass
-            self._records_map[oid] = drec
-            self._records.append(drec)
-        elif oid in self.oids:
-            # Or maybe it's a version abort.
-            self._msg(oid, tid, "creation undo at", pos)
-
-    # Process next data record.  If a message is produced, self._produced_msg
-    # will be set True.
-    def _check_drec(self, drec):
-        # drec has members oid, tid, version, data, data_txn
-        tid, oid, pick, pos = drec.tid, drec.oid, drec.data, drec.pos
-        ref2name = self._ref2name
-        ref2name_get = ref2name.get
-        records_map_get = self._records_map.get
-        if pick:
-            oid_in_oids = oid in self.oids
-            for ref, klass in get_refs(pick):
-                if ref in self.oids:
-                    oidclass = ref2name_get(oid, None)
-                    if oidclass is None:
-                        ref2name[oid] = oidclass = get_class(pick)
-                    self._msg(ref, tid, "referenced by", oid_repr(oid),
-                              oidclass, "at", pos)
-
-                if oid_in_oids:
-                    if klass is None:
-                        klass = ref2name_get(ref, None)
-                        if klass is None:
-                            r = records_map_get(ref, None)
-                            # To save memory we only save references
-                            # seen in transactions that change
-                            # interesting objects, so in some
-                            # circumstances we may still get an
-                            # "<unknown>" class name.
-                            if r is None:
-                                klass = "<unknown>"
-                            else:
-                                ref2name[ref] = klass = get_class(r.data)
-                    elif isinstance(klass, tuple):
-                        ref2name[ref] = klass = "%s.%s" % klass
-
-                    self._msg(oid, tid, "references", oid_repr(ref), klass,
-                              "at", pos)
diff --git a/branches/bug1734/src/ZODB/FileStorage/fspack.py b/branches/bug1734/src/ZODB/FileStorage/fspack.py
deleted file mode 100644
index 24eaa776..00000000
--- a/branches/bug1734/src/ZODB/FileStorage/fspack.py
+++ /dev/null
@@ -1,699 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""FileStorage helper to perform pack.
-
-A storage contains an ordered set of object revisions.  When a storage
-is packed, object revisions that are not reachable as of the pack time
-are deleted.  The notion of reachability is complicated by
-backpointers -- object revisions that point to earlier revisions of
-the same object.
-
-An object revision is reachable at a certain time if it is reachable
-from the revision of the root at that time or if it is reachable from
-a backpointer after that time.
-"""
-
-import logging
-import os
-
-from ZODB.POSException import UndoError
-from ZODB.serialize import referencesf
-from ZODB.utils import p64, u64, z64
-
-from ZODB.fsIndex import fsIndex
-from ZODB.FileStorage.format \
-     import FileStorageFormatter, CorruptedDataError, DataHeader, \
-     TRANS_HDR_LEN
-
-logger = logging.getLogger("ZODB.FileStorage.fspack")
-
-class DataCopier(FileStorageFormatter):
-    """Mixin class for copying transactions into a storage.
-
-    The restore() and pack() methods share a need to copy data records
-    and update pointers to data in earlier transaction records.  This
-    class provides the shared logic.
-
-    The mixin extends the FileStorageFormatter with a copy() method.
-    It also requires that the concrete class provides the following
-    attributes:
-
-    _file -- file with earlier destination data
-    _tfile -- destination file for copied data
-    _pos -- file pos of destination transaction
-    _tindex -- maps oid to data record file pos
-    _tvindex -- maps version name to data record file pos
-
-    _tindex and _tvindex are updated by copy().
-
-    The copy() method does not do any locking.
-    """
-
-    def _txn_find(self, tid, stop_at_pack):
-        # _pos always points just past the last transaction
-        pos = self._pos
-        while pos > 4:
-            self._file.seek(pos - 8)
-            pos = pos - u64(self._file.read(8)) - 8
-            self._file.seek(pos)
-            h = self._file.read(TRANS_HDR_LEN)
-            _tid = h[:8]
-            if _tid == tid:
-                return pos
-            if stop_at_pack:
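-                # Byte 16 of the raw header is the 1-byte status code;
-                # it follows the 8-byte tid and 8-byte tlen.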
-                if h[16] == 'p':
-                    break
-        raise UndoError(None, "Invalid transaction id")
-
-    def _data_find(self, tpos, oid, data):
-        # Return backpointer for oid.  Must call with the lock held.
-        # This is a file offset to oid's data record if found, else 0.
-        # The data records in the transaction at tpos are searched for oid.
-        # If a data record for oid isn't found, returns 0.
-        # Else if oid's data record contains a backpointer, that
-        # backpointer is returned.
-        # Else oid's data record contains the data, and the file offset of
-        # oid's data record is returned.  This data record should contain
-        # a pickle identical to the 'data' argument.
-
-        # Unclear:  If the length of the stored data doesn't match len(data),
-        # an exception is raised.  If the lengths match but the data isn't
-        # the same, 0 is returned.  Why the discrepancy?
-        h = self._read_txn_header(tpos)
-        tend = tpos + h.tlen
-        pos = self._file.tell()
-        while pos < tend:
-            h = self._read_data_header(pos)
-            if h.oid == oid:
-                # Make sure this looks like the right data record
-                if h.plen == 0:
-                    # This is also a backpointer.  Gotta trust it.
-                    return pos
-                if h.plen != len(data):
-                    # The expected data doesn't match what's in the
-                    # backpointer.  Something is wrong.
-                    error("Mismatch between data and backpointer at %d", pos)
-                    return 0
-                _data = self._file.read(h.plen)
-                if data != _data:
-                    return 0
-                return pos
-            pos += h.recordlen()
-        return 0
-
-    def _restore_pnv(self, oid, prev, version, bp):
-        # Find a valid pnv (previous non-version) pointer for this version.
-
-        # If there is no previous record, there can't be a pnv.
-        if not prev:
-            return None
-
-        pnv = None
-        h = self._read_data_header(prev, oid)
-        # If the previous record is for a version, it must have
-        # a valid pnv.
-        if h.version:
-            return h.pnv
-        elif bp:
-            # Unclear:  Not sure the following is always true:
-            # The previous record is not for this version, yet we
-            # have a backpointer to it.  The current record must
-            # be an undo of an abort or commit, so the backpointer
-            # must be to a version record with a pnv.
-            h2 = self._read_data_header(bp, oid)
-            if h2.version:
-                return h2.pnv
-            else:
-                warn("restore could not find previous non-version data "
-                     "at %d or %d", prev, bp)
-                return None
-
-    def _resolve_backpointer(self, prev_txn, oid, data):
-        prev_pos = 0
-        if prev_txn is not None:
-            prev_txn_pos = self._txn_find(prev_txn, 0)
-            if prev_txn_pos:
-                prev_pos = self._data_find(prev_txn_pos, oid, data)
-        return prev_pos
-
-    def copy(self, oid, serial, data, version, prev_txn,
-             txnpos, datapos):
-        prev_pos = self._resolve_backpointer(prev_txn, oid, data)
-        old = self._index.get(oid, 0)
-        # Calculate the pos the record will have in the storage.
-        here = datapos
-        # And update the temp file index
-        self._tindex[oid] = here
-        if prev_pos:
-            # If there is a valid prev_pos, don't write data.
-            data = None
-        if data is None:
-            dlen = 0
-        else:
-            dlen = len(data)
-        # Write the recovery data record
-        h = DataHeader(oid, serial, old, txnpos, len(version), dlen)
-        if version:
-            h.version = version
-            pnv = self._restore_pnv(oid, old, version, prev_pos)
-            if pnv is not None:
-                h.pnv = pnv
-            else:
-                h.pnv = old
-            # Link to the last record for this version
-            h.vprev = self._tvindex.get(version, 0)
-            if not h.vprev:
-                h.vprev = self._vindex.get(version, 0)
-            self._tvindex[version] = here
-
-        self._tfile.write(h.asString())
-        # Write the data or a backpointer
-        if data is None:
-            if prev_pos:
-                self._tfile.write(p64(prev_pos))
-            else:
-                # Write a zero backpointer, which indicates an
-                # un-creation transaction.
-                self._tfile.write(z64)
-        else:
-            self._tfile.write(data)
-
-class GC(FileStorageFormatter):
-
-    def __init__(self, file, eof, packtime):
-        self._file = file
-        self._name = file.name
-        self.eof = eof
-        self.packtime = packtime
-        # packpos: position of first txn header after pack time
-        self.packpos = None
-        self.oid2curpos = fsIndex() # maps oid to current data record position
-        self.oid2verpos = fsIndex() # maps oid to current version data
-
-        # The set of reachable revisions of each object.
-        #
-        # This set is managed using two data structures.  The first is
-        # an fsIndex mapping oids to one data record pos.  Since only
-        # a few objects will have more than one revision, we use this
-        # efficient data structure to handle the common case.  The
-        # second is a dictionary mapping oids to lists of
-        # positions; it is used to handle the small number of objects
-        # for which we must keep multiple revisions.
-
-        self.reachable = fsIndex()
-        self.reach_ex = {}
-
-        # keep ltid for consistency checks during initial scan
-        self.ltid = z64
-
-    def isReachable(self, oid, pos):
-        """Return 1 if revision of `oid` at `pos` is reachable."""
-
-        rpos = self.reachable.get(oid)
-        if rpos is None:
-            return 0
-        if rpos == pos:
-            return 1
-        return pos in self.reach_ex.get(oid, [])
-
-    def findReachable(self):
-        self.buildPackIndex()
-        self.findReachableAtPacktime([z64])
-        self.findReachableFromFuture()
-        # These mappings are no longer needed and may consume a lot
-        # of space.
-        del self.oid2verpos
-        del self.oid2curpos
-
-    def buildPackIndex(self):
-        pos = 4L
-        # We make the initial assumption that the database has been
-        # packed before and set unpacked to True only after seeing the
-        # first record with a status == " ".  If we get to the packtime
-        # and unpacked is still False, we need to watch for a redundant
-        # pack.
-        unpacked = False
-        while pos < self.eof:
-            th = self._read_txn_header(pos)
-            if th.tid > self.packtime:
-                break
-            self.checkTxn(th, pos)
-            if th.status != "p":
-                unpacked = True
-
-            tpos = pos
-            end = pos + th.tlen
-            pos += th.headerlen()
-
-            while pos < end:
-                dh = self._read_data_header(pos)
-                self.checkData(th, tpos, dh, pos)
-                if dh.version:
-                    self.oid2verpos[dh.oid] = pos
-                else:
-                    self.oid2curpos[dh.oid] = pos
-                pos += dh.recordlen()
-
-            tlen = self._read_num(pos)
-            if tlen != th.tlen:
-                self.fail(pos, "redundant transaction length does not "
-                          "match initial transaction length: %d != %d",
-                          tlen, th.tlen)
-            pos += 8
-
-        self.packpos = pos
-
-        if unpacked:
-            return
-        # check for a redundant pack.  If the first record following
-        # the newly computed packpos has status 'p', then it was
-        # packed earlier and the current pack is redundant.
-        try:
-            th = self._read_txn_header(pos)
-        except CorruptedDataError, err:
-            if err.buf != "":
-                raise
-        if th.status == 'p':
-            # Delayed import to cope with circular imports.
-            # TODO:  put exceptions in a separate module.
-            from ZODB.FileStorage.FileStorage import RedundantPackWarning
-            raise RedundantPackWarning(
-                "The database has already been packed to a later time"
-                " or no changes have been made since the last pack")
-
-    def findReachableAtPacktime(self, roots):
-        """Mark all objects reachable from the oids in roots as reachable."""
-        todo = list(roots)
-        while todo:
-            oid = todo.pop()
-            if self.reachable.has_key(oid):
-                continue
-
-            L = []
-
-            pos = self.oid2curpos.get(oid)
-            if pos is not None:
-                L.append(pos)
-                todo.extend(self.findrefs(pos))
-
-            pos = self.oid2verpos.get(oid)
-            if pos is not None:
-                L.append(pos)
-                todo.extend(self.findrefs(pos))
-
-            if not L:
-                continue
-
-            pos = L.pop()
-            self.reachable[oid] = pos
-            if L:
-                self.reach_ex[oid] = L
-
-    def findReachableFromFuture(self):
-        # In this pass, the roots are positions of object revisions.
-        # We add a pos to extra_roots when there is a backpointer to a
-        # revision that was not current at the packtime.  The
-        # non-current revision could refer to objects that were
-        # otherwise unreachable at the packtime.
-        extra_roots = []
-
-        pos = self.packpos
-        while pos < self.eof:
-            th = self._read_txn_header(pos)
-            self.checkTxn(th, pos)
-            tpos = pos
-            end = pos + th.tlen
-            pos += th.headerlen()
-
-            while pos < end:
-                dh = self._read_data_header(pos)
-                self.checkData(th, tpos, dh, pos)
-
-                if dh.back and dh.back < self.packpos:
-                    if self.reachable.has_key(dh.oid):
-                        L = self.reach_ex.setdefault(dh.oid, [])
-                        if dh.back not in L:
-                            L.append(dh.back)
-                            extra_roots.append(dh.back)
-                    else:
-                        self.reachable[dh.oid] = dh.back
-
-                if dh.version and dh.pnv:
-                    if self.reachable.has_key(dh.oid):
-                        L = self.reach_ex.setdefault(dh.oid, [])
-                        if dh.pnv not in L:
-                            L.append(dh.pnv)
-                            extra_roots.append(dh.pnv)
-                    else:
-                        self.reachable[dh.oid] = dh.pnv
-
-                pos += dh.recordlen()
-
-            tlen = self._read_num(pos)
-            if tlen != th.tlen:
-                self.fail(pos, "redundant transaction length does not "
-                          "match initial transaction length: %d != %d",
-                          tlen, th.tlen)
-            pos += 8
-
-        for pos in extra_roots:
-            refs = self.findrefs(pos)
-            self.findReachableAtPacktime(refs)
-
-    def findrefs(self, pos):
-        """Return a list of oids referenced as of packtime."""
-        dh = self._read_data_header(pos)
-        # Chase backpointers until we get to the record with the refs
-        while dh.back:
-            dh = self._read_data_header(dh.back)
-        if dh.plen:
-            return referencesf(self._file.read(dh.plen))
-        else:
-            return []
-
-class PackCopier(DataCopier):
-
-    # PackCopier has to cope with _file and _tfile being the
-    # same file.  The copy() implementation is written assuming
-    # that they are different, so that using one object doesn't
-    # mess up the file pointer for the other object.
-
-    # PackCopier overrides _resolve_backpointer() and _restore_pnv()
-    # to guarantee that they keep the file pointer for _tfile in
-    # the right place.
-
-    def __init__(self, f, index, vindex, tindex, tvindex):
-        self._file = f
-        self._tfile = f
-        self._index = index
-        self._vindex = vindex
-        self._tindex = tindex
-        self._tvindex = tvindex
-        self._pos = None
-
-    def setTxnPos(self, pos):
-        self._pos = pos
-
-    def _resolve_backpointer(self, prev_txn, oid, data):
-        pos = self._tfile.tell()
-        try:
-            return DataCopier._resolve_backpointer(self, prev_txn, oid, data)
-        finally:
-            self._tfile.seek(pos)
-
-    def _restore_pnv(self, oid, prev, version, bp):
-        pos = self._tfile.tell()
-        try:
-            return DataCopier._restore_pnv(self, oid, prev, version, bp)
-        finally:
-            self._tfile.seek(pos)
-
-class FileStoragePacker(FileStorageFormatter):
-
-    # path is the storage file path.
-    # stop is the pack time, as a TimeStamp.
-    # la and lr are the acquire() and release() methods of the storage's lock.
-    # cla and clr similarly, for the storage's commit lock.
-    # current_size is the storage's _pos.  All valid data at the start
-    # lives before that offset (there may be a checkpoint transaction in
-    # progress after it).
-    def __init__(self, path, stop, la, lr, cla, clr, current_size):
-        self._name = path
-        # We open our own handle on the storage so that much of pack can
-        # proceed in parallel.  It's important to close this file at every
-        # return point, else on Windows the caller won't be able to rename
-        # or remove the storage file.
-        self._file = open(path, "rb")
-        self._path = path
-        self._stop = stop
-        self.locked = 0
-        self.file_end = current_size
-
-        self.gc = GC(self._file, self.file_end, self._stop)
-
-        # The packer needs to acquire the parent's commit lock
-        # during the copying stage, so the two sets of lock acquire
-        # and release methods are passed to the constructor.
-        self._lock_acquire = la
-        self._lock_release = lr
-        self._commit_lock_acquire = cla
-        self._commit_lock_release = clr
-
-        # The packer will use several indexes.
-        # index: oid -> pos
-        # vindex: version -> pos
-        # tindex: oid -> pos, for current txn
-        # tvindex: version -> pos, for current txn
-        # oid2tid: not used by the packer
-
-        self.index = fsIndex()
-        self.vindex = {}
-        self.tindex = {}
-        self.tvindex = {}
-        self.oid2tid = {}
-        self.toid2tid = {}
-        self.toid2tid_delete = {}
-
-        # Index for non-version data.  This is a temporary structure
-        # to reduce I/O during packing
-        self.nvindex = fsIndex()
-
-    def pack(self):
-        # Pack copies all data reachable at the pack time or later.
-        #
-        # Copying occurs in two phases.  In the first phase, txns
-        # before the pack time are copied if they contain any reachable
-        # data.  In the second phase, all txns after the pack time
-        # are copied.
-        #
-        # Txn and data records contain pointers to previous records.
-        # Because these pointers are stored as file offsets, they
-        # must be updated when we copy data.
-
-        # TODO:  Should add sanity checking to pack.
-
-        self.gc.findReachable()
-
-        # Setup the destination file and copy the metadata.
-        # TODO:  rename from _tfile to something clearer.
-        self._tfile = open(self._name + ".pack", "w+b")
-        self._file.seek(0)
-        self._tfile.write(self._file.read(self._metadata_size))
-
-        self._copier = PackCopier(self._tfile, self.index, self.vindex,
-                                  self.tindex, self.tvindex)
-
-        ipos, opos = self.copyToPacktime()
-        assert ipos == self.gc.packpos
-        if ipos == opos:
-            # Pack didn't free any data; there's no point in continuing.
-            self._tfile.close()
-            self._file.close()
-            os.remove(self._name + ".pack")
-            return None
-        self._commit_lock_acquire()
-        self.locked = 1
-        self._lock_acquire()
-        try:
-            # Re-open the file in unbuffered mode.
-
-            # The main thread may write new transactions to the file,
-            # which creates the possibility that we will read a status
-            # 'c' transaction into the pack thread's stdio buffer even
-            # though we're acquiring the commit lock.  Transactions
-            # can still be in progress throughout much of packing, and
-            # are written to the same physical file but via a distinct
-            # Python file object.  The code used to leave off the
-            # trailing 0 argument, and then on every platform except
-            # native Windows it was observed that we could read stale
-            # data from the tail end of the file.
-            self._file.close()  # else self.gc keeps the original alive & open
-            self._file = open(self._path, "rb", 0)
-            self._file.seek(0, 2)
-            self.file_end = self._file.tell()
-        finally:
-            self._lock_release()
-        if ipos < self.file_end:
-            self.copyRest(ipos)
-
-        # OK, we've copied everything. Now we need to wrap things up.
-        pos = self._tfile.tell()
-        self._tfile.flush()
-        self._tfile.close()
-        self._file.close()
-
-        return pos
-
-    def copyToPacktime(self):
-        offset = 0L  # the amount of space freed by packing
-        pos = self._metadata_size
-        new_pos = pos
-
-        while pos < self.gc.packpos:
-            th = self._read_txn_header(pos)
-            new_tpos, pos = self.copyDataRecords(pos, th)
-
-            if new_tpos:
-                new_pos = self._tfile.tell() + 8
-                tlen = new_pos - new_tpos - 8
-                # Update the transaction length
-                self._tfile.seek(new_tpos + 8)
-                self._tfile.write(p64(tlen))
-                self._tfile.seek(new_pos - 8)
-                self._tfile.write(p64(tlen))
-
-
-            tlen = self._read_num(pos)
-            if tlen != th.tlen:
-                self.fail(pos, "redundant transaction length does not "
-                          "match initial transaction length: %d != %d",
-                          tlen, th.tlen)
-            pos += 8
-
-        return pos, new_pos
-
-    def fetchBackpointer(self, oid, back):
-        """Return the data for backpointer `back` to object `oid`.
-
-        If `back` is 0 or ultimately resolves to 0, return None;
-        in this case, the transaction undoes the object creation.
-        """
-        if back == 0:
-            return None
-        data, tid = self._loadBackTxn(oid, back, 0)
-        return data
-
-    def copyDataRecords(self, pos, th):
-        """Copy any current data records between pos and tend.
-
-        Returns position of txn header in output file and position
-        of next record in the input file.
-
-        If any data records are copied, also write txn header (th).
-        """
-        copy = 0
-        new_tpos = 0L
-        tend = pos + th.tlen
-        pos += th.headerlen()
-        while pos < tend:
-            h = self._read_data_header(pos)
-            if not self.gc.isReachable(h.oid, pos):
-                pos += h.recordlen()
-                continue
-            pos += h.recordlen()
-
-            # If we are going to copy any data, we need to copy
-            # the transaction header.  Note that we will need to
-            # patch up the transaction length when we are done.
-            if not copy:
-                th.status = "p"
-                s = th.asString()
-                new_tpos = self._tfile.tell()
-                self._tfile.write(s)
-                new_pos = new_tpos + len(s)
-                copy = 1
-
-            if h.plen:
-                data = self._file.read(h.plen)
-            else:
-                # If a current record has a backpointer, fetch
-                # refs and data from the backpointer.  We need
-                # to write the data in the new record.
-                data = self.fetchBackpointer(h.oid, h.back)
-
-            self.writePackedDataRecord(h, data, new_tpos)
-            new_pos = self._tfile.tell()
-
-        return new_tpos, pos
-
-    def writePackedDataRecord(self, h, data, new_tpos):
-        # Update the header to reflect current information, then write
-        # it to the output file.
-        if data is None:
-            data = ""
-        h.prev = 0
-        h.back = 0
-        h.plen = len(data)
-        h.tloc = new_tpos
-        pos = self._tfile.tell()
-        if h.version:
-            h.pnv = self.index.get(h.oid, 0)
-            h.vprev = self.vindex.get(h.version, 0)
-            self.vindex[h.version] = pos
-        self.index[h.oid] = pos
-        if h.version:
-            self.vindex[h.version] = pos
-        self._tfile.write(h.asString())
-        self._tfile.write(data)
-        if not data:
-            # Packed records never have backpointers (?).
-            # If there is no data, write a z64 backpointer.
-            # This is a George Bailey event.
-            self._tfile.write(z64)
-
-    def copyRest(self, ipos):
-        # After the pack time, all data records are copied.
-        # Copy one txn at a time, using copy() for data.
-
-        # Release the commit lock every 20 copies
-        self._lock_counter = 0
-
-        try:
-            while 1:
-                ipos = self.copyOne(ipos)
-        except CorruptedDataError, err:
-            # The last call to copyOne() will raise
-            # CorruptedDataError, because it will attempt to read past
-            # the end of the file.  Double-check that the exception
-            # occurred for this reason.
-            self._file.seek(0, 2)
-            endpos = self._file.tell()
-            if endpos != err.pos:
-                raise
-
-    def copyOne(self, ipos):
-        # The call below will raise CorruptedDataError at EOF.
-        th = self._read_txn_header(ipos)
-        self._lock_counter += 1
-        if self._lock_counter % 20 == 0:
-            self._commit_lock_release()
-        pos = self._tfile.tell()
-        self._copier.setTxnPos(pos)
-        self._tfile.write(th.asString())
-        tend = ipos + th.tlen
-        ipos += th.headerlen()
-
-        while ipos < tend:
-            h = self._read_data_header(ipos)
-            ipos += h.recordlen()
-            prev_txn = None
-            if h.plen:
-                data = self._file.read(h.plen)
-            else:
-                data = self.fetchBackpointer(h.oid, h.back)
-                if h.back:
-                    prev_txn = self.getTxnFromData(h.oid, h.back)
-
-            self._copier.copy(h.oid, h.tid, data, h.version,
-                              prev_txn, pos, self._tfile.tell())
-
-        tlen = self._tfile.tell() - pos
-        assert tlen == th.tlen
-        self._tfile.write(p64(tlen))
-        ipos += 8
-
-        self.index.update(self.tindex)
-        self.tindex.clear()
-        self.vindex.update(self.tvindex)
-        self.tvindex.clear()
-        if self._lock_counter % 20 == 0:
-            self._commit_lock_acquire()
-        return ipos
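
The backward traversal used by _txn_find() (and by UndoSearch._readnext() earlier in this patch) leans on the redundant trailing length of each transaction record; a standalone sketch, assuming f is an open storage file and pos starts at the end of valid data:

    from ZODB.utils import u64

    def iter_txn_positions_backwards(f, pos):
        # Yield the file position of each transaction header, newest first.
        while pos > 4:                  # the 4-byte file id ends the walk
            f.seek(pos - 8)
            pos -= u64(f.read(8)) + 8   # trailing length + its own 8 bytes
            yield pos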
diff --git a/branches/bug1734/src/ZODB/MappingStorage.py b/branches/bug1734/src/ZODB/MappingStorage.py
deleted file mode 100644
index 4e7d4049..00000000
--- a/branches/bug1734/src/ZODB/MappingStorage.py
+++ /dev/null
@@ -1,149 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Very Simple Mapping ZODB storage
-
-The Mapping storage is an extremely simple storage implementation that
-supports neither undo nor versions.
-
-It is meant to illustrate the simplest possible storage.
-
-The Mapping storage uses a single data structure to map object ids to data.
-"""
-
-from ZODB.utils import u64, z64
-from ZODB.BaseStorage import BaseStorage
-from ZODB import POSException
-from persistent.TimeStamp import TimeStamp
-
-
-class MappingStorage(BaseStorage):
-
-    def __init__(self, name='Mapping Storage'):
-        BaseStorage.__init__(self, name)
-        self._index = {}
-        # FIXME: Why don't we use a dict for _tindex?
-        self._tindex = []
-        self._ltid = None
-        # Note: If you subclass this and use a persistent mapping facility
-        # (e.g. a dbm file), you will need to get the maximum key and save it
-        # as self._oid.  See dbmStorage.
-
-    def __len__(self):
-        return len(self._index)
-
-    def getSize(self):
-        self._lock_acquire()
-        try:
-            # These constants are for Python object memory overheads
-            s = 32
-            for p in self._index.itervalues():
-                s += 56 + len(p)
-            return s
-        finally:
-            self._lock_release()
-
-    def load(self, oid, version):
-        self._lock_acquire()
-        try:
-            p = self._index[oid]
-            return p[8:], p[:8] # pickle, serial
-        finally:
-            self._lock_release()
-
-    def loadEx(self, oid, version):
-        self._lock_acquire()
-        try:
-            # Since this storage doesn't support versions, tid and
-            # serial will always be the same.
-            p = self._index[oid]
-            return p[8:], p[:8], "" # pickle, tid, version
-        finally:
-            self._lock_release()
-
-    def getTid(self, oid):
-        self._lock_acquire()
-        try:
-            # The tid is the first 8 bytes of the buffer.
-            return self._index[oid][:8]
-        finally:
-            self._lock_release()
-
-
-    def store(self, oid, serial, data, version, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-
-        if version:
-            raise POSException.Unsupported("Versions aren't supported")
-
-        self._lock_acquire()
-        try:
-            if oid in self._index:
-                oserial = self._index[oid][:8]
-                if serial != oserial:
-                    raise POSException.ConflictError(oid=oid,
-                                                     serials=(oserial, serial),
-                                                     data=data)
-
-            self._tindex.append((oid, self._tid + data))
-        finally:
-            self._lock_release()
-        return self._tid
-
-    def _clear_temp(self):
-        self._tindex = []
-
-    def _finish(self, tid, user, desc, ext):
-        self._index.update(dict(self._tindex))
-        self._ltid = self._tid
-
-    def lastTransaction(self):
-        return self._ltid
-
-    def pack(self, t, referencesf):
-        self._lock_acquire()
-        try:
-            if not self._index:
-                return
-            # Build an index of *only* those objects reachable from the root.
-            rootl = [z64]
-            pindex = {}
-            while rootl:
-                oid = rootl.pop()
-                if oid in pindex:
-                    continue
-                # Scan non-version pickle for references
-                r = self._index[oid]
-                pindex[oid] = r
-                referencesf(r[8:], rootl)
-
-            # Now delete any unreferenced entries:
-            for oid in self._index.keys():
-                if oid not in pindex:
-                    del self._index[oid]
-
-        finally:
-            self._lock_release()
-
-    def _splat(self):
-        """Spit out a string showing state."""
-        o = ['Index:']
-        keys = self._index.keys()
-        keys.sort()
-        for oid in keys:
-            r = self._index[oid]
-            o.append('  %s: %s, %s' %
-                     (u64(oid), TimeStamp(r[:8]), repr(r[8:])))
-
-        return '\n'.join(o)
diff --git a/branches/bug1734/src/ZODB/Mount.py b/branches/bug1734/src/ZODB/Mount.py
deleted file mode 100644
index e9eff2e3..00000000
--- a/branches/bug1734/src/ZODB/Mount.py
+++ /dev/null
@@ -1,304 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Mounted database support
-
-$Id$"""
-
-import time
-import thread
-import logging
-import persistent
-import Acquisition
-from Acquisition import aq_base
-from POSException import MountedStorageError
-
-logger = logging.getLogger('ZODB.Mount')
-
-# dbs is a holder for all DB objects, needed to overcome
-# threading issues.  It maps connection params to a DB object
-# and a mapping of mount points.
-dbs = {}
-
-# dblock is locked every time dbs is accessed.
-dblock=thread.allocate_lock()
-
-
-def parentClassFactory(jar, module, name):
-    # Use the class factory from the parent database.
-    parent_conn = getattr(jar, '_mount_parent_jar', None)
-    parent_db = getattr(parent_conn, '_db', None)
-    if parent_db is None:
-        _globals = {}
-        _silly = ('__doc__',)
-        return getattr(__import__(
-            module, _globals, _globals, _silly), name)
-    else:
-        return parent_db.classFactory(parent_conn, module, name)
-
-
-class MountPoint(persistent.Persistent, Acquisition.Implicit):
-    '''The base class for a Zope object which, when traversed,
-    accesses a different database.
-    '''
-
-    # Default values for non-persistent variables.
-    _v_db = None
-    _v_data = None
-    _v_connect_error = None
-
-    def __init__(self, path, params=None, classDefsFromRoot=1):
-        '''
-        @arg path The path within the mounted database from which
-        to derive the root.
-
-        @arg params The parameters used to connect to the database.
-        No particular format required.
-        If there is more than one mount point referring to a
-        database, MountPoint will detect the matching params
-        and use the existing database.  Include the class name of
-        the storage.  For example,
-        ZEO params might be "ZODB.ZEOClient localhost 1081".
-
-        @arg classDefsFromRoot If true (the default), MountPoint will
-        try to get ZClass definitions from the root database rather
-        than the mounted database.
-        '''
-        # The only reason we need a __mountpoint_id is to
-        # be sure we don't close a database prematurely when
-        # it is mounted more than once and one of the points
-        # is unmounted.
-        self.__mountpoint_id = '%s_%f' % (id(self), time.time())
-        if params is None:
-            # We still need something to use as a hash in
-            # the "dbs" dictionary.
-            params = self.__mountpoint_id
-        self._params = repr(params)
-        self._path = path
-        self._classDefsFromRoot = classDefsFromRoot
-
-    def _createDB(self):
-        '''Gets the database object, usually by creating a Storage object
-        and returning ZODB.DB(storage).
-        '''
-        raise NotImplementedError
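-
-    # A hedged sketch of a concrete subclass (the storage and path are
-    # illustrative only):
-    #
-    #   class FileMountPoint(MountPoint):
-    #       def _createDB(self):
-    #           from ZODB import DB
-    #           from ZODB.FileStorage import FileStorage
-    #           return DB(FileStorage('/var/zodb/mounted.fs'))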
-
-    def _getDB(self):
-        '''Creates or opens a DB object.
-        '''
-        newMount = 0
-        dblock.acquire()
-        try:
-            params = self._params
-            dbInfo = dbs.get(params, None)
-            if dbInfo is None:
-                logger.info('Opening database for mounting: %s', params)
-                db = self._createDB()
-                newMount = 1
-                dbs[params] = (db, {self.__mountpoint_id:1})
-
-                if getattr(self, '_classDefsFromRoot', 1):
-                    db.classFactory = parentClassFactory
-            else:
-                db, mounts = dbInfo
-                # Be sure this object is in the list of mount points.
-                if not mounts.has_key(self.__mountpoint_id):
-                    newMount = 1
-                    mounts[self.__mountpoint_id] = 1
-            self._v_db = db
-        finally:
-            dblock.release()
-        return db, newMount
-
-    def _getMountpointId(self):
-        return self.__mountpoint_id
-
-    def _getMountParams(self):
-        return self._params
-
-    def __repr__(self):
-        return "%s(%s, %s)" % (self.__class__.__name__, repr(self._path),
-                               self._params)
-
-    def _openMountableConnection(self, parent):
-        # Opens a new connection to the database.
-        db = self._v_db
-        if db is None:
-            self._v_close_db = 0
-            db, newMount = self._getDB()
-        else:
-            newMount = 0
-        jar = getattr(self, '_p_jar', None)
-        if jar is None:
-            # Get _p_jar from parent.
-            self._p_jar = jar = parent._p_jar
-        conn = db.open(version=jar.getVersion())
-
-        # Add an attribute to the connection which
-        # makes it possible for us to find the primary
-        # database connection.  See ClassFactoryForMount().
-        conn._mount_parent_jar = jar
-
-        mcc = MountedConnectionCloser(self, conn)
-        jar.onCloseCallback(mcc)
-        return conn, newMount, mcc
-
-    def _getObjectFromConnection(self, conn):
-        obj = self._getMountRoot(conn.root())
-        data = aq_base(obj)
-        # Store the data object in a tuple to hide from acquisition.
-        self._v_data = (data,)
-        return data
-
-    def _getOrOpenObject(self, parent):
-        t = self._v_data
-        if t is None:
-            self._v_connect_error = None
-            conn = None
-            newMount = 0
-            mcc = None
-            try:
-                conn, newMount, mcc = self._openMountableConnection(parent)
-                data = self._getObjectFromConnection(conn)
-            except:
-                # Possibly broken database.
-                if mcc is not None:
-                    # Note that the next line may be a little rash--
-                    # if, for example, a working database throws an
-                    # exception rather than wait for a new connection,
-                    # this will likely cause the database to be closed
-                    # prematurely.  Perhaps DB.py needs a
-                    # countActiveConnections() method.
-                    mcc.setCloseDb()
-                self._logConnectException()
-                raise
-            if newMount:
-                try: id = data.getId()
-                except: id = '???'  # data has no getId() method.  Bad.
-                p = '/'.join(parent.getPhysicalPath() + (id,))
-                logger.info('Mounted database %s at %s',
-                            self._getMountParams(), p)
-        else:
-            data = t[0]
-
-        return data.__of__(parent)
-
-    def __of__(self, parent):
-        # Accesses the database, returning an acquisition
-        # wrapper around the connected object rather than around self.
-        try:
-            return self._getOrOpenObject(parent)
-        except:
-            return Acquisition.ImplicitAcquisitionWrapper(
-                self, parent)
-
-    def _test(self, parent):
-        '''Tests the database connection.
-        '''
-        self._getOrOpenObject(parent)
-        return 1
-
-    def _getMountRoot(self, root):
-        '''Gets the object to be mounted.
-        Can be overridden to provide different behavior.
-        '''
-        try:
-            app = root['Application']
-        except:
-            raise MountedStorageError, (
-                "No 'Application' object exists in the mountable database.")
-        try:
-            return app.unrestrictedTraverse(self._path)
-        except:
-            raise MountedStorageError, (
-                "The path '%s' was not found in the mountable database."
-                % self._path)
-
-    def _logConnectException(self):
-        '''Records info about the exception that just occurred.
-        '''
-        try:
-            from cStringIO import StringIO
-        except:
-            from StringIO import StringIO
-        import sys
-        import traceback
-        exc = sys.exc_info()
-        logger.warning('Failed to mount database. %s (%s)', exc[0], exc[1],
-                       exc_info=True)
-        f = StringIO()
-        traceback.print_tb(exc[2], 100, f)
-        self._v_connect_error = (exc[0], exc[1], f.getvalue())
-        exc = None
-
-
-class MountedConnectionCloser:
-    '''Closes the connection used by the mounted database
-    while performing other cleanup.
-    '''
-    close_db = 0
-
-    def __init__(self, mountpoint, conn):
-        # conn is the child connection.
-        self.mp = mountpoint
-        self.conn = conn
-
-    def setCloseDb(self):
-        self.close_db = 1
-
-    def __call__(self):
-        # The onCloseCallback handler.
-        # Closes a single connection to the database
-        # and possibly the database itself.
-        conn = self.conn
-        close_db = 0
-        if conn is not None:
-            mp = self.mp
-            # Remove potential circular references.
-            self.conn = None
-            self.mp = None
-            # Detect whether we should close the database.
-            close_db = self.close_db
-            t = mp.__dict__.get('_v_data', None)
-            if t is not None:
-                del mp.__dict__['_v_data']
-                data = t[0]
-                if not close_db and data.__dict__.get(
-                    '_v__object_deleted__', 0):
-                    # This mount point has been deleted.
-                    del data.__dict__['_v__object_deleted__']
-                    close_db = 1
-            # Close the child connection.
-            try:
-                del conn._mount_parent_jar
-            except:
-                pass
-            conn.close()
-
-        if close_db:
-            # Stop using this database. Close it if no other
-            # MountPoint is using it.
-            dblock.acquire()
-            try:
-                params = mp._getMountParams()
-                mp._v_db = None
-                if dbs.has_key(params):
-                    dbInfo = dbs[params]
-                    db, mounts = dbInfo
-                    try: del mounts[mp._getMountpointId()]
-                    except: pass
-                    if len(mounts) < 1:
-                        # No more mount points are using this database.
-                        del dbs[params]
-                        db.close()
-                        logger.info('Closed database: %s', params)
-            finally:
-                dblock.release()
diff --git a/branches/bug1734/src/ZODB/POSException.py b/branches/bug1734/src/ZODB/POSException.py
deleted file mode 100644
index a137cb75..00000000
--- a/branches/bug1734/src/ZODB/POSException.py
+++ /dev/null
@@ -1,309 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""ZODB-defined exceptions
-
-$Id$"""
-
-from ZODB.utils import oid_repr, readable_tid_repr
-
-def _fmt_undo(oid, reason):
-    s = reason and (": %s" % reason) or ""
-    return "Undo error %s%s" % (oid_repr(oid), s)
-
-class POSError(StandardError):
-    """Persistent object system error."""
-
-class POSKeyError(KeyError, POSError):
-    """Key not found in database."""
-
-    def __str__(self):
-        return oid_repr(self.args[0])
-
-class TransactionError(POSError):
-    """An error occured due to normal transaction processing."""
-
-class TransactionFailedError(POSError):
-    """Cannot perform an operation on a transaction that previously failed.
-
-    An attempt was made to commit a transaction, or to join a transaction,
-    but this transaction previously raised an exception during an attempt
-    to commit it.  The transaction must be explicitly aborted, either by
-    invoking abort() on the transaction, or begin() on its transaction
-    manager.
-    """
-
-class ConflictError(TransactionError):
-    """Two transactions tried to modify the same object at once.
-
-    This transaction should be resubmitted.
-
-    Instance attributes:
-      oid : string
-        the OID (8-byte packed string) of the object in conflict
-      class_name : string
-        the fully-qualified name of that object's class
-      message : string
-        a human-readable explanation of the error
-      serials : (string, string)
-        a pair of 8-byte packed strings; these are the serial numbers
-        related to the conflict.  The first is the revision of the object
-        that is in conflict (the currently committed serial).  The second
-        is the revision the current transaction read when it started.
-      data : string
-        The database record that failed to commit, used to put the
-        class name in the error message.
-
-    The caller should pass either object or oid as a keyword argument,
-    but not both of them.  If object is passed, it should be a
-    persistent object with an _p_oid attribute.
-    """
-
-    def __init__(self, message=None, object=None, oid=None, serials=None,
-                 data=None):
-        if message is None:
-            self.message = "database conflict error"
-        else:
-            self.message = message
-
-        if object is None:
-            self.oid = None
-            self.class_name = None
-        else:
-            self.oid = object._p_oid
-            klass = object.__class__
-            self.class_name = klass.__module__ + "." + klass.__name__
-
-        if oid is not None:
-            assert self.oid is None
-            self.oid = oid
-
-        if data is not None:
-            # avoid circular import chain
-            from ZODB.utils import get_pickle_metadata
-            self.class_name = "%s.%s" % get_pickle_metadata(data)
-##        else:
-##            if message != "data read conflict error":
-##                raise RuntimeError
-
-        self.serials = serials
-
-    def __str__(self):
-        extras = []
-        if self.oid:
-            extras.append("oid %s" % oid_repr(self.oid))
-        if self.class_name:
-            extras.append("class %s" % self.class_name)
-        if self.serials:
-            current, old = self.serials
-            extras.append("serial this txn started with %s" %
-                          readable_tid_repr(old))
-            extras.append("serial currently committed %s" %
-                          readable_tid_repr(current))
-        if extras:
-            return "%s (%s)" % (self.message, ", ".join(extras))
-        else:
-            return self.message
-
-    def get_oid(self):
-        return self.oid
-
-    def get_class_name(self):
-        return self.class_name
-
-    def get_old_serial(self):
-        return self.serials[1]
-
-    def get_new_serial(self):
-        return self.serials[0]
-
-    def get_serials(self):
-        return self.serials
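-
-# A hedged usage sketch (names illustrative): a caller typically catches the
-# error, inspects it via the accessors above, aborts, and resubmits.
-#
-#   try:
-#       transaction.commit()
-#   except ConflictError, err:
-#       oid = err.get_oid()             # 8-byte packed oid, or None
-#       old = err.get_old_serial()      # revision this txn started with
-#       new = err.get_new_serial()      # currently committed revision
-#       transaction.abort()             # then resubmit the transaction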
-
-class ReadConflictError(ConflictError):
-    """Conflict detected when object was loaded.
-
-    An attempt was made to read an object that has changed in another
-    transaction (eg. another thread or process).
-    """
-    def __init__(self, message=None, object=None, serials=None):
-        if message is None:
-            message = "database read conflict error"
-        ConflictError.__init__(self, message=message, object=object,
-                               serials=serials)
-
-class BTreesConflictError(ConflictError):
-    """A special subclass for BTrees conflict errors."""
-
-    msgs = [# 0; i2 or i3 bucket split; positions are all -1
-            'Conflicting bucket split',
-
-            # 1; keys the same, but i2 and i3 values differ, and both values
-            # differ from i1's value
-            'Conflicting changes',
-
-            # 2; i1's value changed in i2, but key+value deleted in i3
-            'Conflicting delete and change',
-
-            # 3; i1's value changed in i3, but key+value deleted in i2
-            'Conflicting delete and change',
-
-            # 4; i1 and i2 both added the same key, or both deleted the
-            # same key
-            'Conflicting inserts or deletes',
-
-            # 5;  i2 and i3 both deleted the same key
-            'Conflicting deletes',
-
-            # 6; i2 and i3 both added the same key
-            'Conflicting inserts',
-
-            # 7; i2 and i3 both deleted the same key, or i2 changed the value
-            # associated with a key and i3 deleted that key
-            'Conflicting deletes, or delete and change',
-
-            # 8; i2 and i3 both deleted the same key, or i3 changed the value
-            # associated with a key and i2 deleted that key
-            'Conflicting deletes, or delete and change',
-
-            # 9; i2 and i3 both deleted the same key
-            'Conflicting deletes',
-
-            # 10; i2 and i3 deleted all the keys, and didn't insert any,
-            # leaving an empty bucket; conflict resolution doesn't have
-            # enough info to unlink an empty bucket from its containing
-            # BTree correctly
-            'Empty bucket from deleting all keys',
-
-            # 11; conflicting changes in an internal BTree node
-            'Conflicting changes in an internal BTree node',
-
-            # 12; i2 or i3 was empty
-            'Empty bucket in a transaction',
-            ]
-
-    def __init__(self, p1, p2, p3, reason):
-        self.p1 = p1
-        self.p2 = p2
-        self.p3 = p3
-        self.reason = reason
-
-    def __repr__(self):
-        return "BTreesConflictError(%d, %d, %d, %d)" % (self.p1,
-                                                        self.p2,
-                                                        self.p3,
-                                                        self.reason)
-    def __str__(self):
-        return "BTrees conflict error at %d/%d/%d: %s" % (
-            self.p1, self.p2, self.p3, self.msgs[self.reason])
-
-class DanglingReferenceError(TransactionError):
-    """An object has a persistent reference to a missing object.
-
-    If an object is stored and it has a reference to another object
-    that does not exist (for example, it was deleted by pack), this
-    exception may be raised.  Whether a storage supports this feature,
-    it a quality of implementation issue.
-
-    Instance attributes:
-    referer: oid of the object being written
-    missing: referenced oid that does not have a corresponding object
-    """
-
-    def __init__(self, Aoid, Boid):
-        self.referer = Aoid
-        self.missing = Boid
-
-    def __str__(self):
-        return "from %s to %s" % (oid_repr(self.referer),
-                                  oid_repr(self.missing))
-
-class VersionError(POSError):
-    """An error in handling versions occurred."""
-
-class VersionCommitError(VersionError):
-    """An invalid combination of versions was used in a version commit."""
-
-class VersionLockError(VersionError, TransactionError):
-    """Modification to an object modified in an unsaved version.
-
-    An attempt was made to modify an object that has been modified in an
-    unsaved version.
-    """
-
-class UndoError(POSError):
-    """An attempt was made to undo a non-undoable transaction."""
-
-    def __init__(self, reason, oid=None):
-        self._reason = reason
-        self._oid = oid
-
-    def __str__(self):
-        return _fmt_undo(self._oid, self._reason)
-
-class MultipleUndoErrors(UndoError):
-    """Several undo errors occured during a single transaction."""
-
-    def __init__(self, errs):
-        # provide a reason and oid for clients that only look at that
-        UndoError.__init__(self, *errs[0])
-        self._errs = errs
-
-    def __str__(self):
-        return "\n".join([_fmt_undo(*pair) for pair in self._errs])
-
-class StorageError(POSError):
-    """Base class for storage based exceptions."""
-
-class StorageTransactionError(StorageError):
-    """An operation was invoked for an invalid transaction or state."""
-
-class StorageSystemError(StorageError):
-    """Panic! Internal storage error!"""
-
-class MountedStorageError(StorageError):
-    """Unable to access mounted storage."""
-
-class ReadOnlyError(StorageError):
-    """Unable to modify objects in a read-only storage."""
-
-class TransactionTooLargeError(StorageTransactionError):
-    """The transaction exhausted some finite storage resource."""
-
-class ExportError(POSError):
-    """An export file doesn't have the right format."""
-
-class Unsupported(POSError):
-    """A feature was used that is not supported by the storage."""
-
-class InvalidObjectReference(POSError):
-    """An object contains an invalid reference to another object.
-
-    An invalid reference may be one of:
-
-    o A reference to a wrapped persistent object.
-
-    o A reference to an object in a different database connection.
-
-    TODO:  The exception ought to have a member that is the invalid object.
-    """
-
-class ConnectionStateError(POSError):
-    """A Connection isn't in the required state for an operation.
-
-    o An operation such as a load is attempted on a closed connection.
-
-    o An attempt to close a connection is made while the connection is
-      still joined to a transaction (for example, a transaction is in
-      progress, with uncommitted modifications in the connection).
-    """
diff --git a/branches/bug1734/src/ZODB/SETUP.cfg b/branches/bug1734/src/ZODB/SETUP.cfg
deleted file mode 100644
index 14399d01..00000000
--- a/branches/bug1734/src/ZODB/SETUP.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-# Extension information for zpkg.
-
-<extension winlock>
-  source winlock.c
-</extension>
diff --git a/branches/bug1734/src/ZODB/TmpStore.py b/branches/bug1734/src/ZODB/TmpStore.py
deleted file mode 100644
index 3e0f1633..00000000
--- a/branches/bug1734/src/ZODB/TmpStore.py
+++ /dev/null
@@ -1,126 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-from ZODB import POSException
-from ZODB.utils import p64, u64, z64
-
-import tempfile
-
-class TmpStore:
-    """A storage to support subtransactions."""
-
-    _bver = ''
-
-    def __init__(self, base_version, storage):
-        self._transaction = None
-        self._storage = storage
-        if base_version:
-            self._bver = base_version
-        self._file = tempfile.TemporaryFile()
-        # _pos: current file position
-        # _tpos: file position at last commit point
-        self._pos = self._tpos = 0L
-        # _index: map oid to pos of last committed version
-        self._index = {}
-        # _tindex: map oid to pos for new updates
-        self._tindex = {}
-        self._creating = []
-
-    def close(self):
-        self._file.close()
-
-    def getName(self):
-        return self._storage.getName()
-
-    def getSize(self):
-        return self._pos
-
-    def load(self, oid, version):
-        pos = self._index.get(oid)
-        if pos is None:
-            return self._storage.load(oid, self._bver)
-        self._file.seek(pos)
-        h = self._file.read(8)
-        oidlen = u64(h)
-        read_oid = self._file.read(oidlen)
-        if read_oid != oid:
-            raise POSException.StorageSystemError('Bad temporary storage')
-        h = self._file.read(16)
-        size = u64(h[8:])
-        serial = h[:8]
-        return self._file.read(size), serial
-
-    def sortKey(self):
-        return self._storage.sortKey()
-
-    # TODO: clarify difference between self._storage & self._db._storage
-
-    def modifiedInVersion(self, oid):
-        if self._index.has_key(oid):
-            return self._bver
-        return self._storage.modifiedInVersion(oid)
-
-    def new_oid(self):
-        return self._storage.new_oid()
-
-    def registerDB(self, db, limit):
-        pass
-
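-
-    # Record layout used by store() below and parsed back by load() above:
-    #
-    #   8 bytes     p64(len(oid))
-    #   len(oid)    oid
-    #   8 bytes     serial
-    #   8 bytes     p64(len(data))
-    #   len(data)   data (the pickle)
-    #
-    # _tindex/_index map each oid to the file position of its record.
-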
-    def store(self, oid, serial, data, version, transaction):
-        if transaction is not self._transaction:
-            raise POSException.StorageTransactionError(self, transaction)
-        self._file.seek(self._pos)
-        l = len(data)
-        if serial is None:
-            serial = z64
-        header = p64(len(oid)) + oid + serial + p64(l)
-        self._file.write(header)
-        self._file.write(data)
-        self._tindex[oid] = self._pos
-        self._pos += l + len(header)
-        return serial
-
-    def tpc_abort(self, transaction):
-        if transaction is not self._transaction:
-            return
-        self._tindex.clear()
-        self._transaction = None
-        self._pos = self._tpos
-
-    def tpc_begin(self, transaction):
-        if self._transaction is transaction:
-            return
-        self._transaction = transaction
-        self._tindex.clear() # Just to be sure!
-        self._pos = self._tpos
-
-    def tpc_vote(self, transaction):
-        pass
-
-    def tpc_finish(self, transaction, f=None):
-        if transaction is not self._transaction:
-            return
-        if f is not None:
-            f()
-        self._index.update(self._tindex)
-        self._tindex.clear()
-        self._tpos = self._pos
-
-    def undoLog(self, first, last, filter=None):
-        return ()
-
-    def versionEmpty(self, version):
-        # TODO: what is this supposed to do?
-        if version == self._bver:
-            return len(self._index)
diff --git a/branches/bug1734/src/ZODB/UndoLogCompatible.py b/branches/bug1734/src/ZODB/UndoLogCompatible.py
deleted file mode 100644
index 1c4f8785..00000000
--- a/branches/bug1734/src/ZODB/UndoLogCompatible.py
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Provide backward compatibility with storages that only have undoLog()."""
-
-
-class UndoLogCompatible:
-
-    def undoInfo(self, first=0, last=-20, specification=None):
-        if specification:
-            def filter(desc, spec=specification.items()):
-                get=desc.get
-                for k, v in spec:
-                    if get(k, None) != v:
-                        return 0
-                return 1
-        else: filter=None
-
-        return self.undoLog(first, last, filter)
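-
-# A hedged usage sketch ('user_name' is a standard undo-record key; the
-# storage object is illustrative):
-#
-#   entries = storage.undoInfo(0, -20, specification={'user_name': 'alice'})
-#   # same as undoLog(0, -20, filter) with a filter that accepts only
-#   # descriptions whose 'user_name' equals 'alice'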
diff --git a/branches/bug1734/src/ZODB/ZApplication.py b/branches/bug1734/src/ZODB/ZApplication.py
deleted file mode 100644
index 4a3d72d7..00000000
--- a/branches/bug1734/src/ZODB/ZApplication.py
+++ /dev/null
@@ -1,88 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Implement an bobo_application object that is BoboPOS3 aware
-
-This module provides a wrapper that causes a database connection to be created
-and used when bobo publishes a bobo_application object.
-"""
-
-import transaction
-
-connection_open_hooks = []
-
-class ZApplicationWrapper:
-
-    def __init__(self, db, name, klass= None, klass_args= (),
-                 version_cookie_name=None):
-        self._stuff = db, name, version_cookie_name
-        if klass is not None:
-            conn=db.open()
-            root=conn.root()
-            if not root.has_key(name):
-                root[name]=klass()
-                transaction.commit()
-            conn.close()
-            self._klass=klass
-
-
-    # This hack is to overcome a bug in Bobo!
-    def __getattr__(self, name):
-        return getattr(self._klass, name)
-
-    def __bobo_traverse__(self, REQUEST=None, name=None):
-        db, aname, version_support = self._stuff
-        if version_support is not None and REQUEST is not None:
-            version=REQUEST.get(version_support,'')
-        else: version=''
-        conn=db.open(version)
-
-        if connection_open_hooks:
-            for hook in connection_open_hooks:
-                hook(conn)
-
-        # arrange for the connection to be closed when the request goes away
-        cleanup = Cleanup(conn)
-        REQUEST._hold(cleanup)
-
-        conn.setDebugInfo(REQUEST.environ, REQUEST.other)
-
-        v=conn.root()[aname]
-
-        if name is not None:
-            if hasattr(v, '__bobo_traverse__'):
-                return v.__bobo_traverse__(REQUEST, name)
-
-            if hasattr(v,name): return getattr(v,name)
-            return v[name]
-
-        return v
-
-
-    def __call__(self, connection=None):
-        db, aname, version_support = self._stuff
-
-        if connection is None:
-            connection=db.open()
-        elif isinstance(connection, basestring):
-            connection=db.open(connection)
-
-        return connection.root()[aname]
-
-
-class Cleanup:
-    def __init__(self, jar):
-        self._jar = jar
-
-    def __del__(self):
-        self._jar.close()
diff --git a/branches/bug1734/src/ZODB/__init__.py b/branches/bug1734/src/ZODB/__init__.py
deleted file mode 100644
index 5d8b04cd..00000000
--- a/branches/bug1734/src/ZODB/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-# The next line must use double quotes, so release.py recognizes it.
-__version__ = "3.4a0"
-
-import sys
-import __builtin__
-
-from persistent import TimeStamp
-from persistent import list
-from persistent import mapping
-
-# Backward compat for old imports.
-sys.modules['ZODB.TimeStamp'] = sys.modules['persistent.TimeStamp']
-sys.modules['ZODB.PersistentMapping'] = sys.modules['persistent.mapping']
-sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list']
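-# So, for example, a legacy "from ZODB.PersistentMapping import
-# PersistentMapping" still resolves, now to the class in persistent.mapping.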
-
-del mapping, list, sys
-
-from DB import DB
-
-# TODO:  get_transaction() scheduled to go away in ZODB 3.6.
-from transaction import get_transaction
-__builtin__.get_transaction = get_transaction
-
-del __builtin__
diff --git a/branches/bug1734/src/ZODB/broken.py b/branches/bug1734/src/ZODB/broken.py
deleted file mode 100644
index 0777ae0f..00000000
--- a/branches/bug1734/src/ZODB/broken.py
+++ /dev/null
@@ -1,337 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Broken object support
-
-$Id$
-"""
-
-import sys
-import persistent
-
-broken_cache = {}
-
-class Broken(object):
-    """Broken object base class
-
-       Broken objects are placeholders for objects that can no longer be
-       created because their class has gone away.
-
-       Broken objects don't really do much of anything, except hold their
-       state.   The Broken class is used as a base class for creating
-       classes in lieu of missing classes::
-
-         >>> Atall = type('Atall', (Broken, ), {'__module__': 'not.there'})
-
-       The only thing the class can be used for is to create new objects::
-
-         >>> Atall()
-         <broken not.there.Atall instance>
-         >>> Atall().__Broken_newargs__
-         ()
-         >>> Atall().__Broken_initargs__
-         ()
-
-         >>> Atall(1, 2).__Broken_newargs__
-         (1, 2)
-         >>> Atall(1, 2).__Broken_initargs__
-         (1, 2)
-
-         >>> a = Atall.__new__(Atall, 1, 2)
-         >>> a
-         <broken not.there.Atall instance>
-         >>> a.__Broken_newargs__
-         (1, 2)
-         >>> a.__Broken_initargs__
-
-       You can't modify broken objects::
-
-         >>> a.x = 1
-         Traceback (most recent call last):
-         ...
-         BrokenModified: Can't change broken objects
-
-       But you can set their state::
-
-         >>> a.__setstate__({'x': 1, })
-
-       You can pickle broken objects::
-
-         >>> r = a.__reduce__()
-         >>> len(r)
-         3
-         >>> r[0] is rebuild
-         True
-         >>> r[1]
-         ('not.there', 'Atall', 1, 2)
-         >>> r[2]
-         {'x': 1}
-
-         >>> import cPickle
-         >>> a2 = cPickle.loads(cPickle.dumps(a, 1))
-         >>> a2
-         <broken not.there.Atall instance>
-         >>> a2.__Broken_newargs__
-         (1, 2)
-         >>> a2.__Broken_initargs__
-         >>> a2.__Broken_state__
-         {'x': 1}
-
-       Cleanup::
-
-         >>> broken_cache.clear()
-       """
-
-    __Broken_state__ = __Broken_initargs__ = None
-
-    __name__ = 'broken object'
-
-    def __new__(class_, *args):
-        result = object.__new__(class_)
-        result.__dict__['__Broken_newargs__'] = args
-        return result
-
-    def __init__(self, *args):
-        self.__dict__['__Broken_initargs__'] = args
-
-    def __reduce__(self):
-        """We pickle broken objects in hope of being able to fix them later
-        """
-        return (rebuild,
-                ((self.__class__.__module__, self.__class__.__name__)
-                 + self.__Broken_newargs__),
-                self.__Broken_state__,
-                )
-
-    def __setstate__(self, state):
-        self.__dict__['__Broken_state__'] = state
-
-    def __repr__(self):
-        return "<broken %s.%s instance>" % (
-            self.__class__.__module__, self.__class__.__name__)
-
-    def __setattr__(self, name, value):
-        raise BrokenModified("Can't change broken objects")
-
-def find_global(modulename, globalname,
-                # These are *not* optimizations. Callers can override these.
-                Broken=Broken, type=type,
-                ):
-    """Find a global object, returning a broken class if it can't be found.
-
-       This function looks up global variable in modules::
-
-         >>> import sys
-         >>> find_global('sys', 'path') is sys.path
-         True
-
-       If an object can't be found, a broken class is returned::
-
-         >>> broken = find_global('ZODB.not.there', 'atall')
-         >>> issubclass(broken, Broken)
-         True
-         >>> broken.__module__
-         'ZODB.not.there'
-         >>> broken.__name__
-         'atall'
-
-       Broken classes are cached::
-
-         >>> find_global('ZODB.not.there', 'atall') is broken
-         True
-
-       If we "repair" a missing global::
-
-         >>> class ZODBnotthere:
-         ...     atall = []
-
-         >>> sys.modules['ZODB.not'] = ZODBnotthere
-         >>> sys.modules['ZODB.not.there'] = ZODBnotthere
-
-       we can then get the repaired value::
-
-         >>> find_global('ZODB.not.there', 'atall') is ZODBnotthere.atall
-         True
-
-       Of course, if we break it again::
-
-         >>> del sys.modules['ZODB.not']
-         >>> del sys.modules['ZODB.not.there']
-
-       we get the broken value::
-
-         >>> find_global('ZODB.not.there', 'atall') is broken
-         True
-
-       Cleanup::
-
-         >>> broken_cache.clear()
-       """
-    try:
-        __import__(modulename)
-    except ImportError:
-        pass
-    else:
-        module = sys.modules[modulename]
-        try:
-            return getattr(module, globalname)
-        except AttributeError:
-            pass
-
-    try:
-        return broken_cache[(modulename, globalname)]
-    except KeyError:
-        pass
-
-    class_ = type(globalname, (Broken, ), {'__module__': modulename})
-    broken_cache[(modulename, globalname)] = class_
-    return class_
-
-def rebuild(modulename, globalname, *args):
-    """Recreate a broken object, possibly recreating the missing class
-
-       This functions unpickles broken objects::
-
-         >>> broken = rebuild('ZODB.notthere', 'atall', 1, 2)
-         >>> broken
-         <broken ZODB.notthere.atall instance>
-         >>> broken.__Broken_newargs__
-         (1, 2)
-
-       If we "repair" the brokenness::
-
-         >>> class notthere: # fake notthere module
-         ...     class atall(object):
-         ...         def __new__(self, *args):
-         ...             ob = object.__new__(self)
-         ...             ob.args = args
-         ...             return ob
-         ...         def __repr__(self):
-         ...             return 'atall %s %s' % self.args
-
-         >>> sys.modules['ZODB.notthere'] = notthere
-
-         >>> rebuild('ZODB.notthere', 'atall', 1, 2)
-         atall 1 2
-
-         >>> del sys.modules['ZODB.notthere']
-
-       Cleanup::
-
-         >>> broken_cache.clear()
-
-       """
-    class_ = find_global(modulename, globalname)
-    return class_.__new__(class_, *args)
-
-class BrokenModified(TypeError):
-    """Attempt to modify a broken object
-    """
-
-class PersistentBroken(Broken, persistent.Persistent):
-    r"""Persistent broken objects
-
-        Persistent broken objects are used for broken objects that are
-        also persistent.  In addition to having to track the original
-        object data, they need to handle persistent meta data.
-
-        Persistent broken classes are created from existing broken classes
-        using the persistentBroken function::
-
-          >>> Atall = type('Atall', (Broken, ), {'__module__': 'not.there'})
-          >>> PAtall = persistentBroken(Atall)
-
-        (Note that we always get the *same* persistent broken class
-         for a given broken class::
-
-          >>> persistentBroken(Atall) is PAtall
-          True
-
-         )
-
-        Persistent broken classes work a lot like broken classes::
-
-          >>> a = PAtall.__new__(PAtall, 1, 2)
-          >>> a
-          <persistent broken not.there.Atall instance None>
-          >>> a.__Broken_newargs__
-          (1, 2)
-          >>> a.__Broken_initargs__
-          >>> a.x = 1
-          Traceback (most recent call last):
-          ...
-          BrokenModified: Can't change broken objects
-
-        Unlike regular broken objects, persistent broken objects keep
-        track of persistence meta data:
-
-          >>> a._p_oid = '\0\0\0\0****'
-          >>> a
-          <persistent broken not.there.Atall instance '\x00\x00\x00\x00****'>
-
-        and persistent broken objects aren't directly picklable:
-
-          >>> a.__reduce__()
-          Traceback (most recent call last):
-          ...
-          BrokenModified: """ \
-        r"""<persistent broken not.there.Atall instance '\x00\x00\x00\x00****'>
-
-        but you can get their state:
-
-          >>> a.__setstate__({'y': 2})
-          >>> a.__getstate__()
-          {'y': 2}
-
-       Cleanup::
-
-         >>> broken_cache.clear()
-
-        """
-
-    def __new__(class_, *args):
-        result = persistent.Persistent.__new__(class_)
-        result.__dict__['__Broken_newargs__'] = args
-        return result
-
-    def __reduce__(self, *args):
-        raise BrokenModified(self)
-
-    def __getstate__(self):
-        return self.__Broken_state__
-
-    def __setattr__(self, name, value):
-        if name.startswith('_p_'):
-            persistent.Persistent.__setattr__(self, name, value)
-        else:
-            raise BrokenModified("Can't change broken objects")
-
-    def __repr__(self):
-        return "<persistent broken %s.%s instance %r>" % (
-            self.__class__.__module__, self.__class__.__name__,
-            self._p_oid)
-
-    def __getnewargs__(self):
-        return self.__Broken_newargs__
-
-def persistentBroken(class_):
-    try:
-        return class_.__dict__['__Broken_Persistent__']
-    except KeyError:
-        class_.__Broken_Persistent__ = (
-            type(class_.__name__,
-                 (PersistentBroken, class_),
-                 {'__module__': class_.__module__},
-                 )
-            )
-        return class_.__dict__['__Broken_Persistent__']
diff --git a/branches/bug1734/src/ZODB/collaborations.txt b/branches/bug1734/src/ZODB/collaborations.txt
deleted file mode 100644
index b6eff476..00000000
--- a/branches/bug1734/src/ZODB/collaborations.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-Participants
-    DB:  ZODB.DB.DB
-    C:  ZODB.Connection.Connection
-    S:  ZODB.FileStorage.FileStorage
-    T:  transaction.interfaces.ITransaction
-    TM:  transaction.interfaces.ITransactionManager
-    o1, o2, ...:  pre-existing persistent objects
-
-Scenario
-    """Simple fetch, modify, commit."""
-
-    DB.open()
-        create C
-        TM.registerSynch(C)
-    TM.begin()
-        create T
-    C.get(1) # fetches o1
-    C.get(2) # fetches o2
-    C.get(3) # fetches o3
-    o1.modify() # anything that modifies o1
-        C.register(o1)
-            T.join(C)
-    o2.modify()
-        C.register(o2)
-            # T.join(C) does not happen again
-    o1.modify()
-        # C.register(o1) doesn't happen again, because o1 was already
-        # in the changed state.
-    T.commit()
-        C.beforeCompletion(T)
-        C.tpc_begin(T)
-            S.tpc_begin(T)
-        C.commit(T)
-            S.store(1, ..., T)
-            S.store(2, ..., T)
-            # o3 is not stored, because it wasn't modified
-        C.tpc_vote(T)
-            S.tpc_vote(T)
-        C.tpc_finish(T)
-            S.tpc_finish(T, f) # f is a callback function, which arranges
-                               # to call DB.invalidate (next)
-                DB.invalidate(tid, {1: 1, 2: 1}, C)
-                    C2.invalidate(tid, {1: 1, 2: 1}) # for all connections
-                                                     # C2 to DB, where C2
-                                                     # is not C
-        TM.free(T)
-        C.afterCompletion(T)
-            C._flush_invalidations()
-            # Processes invalidations that may have come in from other
-            # transactions.
-
-
-Participants
-    DB:  ZODB.DB.DB
-    C:  ZODB.Connection.Connection
-    S:  ZODB.FileStorage.FileStorage
-    T:  transaction.interfaces.ITransaction
-    TM:  transaction.interfaces.ITransactionManager
-    o1, o2, ...:  pre-existing persistent objects
-
-Scenario
-    """Simple fetch, modify, abort."""
-
-    DB.open()
-        create C
-        TM.registerSynch(C)
-    TM.begin()
-        create T
-    C.get(1) # fetches o1
-    C.get(2) # fetches o2
-    C.get(3) # fetches o3
-    o1.modify() # anything that modifies o1
-        C.register(o1)
-            T.join(C)
-    o2.modify()
-        C.register(o2)
-            # T.join(C) does not happen again
-    o1.modify()
-        # C.register(o1) doesn't happen again, because o1 was already
-        # in the changed state.
-    T.abort()
-        C.beforeCompletion(T)
-        C.abort(T)
-            C._cache.invalidate(1)  # toss changes to o1
-            C._cache.invalidate(2)  # toss changes to o2
-            # o3 wasn't modified, and its cache entry isn't invalidated.
-        TM.free(T)
-        C.afterCompletion(T)
-            C._flush_invalidations()
-            # Processes invalidations that may have come in from other
-            # transactions.
-
-
-Participants:
-  T: ITransaction
-  o1, o2, o3: some persistent objects
-  C1, C2, C3: resource managers
-  S1, S2: Transaction savepoint objects
-  s11, s21, s22: resource-manager savepoints
-
-Scenario
-    """Rollback of a savepoint"""
-
-        create T
-        o1.modify()
-            C1.register(o1)
-                T.join(C1)
-        T.savepoint()
-            C1.savepoint()
-                return s11
-            return S1 = Savepoint(T, [s11])
-        o1.modify()
-            C1.register(o1)
-        o2.modify()
-            C2.register(o2)
-                T.join(C2)
-        T.savepoint()
-            C1.savepoint()
-                return s21
-            C2.savepoint()
-                return s22
-            return S2 = Savepoint(T, [s21, s22])
-        o3.modify()
-            C3.register(o3)
-                T.join(C3)
-        S1.rollback()
-            S2.rollback()
-                T.discard()
-                    C1.discard()
-                    C2.discard()
-                    C3.discard()
-                        o3.invalidate()
-            S2.discard()
-                s21.discard() # roll back changes since previous, which is s11
-                    C1.discard(s21)
-                        o1.invalidate()
-                        # truncates temporary storage to s21's position
-                s22.discard() # roll back changes since C2 joined; s22 was
-                              # C2's first savepoint
-                    C2.discard(s22)
-                        o2.invalidate()
-                        # truncates temporary storage to beginning, because
-                        # s22 was the first savepoint.  (Perhaps connection
-                        # savepoints record the log position before the
-                        # data were written, which is 0 in this case.)
-        T.commit()
-            C1.beforeCompletion(T)
-            C2.beforeCompletion(T)
-            C3.beforeCompletion(T)
-            C1.tpc_begin(T)
-                S1.tpc_begin(T)
-            C2.tpc_begin(T)
-            C3.tpc_begin(T)
-            C1.commit(T)
-                S1.store(1, ..., T)
-            C2.commit(T)
-            C3.commit(T)
-            C1.tpc_vote(T)
-                S1.tpc_vote(T)
-            C2.tpc_vote(T)
-            C3.tpc_vote(T)
-            C1.tpc_finish(T)
-                S1.tpc_finish(T, f) # f is a callback function, which arranges
-                                    # to call DB.invalidate (next)
-                    DB.invalidate(tid, {1: 1}, C)
-            TM.free(T)
-            C1.afterCompletion(T)
-                C1._flush_invalidations()
-            C2.afterCompletion(T)
-                C2._flush_invalidations()
-            C3.afterCompletion(T)
-                C3._flush_invalidations()
-
diff --git a/branches/bug1734/src/ZODB/component.xml b/branches/bug1734/src/ZODB/component.xml
deleted file mode 100644
index be40cf91..00000000
--- a/branches/bug1734/src/ZODB/component.xml
+++ /dev/null
@@ -1,161 +0,0 @@
-<component prefix="ZODB.config">
-
-  <!-- TODO needs descriptions for everything -->
-
-  <abstracttype name="ZODB.storage"/>
-  <abstracttype name="ZODB.database"/>
-
-  <sectiontype name="filestorage" datatype=".FileStorage"
-               implements="ZODB.storage">
-    <key name="path" required="yes">
-      <description>
-        Path name to the main storage file.  The names for
-        supplemental files, including index and lock files, will be
-        computed from this.
-      </description>
-    </key>
-    <key name="create" datatype="boolean" default="false">
-      <description>
-        Flag that indicates whether the storage should be truncated if
-        it already exists.
-      </description>
-    </key>
-    <key name="read-only" datatype="boolean" default="false">
-      <description>
-        If true, only reads may be executed against the storage.  Note
-        that the "pack" operation is not considered a write operation
-        and is still allowed on a read-only filestorage.
-      </description>
-    </key>
-    <key name="quota" datatype="byte-size">
-      <description>
-        Maximum allowed size of the storage file.  Operations which
-        would cause the size of the storage to exceed the quota will
-        result in a ZODB.FileStorage.FileStorageQuotaError being
-        raised.
-      </description>
-    </key>
-  </sectiontype>
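-
-  <!-- A hedged example of a filestorage section as it might appear in a
-       configuration file (the path is illustrative):
-
-         <filestorage>
-           path /var/zodb/Data.fs
-         </filestorage>
-  -->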
-
-  <sectiontype name="mappingstorage" datatype=".MappingStorage"
-               implements="ZODB.storage">
-    <key name="name" default="Mapping Storage"/>
-  </sectiontype>
-
-  <!-- The BDB storages probably need to be revised somewhat still.
-       The extension relationship seems a little odd.
-    -->
-  <sectiontype name="fullstorage" datatype=".BDBFullStorage"
-               implements="ZODB.storage">
-    <key name="envdir" required="yes" />
-    <key name="interval" datatype="time-interval" default="2m" />
-    <key name="kbyte" datatype="integer" default="0" />
-    <key name="min" datatype="integer" default="0" />
-    <key name="logdir" />
-    <key name="cachesize" datatype="byte-size" default="128MB" />
-    <key name="frequency" datatype="time-interval" default="0" />
-    <key name="packtime" datatype="time-interval" default="4h" />
-    <key name="gcpack" datatype="integer" default="0" />
-    <key name="read-only" datatype="boolean" default="off"/>
-  </sectiontype>
-
-  <sectiontype name="minimalstorage" datatype=".BDBMinimalStorage"
-               implements="ZODB.storage" extends="fullstorage"/>
-
-  <sectiontype name="zeoclient" datatype=".ZEOClient"
-               implements="ZODB.storage">
-    <multikey name="server" datatype="socket-address" required="yes"/>
-    <key name="storage" default="1">
-      <description>
-        The name of the storage that the client wants to use.  If the
-        ZEO server serves more than one storage, the client selects
-        the storage it wants to use by name.  The default name is '1',
-        which is also the default name for the ZEO server.
-      </description>
-    </key>
-    <key name="cache-size" datatype="byte-size" default="20MB">
-      <description>
-        The maximum size of the client cache, in bytes, KB or MB.
-      </description>
-    </key>
-    <key name="name" default="">
-      <description>
-        The storage name.  If unspecified, the address of the server
-        will be used as the name.
-      </description>
-    </key>
-    <key name="client">
-      <description>
-        Enables persistent cache files.  The string passed here is
-        used to construct the cache filenames.  If it is not
-        specified, the client creates a temporary cache that will
-        only be used by the current object.
-      </description>
-    </key>
-    <key name="var">
-      <description>
-        The directory where persistent cache files are stored.  By
-        default cache files, if they are persistent, are stored in
-        the current directory.
-      </description>
-    </key>
-    <key name="min-disconnect-poll" datatype="integer" default="5">
-      <description>
-        The minimum delay, in seconds, between attempts to connect to
-        the server.  Defaults to 5 seconds.
-      </description>
-    </key>
-    <key name="max-disconnect-poll" datatype="integer" default="300">
-      <description>
-        The maximum delay, in seconds, between attempts to connect to
-        the server.  Defaults to 300 seconds.
-      </description>
-    </key>
-    <key name="wait" datatype="boolean" default="on">
-      <description>
-        A boolean indicating whether the constructor should wait
-        for the client to connect to the server and verify the cache
-        before returning.  The default is true.
-      </description>
-    </key>
-    <key name="read-only" datatype="boolean" default="off">
-      <description>
-        A flag indicating whether this should be a read-only storage,
-        defaulting to false (i.e. writing is allowed by default).
-      </description>
-    </key>
-    <key name="read-only-fallback" datatype="boolean" default="off">
-      <description>
-        A flag indicating whether a read-only remote storage should be
-        acceptable as a fallback when no writable storages are
-        available.  Defaults to false.  At most one of read_only and
-        read_only_fallback should be true.
-      </description>
-    </key>
-    <key name="realm" required="no">
-      <description>
-        The authentication realm of the server.  Some authentication
-        schemes use a realm to identify the logical set of usernames
-        that are accepted by this server.
-      </description>
-    </key>
-  </sectiontype>
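-
-  <!-- A hedged example of a zeoclient section (the address is illustrative):
-
-         <zeoclient>
-           server localhost:8100
-           cache-size 20MB
-         </zeoclient>
-  -->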
-
-  <sectiontype name="demostorage" datatype=".DemoStorage"
-               implements="ZODB.storage">
-    <key name="name" default="Demo Storage"/>
-    <section type="ZODB.storage" name="*" attribute="base"/>
-    <key name="quota" datatype="integer"/>
-  </sectiontype>
-
-
-  <sectiontype name="zodb" datatype=".ZODBDatabase"
-               implements="ZODB.database">
-    <section type="ZODB.storage" name="*" attribute="storage"/>
-    <key name="cache-size" datatype="integer" default="5000"/>
-    <key name="pool-size" datatype="integer" default="7"/>
-    <key name="version-pool-size" datatype="integer" default="3"/>
-    <key name="version-cache-size" datatype="integer" default="100"/>
-  </sectiontype>
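-
-  <!-- Example (illustrative) database section for a config file; assumes
-       a filestorage section type is defined elsewhere in this component:
-
-         <zodb>
-           cache-size 5000
-           <filestorage>
-             path Data.fs
-           </filestorage>
-         </zodb>
-  -->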
-
-</component>
diff --git a/branches/bug1734/src/ZODB/config.py b/branches/bug1734/src/ZODB/config.py
deleted file mode 100644
index ac5580b2..00000000
--- a/branches/bug1734/src/ZODB/config.py
+++ /dev/null
@@ -1,177 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Open database and storage from a configuration.
-
-$Id$"""
-
-import os
-from cStringIO import StringIO
-
-import ZConfig
-
-import ZODB
-
-db_schema_path = os.path.join(ZODB.__path__[0], "config.xml")
-_db_schema = None
-
-s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
-_s_schema = None
-
-def getDbSchema():
-    global _db_schema
-    if _db_schema is None:
-        _db_schema = ZConfig.loadSchema(db_schema_path)
-    return _db_schema
-
-def getStorageSchema():
-    global _s_schema
-    if _s_schema is None:
-        _s_schema = ZConfig.loadSchema(s_schema_path)
-    return _s_schema
-
-def databaseFromString(s):
-    return databaseFromFile(StringIO(s))
-
-def databaseFromFile(f):
-    config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
-    return databaseFromConfig(config.database)
-
-def databaseFromURL(url):
-    config, handler = ZConfig.loadConfig(getDbSchema(), url)
-    return databaseFromConfig(config.database)
-
-def databaseFromConfig(section):
-    return section.open()
-
-def storageFromString(s):
-    return storageFromFile(StringIO(s))
-
-def storageFromFile(f):
-    config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
-    return storageFromConfig(config.storage)
-
-def storageFromURL(url):
-    config, handler = ZConfig.loadConfig(getStorageSchema(), url)
-    return storageFromConfig(config.storage)
-
-def storageFromConfig(section):
-    return section.open()
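-
-# Example (illustrative sketch): open a database from a configuration
-# string.  Assumes a <filestorage> section type is defined in ZODB's
-# component schema:
-#
-#     db = databaseFromString("""
-#     <zodb>
-#       <filestorage>
-#         path Data.fs
-#       </filestorage>
-#     </zodb>
-#     """)
-#     conn = db.open()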
-
-
-class BaseConfig:
-    """Object representing a configured storage or database.
-
-    Methods:
-
-    open() -- open and return the configured object
-
-    Attributes:
-
-    name   -- name of the storage
-
-    """
-
-    def __init__(self, config):
-        self.config = config
-        self.name = config.getSectionName()
-
-    def open(self):
-        """Open and return the storage object."""
-        raise NotImplementedError
-
-class ZODBDatabase(BaseConfig):
-
-    def open(self):
-        section = self.config
-        storage = section.storage.open()
-        try:
-            return ZODB.DB(storage,
-                           pool_size=section.pool_size,
-                           cache_size=section.cache_size,
-                           version_pool_size=section.version_pool_size,
-                           version_cache_size=section.version_cache_size)
-        except:
-            storage.close()
-            raise
-
-class MappingStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.MappingStorage import MappingStorage
-        return MappingStorage(self.config.name)
-
-class DemoStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.DemoStorage import DemoStorage
-        if self.config.base:
-            base = self.config.base.open()
-        else:
-            base = None
-        return DemoStorage(self.config.name,
-                           base=base,
-                           quota=self.config.quota)
-
-class FileStorage(BaseConfig):
-
-    def open(self):
-        from ZODB.FileStorage import FileStorage
-        return FileStorage(self.config.path,
-                           create=self.config.create,
-                           read_only=self.config.read_only,
-                           quota=self.config.quota)
-
-class ZEOClient(BaseConfig):
-
-    def open(self):
-        from ZEO.ClientStorage import ClientStorage
-        # config.server is a multikey of socket-address values
-        # where the value is a socket family, address tuple.
-        L = [server.address for server in self.config.server]
-        return ClientStorage(
-            L,
-            storage=self.config.storage,
-            cache_size=self.config.cache_size,
-            name=self.config.name,
-            client=self.config.client,
-            var=self.config.var,
-            min_disconnect_poll=self.config.min_disconnect_poll,
-            max_disconnect_poll=self.config.max_disconnect_poll,
-            wait=self.config.wait,
-            read_only=self.config.read_only,
-            read_only_fallback=self.config.read_only_fallback)
-
-class BDBStorage(BaseConfig):
-
-    def open(self):
-        from BDBStorage.BerkeleyBase import BerkeleyConfig
-        storageclass = self.get_storageclass()
-        bconf = BerkeleyConfig()
-        for name in dir(BerkeleyConfig):
-            if name.startswith('_'):
-                continue
-            setattr(bconf, name, getattr(self.config, name))
-        return storageclass(self.config.envdir, config=bconf)
-
-class BDBMinimalStorage(BDBStorage):
-
-    def get_storageclass(self):
-        import BDBStorage.BDBMinimalStorage
-        return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
-
-class BDBFullStorage(BDBStorage):
-
-    def get_storageclass(self):
-        import BDBStorage.BDBFullStorage
-        return BDBStorage.BDBFullStorage.BDBFullStorage
diff --git a/branches/bug1734/src/ZODB/config.xml b/branches/bug1734/src/ZODB/config.xml
deleted file mode 100644
index 28f8ad75..00000000
--- a/branches/bug1734/src/ZODB/config.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<schema prefix="ZODB.config">
-
-  <import package="ZODB"/>
-
-  <section type="ZODB.database" name="*" attribute="database"/>
-
-</schema>
diff --git a/branches/bug1734/src/ZODB/conversionhack.py b/branches/bug1734/src/ZODB/conversionhack.py
deleted file mode 100644
index b321f52f..00000000
--- a/branches/bug1734/src/ZODB/conversionhack.py
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import persistent.mapping
-
-class fixer:
-    def __of__(self, parent):
-        def __setstate__(state, self=parent):
-            self._container=state
-            del self.__setstate__
-        return __setstate__
-
-fixer=fixer()
-
-class hack: pass
-hack=hack()
-
-def __basicnew__():
-    r=persistent.mapping.PersistentMapping()
-    r.__setstate__=fixer
-    return r
-
-hack.__basicnew__=__basicnew__
diff --git a/branches/bug1734/src/ZODB/dbmStorage.py b/branches/bug1734/src/ZODB/dbmStorage.py
deleted file mode 100644
index 2f133b8a..00000000
--- a/branches/bug1734/src/ZODB/dbmStorage.py
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Very Simple dbm-based ZODB storage
-
-This storage provides for use of dbm files as storages that
-don't support versions or Undo.  This may be useful when implementing
-objects like hit counters that don't need or want to participate
-in undo or versions.
-"""
-
-from ZODB.utils import z64
-
-from MappingStorage import MappingStorage
-from BaseStorage import BaseStorage
-import anydbm, os
-
-class anydbmStorage(MappingStorage):
-
-    def __init__(self, filename, flag='r', mode=0666):
-        BaseStorage.__init__(self, filename)
-        self._index=anydbm.open(filename, flag, mode)
-        self._tindex=[]
-        keys=self._index.keys()
-        if keys: self._oid=max(keys)
-
-    def getSize(self):
-        # This is a little iffy, since we aren't entirely sure what the file is
-        self._lock_acquire()
-        try:
-            try:
-                return (os.stat(self.__name__+'.data')[6] +
-                        os.stat(self.__name__+'.dir')[6]
-                        )
-            except:
-                try: return os.stat(self.__name__)[6]
-                except: return 0
-        finally: self._lock_release()
-
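-# Example (illustrative): open a dbm-backed storage; the flag argument is
-# passed to anydbm.open(), so 'c' creates the file if it doesn't exist.
-#
-#     storage = anydbmStorage('counters.db', flag='c')
-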
-class gdbmStorage(anydbmStorage):
-
-    def __init__(self, filename, flag='r', mode=0666):
-
-        BaseStorage.__init__(self, filename)
-        import gdbm
-        self._index=index=gdbm.open(filename, flag[:1]+'f', mode)
-        self._tindex=[]
-
-        m=z64
-        oid=index.firstkey()
-        while oid != None:
-            m=max(m, oid)
-            oid=index.nextkey(oid)
-
-        self._oid=m
-
-    def getSize(self):
-        self._lock_acquire()
-        try: return os.stat(self.__name__)[6]
-        finally: self._lock_release()
-
-    def pack(self, t, referencesf):
-
-        self._lock_acquire()
-        try:
-            # Build an index of *only* those objects reachable
-            # from the root.
-            index=self._index
-            rootl=[z64]
-            pop=rootl.pop
-            pindex={}
-            referenced=pindex.has_key
-            while rootl:
-                oid=pop()
-                if referenced(oid): continue
-
-                # Scan non-version pickle for references
-                r=index[oid]
-                pindex[oid]=r
-                p=r[8:]
-                referencesf(p, rootl)
-
-            # Now delete any unreferenced entries:
-
-            deleted=[]
-            oid=index.firstkey()
-            while oid != None:
-                if not referenced(oid): deleted.append(oid)
-                oid=index.nextkey(oid)
-
-            pindex=referenced=None
-
-            for oid in deleted: del index[oid]
-
-            index.sync()
-            index.reorganize()
-
-        finally: self._lock_release()
-
-
-    def _finish(self, tid, user, desc, ext):
-
-        index=self._index
-        for oid, p in self._tindex: index[oid]=p
-        index.sync()
diff --git a/branches/bug1734/src/ZODB/fsIndex.py b/branches/bug1734/src/ZODB/fsIndex.py
deleted file mode 100644
index 956ce5fc..00000000
--- a/branches/bug1734/src/ZODB/fsIndex.py
+++ /dev/null
@@ -1,194 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Implement an OID to File-position (long integer) mapping."""
-
-# To save space, we do two things:
-#
-#     1. We split the keys (OIDS) into 6-byte prefixes and 2-byte suffixes.
-#        We use the prefixes as keys in a mapping from prefix to mappings
-#        of suffix to data:
-#
-#           data is  {prefix -> {suffix -> data}}
-#
-#     2. We limit the data size to 48 bits. This should allow databases
-#        as large as 256 terabytes.
-#
-# Most of the space is consumed by items in the mappings from 2-byte
-# suffix to 6-byte data. This should reduce the overall memory usage to
-# 8-16 bytes per OID.
-#
-# Since the mapping from suffix to data contains at most 256 entries,
-# we use a BTree bucket instead of a full BTree to store the results.
-#
-# We use p64 to convert integers to 8-byte strings and lop off the two
-# high-order bytes when saving. On loading data, we add the leading
-# bytes back before using u64 to convert the data back to (long)
-# integers.
-
-import struct
-
-from BTrees._fsBTree import fsBucket
-from BTrees.OOBTree import OOBTree
-
-# convert between numbers and six-byte strings
-
-def num2str(n):
-    return struct.pack(">Q", n)[2:]
-
-def str2num(s):
-    return struct.unpack(">Q", "\000\000" + s)[0]
-
-def prefix_plus_one(s):
-    num = str2num(s)
-    return num2str(num + 1)
-
-def prefix_minus_one(s):
-    num = str2num(s)
-    return num2str(num - 1)
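-
-# Example (illustrative): num2str(5) == '\x00\x00\x00\x00\x00\x05' and
-# str2num(num2str(5)) == 5, so prefix_plus_one('\x00' * 6) yields
-# '\x00\x00\x00\x00\x00\x01'.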
-
-class fsIndex(object):
-
-    def __init__(self):
-        self._data = OOBTree()
-
-    def __getitem__(self, key):
-        return str2num(self._data[key[:6]][key[6:]])
-
-    def get(self, key, default=None):
-        tree = self._data.get(key[:6], default)
-        if tree is default:
-            return default
-        v = tree.get(key[6:], default)
-        if v is default:
-            return default
-        return str2num(v)
-
-    def __setitem__(self, key, value):
-        value = num2str(value)
-        treekey = key[:6]
-        tree = self._data.get(treekey)
-        if tree is None:
-            tree = fsBucket()
-            self._data[treekey] = tree
-        tree[key[6:]] = value
-
-    def __len__(self):
-        r = 0
-        for tree in self._data.itervalues():
-            r += len(tree)
-        return r
-
-    def update(self, mapping):
-        for k, v in mapping.items():
-            self[k] = v
-
-    def has_key(self, key):
-        v = self.get(key, self)
-        return v is not self
-
-    def __contains__(self, key):
-        tree = self._data.get(key[:6])
-        if tree is None:
-            return False
-        v = tree.get(key[6:], None)
-        if v is None:
-            return False
-        return True
-
-    def clear(self):
-        self._data.clear()
-
-    def __iter__(self):
-        for prefix, tree in self._data.iteritems():
-            for suffix in tree:
-                yield prefix + suffix
-
-    iterkeys = __iter__
-
-    def keys(self):
-        return list(self.iterkeys())
-
-    def iteritems(self):
-        for prefix, tree in self._data.iteritems():
-            for suffix, value in tree.iteritems():
-                yield (prefix + suffix, str2num(value))
-
-    def items(self):
-        return list(self.iteritems())
-
-    def itervalues(self):
-        for tree in self._data.itervalues():
-            for value in tree.itervalues():
-                yield str2num(value)
-
-    def values(self):
-        return list(self.itervalues())
-
-    # Comment below applies for the following minKey and maxKey methods
-    #
-    # Obscure:  what if `tree` is actually empty?  We're relying here on
-    # that this class doesn't implement __delitem__:  once a key gets
-    # into an fsIndex, the only way it can go away is by invoking
-    # clear().  Therefore nothing in _data.values() is ever empty.
-    #
-    # Note that because `tree` is an fsBTree, its minKey()/maxKey() methods are
-    # very efficient.
-
-    def minKey(self, key=None):
-        if key is None:
-            smallest_prefix = self._data.minKey()
-        else:
-            smallest_prefix = self._data.minKey(key[:6])
-
-        tree = self._data[smallest_prefix]
-
-        assert tree
-
-        if key is None:
-            smallest_suffix = tree.minKey()
-        else:
-            try:
-                smallest_suffix = tree.minKey(key[6:])
-            except ValueError: # 'empty tree' (no suffix >= arg)
-                next_prefix = prefix_plus_one(smallest_prefix)
-                smallest_prefix = self._data.minKey(next_prefix)
-                tree = self._data[smallest_prefix]
-                assert tree
-                smallest_suffix = tree.minKey()
-
-        return smallest_prefix + smallest_suffix
-
-    def maxKey(self, key=None):
-        if key is None:
-            biggest_prefix = self._data.maxKey()
-        else:
-            biggest_prefix = self._data.maxKey(key[:6])
-
-        tree = self._data[biggest_prefix]
-
-        assert tree
-
-        if key is None:
-            biggest_suffix = tree.maxKey()
-        else:
-            try:
-                biggest_suffix = tree.maxKey(key[6:])
-            except ValueError: # 'empty tree' (no suffix <= arg)
-                next_prefix = prefix_minus_one(biggest_prefix)
-                biggest_prefix = self._data.maxKey(next_prefix)
-                tree = self._data[biggest_prefix]
-                assert tree
-                biggest_suffix = tree.maxKey()
-
-        return biggest_prefix + biggest_suffix
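-
-# Example (illustrative): an fsIndex maps 8-byte oid strings to integer
-# file positions.
-#
-#     from ZODB.utils import p64
-#     index = fsIndex()
-#     index[p64(1)] = 12345
-#     assert index[p64(1)] == 12345
-#     assert p64(1) in index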
diff --git a/branches/bug1734/src/ZODB/fsrecover.py b/branches/bug1734/src/ZODB/fsrecover.py
deleted file mode 100644
index 32f7e61a..00000000
--- a/branches/bug1734/src/ZODB/fsrecover.py
+++ /dev/null
@@ -1,386 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Simple script for repairing damaged FileStorage files.
-
-Usage: %s [-f] [-v level] [-p] [-P seconds] input output
-
-Recover data from a FileStorage data file, skipping over damaged data.  Any
-damaged data will be lost.  This could lead to useless output if critical
-data is lost.
-
-Options:
-
-    -f
-       Overwrite output file even if it exists.
-
-    -v level
-
-       Set the verbosity level:
-
-         0 -- show progress indicator (default)
-
-         1 -- show transaction times and sizes
-
-         2 -- show transaction times and sizes, and show object (record)
-              ids, versions, and sizes
-
-    -p
-
-       Copy partial transactions.  If a data record in the middle of a
-       transaction is bad, the data up to the bad data are packed.  The
-       output record is marked as packed.  If this option is not used,
-       transactions with any bad data are skipped.
-
-    -P t
-
-       Pack data to t seconds in the past.  Note that if the "-p" option is
-       used, then t should be 0.
-
-
-Important:  The ZODB package must be importable.  You may need to adjust
-            PYTHONPATH accordingly.
-"""
-
-# Algorithm:
-#
-#     position to start of input
-#     while 1:
-#         if end of file:
-#             break
-#         try:
-#             copy_transaction
-#          except:
-#             scan for transaction
-#             continue
-
-import sys
-import os
-import getopt
-import time
-from struct import unpack
-from cPickle import loads
-
-try:
-    import ZODB
-except ImportError:
-    if os.path.exists('ZODB'):
-        sys.path.append('.')
-    elif os.path.exists('FileStorage.py'):
-        sys.path.append('..')
-    import ZODB
-
-import ZODB.FileStorage
-from ZODB.utils import t32, u64
-from ZODB.FileStorage import RecordIterator
-
-from persistent.TimeStamp import TimeStamp
-
-
-def die(mess='', show_docstring=False):
-    if mess:
-        print >> sys.stderr, mess + '\n'
-    if show_docstring:
-        print >> sys.stderr, __doc__ % sys.argv[0]
-    sys.exit(1)
-
-class ErrorFound(Exception):
-    pass
-
-def error(mess, *args):
-    raise ErrorFound(mess % args)
-
-def read_txn_header(f, pos, file_size, outp, ltid):
-    # Read the transaction record
-    f.seek(pos)
-    h = f.read(23)
-    if len(h) < 23:
-        raise EOFError
-
-    tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h)
-    if el < 0: el=t32-el
-
-    tl = u64(stl)
-
-    if pos + (tl + 8) > file_size:
-        error("bad transaction length at %s", pos)
-
-    if tl < (23 + ul + dl + el):
-        error("invalid transaction length, %s, at %s", tl, pos)
-
-    if ltid and tid < ltid:
-        error("time-stamp reducation %s < %s, at %s", u64(tid), u64(ltid), pos)
-
-    if status == "c":
-        truncate(f, pos, file_size, outp)
-        raise EOFError
-
-    if status not in " up":
-        error("invalid status, %r, at %s", status, pos)
-
-    tpos = pos
-    tend = tpos + tl
-
-    if status == "u":
-        # Undone transaction, skip it
-        f.seek(tend)
-        h = f.read(8)
-        if h != stl:
-            error("inconsistent transaction length at %s", pos)
-        pos = tend + 8
-        return pos, None, tid
-
-    pos = tpos+(23+ul+dl+el)
-    user = f.read(ul)
-    description = f.read(dl)
-    if el:
-        try: e=loads(f.read(el))
-        except: e={}
-    else: e={}
-
-    result = RecordIterator(tid, status, user, description, e, pos, tend,
-                            f, tpos)
-    pos = tend
-
-    # Read the (intentionally redundant) transaction length
-    f.seek(pos)
-    h = f.read(8)
-    if h != stl:
-        error("redundant transaction length check failed at %s", pos)
-    pos += 8
-
-    return pos, result, tid
-
-def truncate(f, pos, file_size, outp):
-    """Copy data from pos to end of f to a .trNNN file."""
-
-    i = 0
-    while 1:
-        # Find the first unused .trNNN name.
-        trname = outp + ".tr%d" % i
-        if os.path.exists(trname):
-            i += 1
-        else:
-            break
-    tr = open(trname, "wb")
-    copy(f, tr, file_size - pos)
-    f.seek(pos)
-    tr.close()
-
-def copy(src, dst, n):
-    while n:
-        buf = src.read(8096)
-        if not buf:
-            break
-        if len(buf) > n:
-            buf = buf[:n]
-        dst.write(buf)
-        n -= len(buf)
-
-def scan(f, pos):
-    """Return a potential transaction location following pos in f.
-
-    This routine scans forward from pos looking for the last data
-    record in a transaction.  A period '.' always occurs at the end of
-    a pickle, and an 8-byte transaction length follows the last
-    pickle.  If a period is followed by a plausible 8-byte transaction
-    length, assume that we have found the end of a transaction.
-
-    The caller should try to verify that the returned location is
-    actually a transaction header.
-    """
-    while 1:
-        f.seek(pos)
-        data = f.read(8096)
-        if not data:
-            return 0
-
-        s = 0
-        while 1:
-            l = data.find(".", s)
-            if l < 0:
-                pos += len(data)
-                break
-            # If we are less than 8 bytes from the end of the
-            # string, we need to read more data.
-            s = l + 1
-            if s > len(data) - 8:
-                pos += l
-                break
-            tl = u64(data[s:s+8])
-            if tl < pos:
-                return pos + s + 8
-
-def iprogress(i):
-    if i % 2:
-        print ".",
-    else:
-        print (i/2) % 10,
-    sys.stdout.flush()
-
-def progress(p):
-    for i in range(p):
-        iprogress(i)
-
-def main():
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "fv:pP:")
-    except getopt.error, msg:
-        die(str(msg), show_docstring=True)
-
-    if len(args) != 2:
-        die("two positional arguments required", show_docstring=True)
-    inp, outp = args
-
-    force = partial = False
-    verbose = 0
-    pack = None
-    for opt, v in opts:
-        if opt == "-v":
-            verbose = int(v)
-        elif opt == "-p":
-            partial = True
-        elif opt == "-f":
-            force = True
-        elif opt == "-P":
-            pack = time.time() - float(v)
-
-    recover(inp, outp, verbose, partial, force, pack)
-
-def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
-    print "Recovering", inp, "into", outp
-
-    if os.path.exists(outp) and not force:
-        die("%s exists" % outp)
-
-    f = open(inp, "rb")
-    if f.read(4) != ZODB.FileStorage.packed_version:
-        die("input is not a file storage")
-
-    f.seek(0,2)
-    file_size = f.tell()
-
-    ofs = ZODB.FileStorage.FileStorage(outp, create=1)
-    _ts = None
-    ok = 1
-    prog1 = 0
-    undone = 0
-
-    pos = 4L
-    ltid = None
-    while pos:
-        try:
-            npos, txn, tid = read_txn_header(f, pos, file_size, outp, ltid)
-        except EOFError:
-            break
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except Exception, err:
-            print "error reading txn header:", err
-            if not verbose:
-                progress(prog1)
-            pos = scan(f, pos)
-            if verbose > 1:
-                print "looking for valid txn header at", pos
-            continue
-        ltid = tid
-
-        if txn is None:
-            undone = undone + npos - pos
-            pos = npos
-            continue
-        else:
-            pos = npos
-
-        tid = txn.tid
-
-        if _ts is None:
-            _ts = TimeStamp(tid)
-        else:
-            t = TimeStamp(tid)
-            if t <= _ts:
-                if ok:
-                    print ("Time stamps out of order %s, %s" % (_ts, t))
-                ok = 0
-                _ts = t.laterThan(_ts)
-                tid = `_ts`
-            else:
-                _ts = t
-                if not ok:
-                    print ("Time stamps back in order %s" % (t))
-                    ok = 1
-
-        ofs.tpc_begin(txn, tid, txn.status)
-
-        if verbose:
-            print "begin", pos, _ts,
-            if verbose > 1:
-                print
-            sys.stdout.flush()
-
-        nrec = 0
-        try:
-            for r in txn:
-                if verbose > 1:
-                    if r.data is None:
-                        l = "bp"
-                    else:
-                        l = len(r.data)
-
-                    print "%7d %s %s" % (u64(r.oid), l, r.version)
-                ofs.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
-                            txn)
-                nrec += 1
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except Exception, err:
-            if partial and nrec:
-                ofs._status = "p"
-                ofs.tpc_vote(txn)
-                ofs.tpc_finish(txn)
-                if verbose:
-                    print "partial"
-            else:
-                ofs.tpc_abort(txn)
-            print "error copying transaction:", err
-            if not verbose:
-                progress(prog1)
-            pos = scan(f, pos)
-            if verbose > 1:
-                print "looking for valid txn header at", pos
-        else:
-            ofs.tpc_vote(txn)
-            ofs.tpc_finish(txn)
-            if verbose:
-                print "finish"
-                sys.stdout.flush()
-
-        if not verbose:
-            prog = pos * 20L / file_size
-            while prog > prog1:
-                prog1 = prog1 + 1
-                iprogress(prog1)
-
-
-    bad = file_size - undone - ofs._pos
-
-    print "\n%s bytes removed during recovery" % bad
-    if undone:
-        print "%s bytes of undone transaction data were skipped" % undone
-
-    if pack is not None:
-        print "Packing ..."
-        from ZODB.serialize import referencesf
-        ofs.pack(pack, referencesf)
-
-    ofs.close()
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZODB/fstools.py b/branches/bug1734/src/ZODB/fstools.py
deleted file mode 100644
index c6c7b476..00000000
--- a/branches/bug1734/src/ZODB/fstools.py
+++ /dev/null
@@ -1,151 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Tools for using FileStorage data files.
-
-TODO:  This module needs tests.
-Caution:  This file needs to be kept in sync with FileStorage.py.
-"""
-
-import cPickle
-import struct
-
-from ZODB.FileStorage.format import TRANS_HDR, DATA_HDR, TRANS_HDR_LEN, \
-     DATA_HDR_LEN, DATA_VERSION_HDR_LEN
-from ZODB.utils import u64
-from persistent.TimeStamp import TimeStamp
-
-class TxnHeader:
-    """Object representing a transaction record header.
-
-    Attribute   Position  Value
-    ---------   --------  -----
-    tid           0- 8    transaction id
-    length        8-16    length of entire transaction record - 8
-    status       16-17    status of transaction (' ', 'u', 'p'?)
-    user_len     17-19    length of user field (pack code H)
-    descr_len    19-21    length of description field (pack code H)
-    ext_len      21-23    length of extensions (pack code H)
-    """
-
-    def __init__(self, file, pos):
-        self._file = file
-        self._pos = pos
-        self._read_header()
-
-    def _read_header(self):
-        self._file.seek(self._pos)
-        self._hdr = self._file.read(TRANS_HDR_LEN)
-        (self.tid, self.length, self.status, self.user_len, self.descr_len,
-         self.ext_len) = struct.unpack(TRANS_HDR, self._hdr)
-
-    def read_meta(self):
-        """Load user, descr, and ext attributes."""
-        self.user = ""
-        self.descr = ""
-        self.ext = {}
-        if not (self.user_len or self.descr_len or self.ext_len):
-            return
-        self._file.seek(self._pos + TRANS_HDR_LEN)
-        if self.user_len:
-            self.user = self._file.read(self.user_len)
-        if self.descr_len:
-            self.descr = self._file.read(self.descr_len)
-        if self.ext_len:
-            self._ext = self._file.read(self.ext_len)
-            self.ext = cPickle.loads(self._ext)
-
-    def get_data_offset(self):
-        return (self._pos + TRANS_HDR_LEN + self.user_len + self.descr_len
-                + self.ext_len)
-
-    def get_timestamp(self):
-        return TimeStamp(self.tid)
-
-    def get_raw_data(self):
-        data_off = self.get_data_offset()
-        data_len = self.length - (data_off - self._pos)
-        self._file.seek(data_off)
-        return self._file.read(data_len)
-
-    def next_txn(self):
-        off = self._pos + self.length + 8
-        self._file.seek(off)
-        s = self._file.read(8)
-        if not s:
-            return None
-        return TxnHeader(self._file, off)
-
-    def prev_txn(self):
-        if self._pos == 4:
-            return None
-        self._file.seek(self._pos - 8)
-        tlen = u64(self._file.read(8))
-        return TxnHeader(self._file, self._pos - (tlen + 8))
-
-class DataHeader:
-    """Object representing a data record header.
-
-    Attribute         Position  Value
-    ---------         --------  -----
-    oid                 0- 8    object id
-    serial              8-16    object serial number
-    prev_rec_pos       16-24    position of previous data record for object
-    txn_pos            24-32    position of txn header
-    version_len        32-34    length of version
-    data_len           34-42    length of data
-    nonversion_pos     42-50*   position of nonversion data record
-    prev_version_pos   50-58*   pos of previous version data record
-
-    * these attributes are only present if version_len != 0.
-    """
-
-    def __init__(self, file, pos):
-        self._file = file
-        self._pos = pos
-        self._read_header()
-
-    def _read_header(self):
-        self._file.seek(self._pos)
-        self._hdr = self._file.read(DATA_VERSION_HDR_LEN)
-        # always read the longer header, just in case
-        (self.oid, self.serial, prev_rec_pos, txn_pos, self.version_len,
-         data_len) = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
-        self.prev_rec_pos = u64(prev_rec_pos)
-        self.txn_pos = u64(txn_pos)
-        self.data_len = u64(data_len)
-        if self.version_len:
-            s = self._hdr[DATA_HDR_LEN:]
-            self.nonversion_pos = u64(s[:8])
-            self.prev_version_pos = u64(s[8:])
-        else:
-            self.nonversion_pos = None
-            self.prev_version_pos = None
-
-    def next_offset(self):
-        """Return offset of next record."""
-        off = self._pos + self.data_len
-        if self.version_len:
-            off += self.version_len + DATA_VERSION_HDR_LEN
-        else:
-            off += DATA_HDR_LEN
-        if self.data_len == 0:
-            off += 8 # backpointer
-        return off
-
-def prev_txn(f):
-    """Return transaction located before current file position."""
-    f.seek(-8, 1)
-    tlen = u64(f.read(8)) + 8
-    return TxnHeader(f, f.tell() - tlen)
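-
-# Example (illustrative sketch): walk every transaction header in a
-# FileStorage data file; the first header starts at offset 4, right
-# after the 4-byte magic string.
-#
-#     f = open("Data.fs", "rb")
-#     txn = TxnHeader(f, 4)
-#     while txn is not None:
-#         print txn.get_timestamp(), repr(txn.tid)
-#         txn = txn.next_txn()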
diff --git a/branches/bug1734/src/ZODB/interfaces.py b/branches/bug1734/src/ZODB/interfaces.py
deleted file mode 100644
index f6d26f3e..00000000
--- a/branches/bug1734/src/ZODB/interfaces.py
+++ /dev/null
@@ -1,475 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interfaces for ZODB.
-
-$Id$
-"""
-
-from zope.interface import Interface, Attribute
-
-class IConnection(Interface):
-    """Connection to ZODB for loading and storing objects.
-
-    The Connection object serves as a data manager.  The root() method
-    on a Connection returns the root object for the database.  This
-    object and all objects reachable from it are associated with the
-    Connection that loaded them.  When a transaction commits, it uses
-    the Connection to store modified objects.
-
-    In typical use of ZODB, each thread has its own Connection, and
-    no thread has more than one Connection to the same database.  A
-    thread is associated with a Connection by loading objects from
-    that Connection.  Objects loaded by one thread should not be used
-    by another thread.
-
-    A Connection can be associated with a single version when it is
-    created.  By default, a Connection is not associated with a
-    version; it uses non-version data.
-
-    Each Connection provides an isolated, consistent view of the
-    database, by managing independent copies of objects in the
-    database.  At transaction boundaries, these copies are updated to
-    reflect the current state of the database.
-
-    You should not instantiate this class directly; instead call the
-    open() method of a DB instance.
-
-    In many applications, root() is the only method of the Connection
-    that you will need to use.
-
-    Synchronization
-    ---------------
-
-    A Connection instance is not thread-safe.  It is designed to
-    support a thread model where each thread has its own transaction.
-    If an application has more than one thread that uses the
-    connection or the transaction the connection is registered with,
-    the application should provide locking.
-
-    The Connection manages movement of objects in and out of object
-    storage.
-
-    TODO:  We should document an intended API for using a Connection via
-    multiple threads.
-
-    TODO:  We should explain that the Connection has a cache and that
-    multiple calls to get() will return a reference to the same
-    object, provided that one of the earlier objects is still
-    referenced.  Object identity is preserved within a connection, but
-    not across connections.
-
-    TODO:  Mention the database pool.
-
-    A database connection always presents a consistent view of the
-    objects in the database, although it may not always present the
-    most current revision of any particular object.  Modifications
-    made by concurrent transactions are not visible until the next
-    transaction boundary (abort or commit).
-
-    Two options affect consistency: mvcc and synch, both of which are
-    enabled by default.
-
-    If you pass mvcc=False to db.open(), the Connection will never read
-    non-current revisions of an object.  Instead it will raise a
-    ReadConflictError to indicate that the current revision is
-    unavailable because it was written after the current transaction
-    began.
-
-    The logic for handling modifications assumes that the thread that
-    opened a Connection (called db.open()) is the thread that will use
-    the Connection.  If this is not true, you should pass synch=False
-    to db.open().  When the synch option is disabled, some transaction
-    boundaries will be missed by the Connection; in particular, if a
-    transaction does not involve any modifications to objects loaded
-    from the Connection and synch is disabled, the Connection will
-    miss the transaction boundary.  Two examples of this behavior are
-    db.undo() and read-only transactions.
-
-    Groups of methods:
-
-        User Methods:
-            root, get, add, close, db, sync, isReadOnly, cacheGC, cacheFullSweep,
-            cacheMinimize, getVersion, modifiedInVersion
-
-        Experimental Methods:
-            onCloseCallbacks
-
-        Database Invalidation Methods:
-            invalidate
-
-        Other Methods: exchange, getDebugInfo, setDebugInfo,
-            getTransferCounts
-    """
-
-    def __init__(version='', cache_size=400,
-                 cache_deactivate_after=None, mvcc=True, txn_mgr=None,
-                 synch=True):
-        """Create a new Connection.
-
-        A Connection instance should be instantiated by the DB
-        instance that it is connected to.
-
-        Parameters:
-        version: the "version" that all changes will be made in, defaults
-            to no version.
-        cache_size: the target size of the in-memory object cache, measured
-            in objects.
-        mvcc: boolean indicating whether MVCC is enabled
-        txn_mgr: transaction manager to use.  None means use the default
-            transaction manager.
-        synch: boolean indicating whether Connection should register for
-            afterCompletion() calls.
-        """
-
-    def add(obj):
-        """Add a new object 'obj' to the database and assign it an oid.
-
-        A persistent object is normally added to the database and
-        assigned an oid when it becomes reachable to an object already in
-        the database.  In some cases, it is useful to create a new
-        object and use its oid (_p_oid) in a single transaction.
-
-        This method assigns a new oid regardless of whether the object
-        is reachable.
-
-        The object is added when the transaction commits.  The object
-        must implement the IPersistent interface and must not
-        already be associated with a Connection.
-
-        Parameters:
-        obj: a Persistent object
-
-        Raises TypeError if obj is not a persistent object.
-
-        Raises InvalidObjectReference if obj is already associated with another
-        connection.
-
-        Raises ConnectionStateError if the connection is closed.
-        """
-
-    def get(oid):
-        """Return the persistent object with oid 'oid'.
-
-        If the object was not in the cache and the object's class is
-        ghostable, then a ghost will be returned.  If the object is
-        already in the cache, a reference to the cached object will be
-        returned.
-
-        Applications seldom need to call this method, because objects
-        are loaded transparently during attribute lookup.
-
-        Parameters:
-        oid: an object id
-
-        Raises KeyError if oid does not exist.
-
-            It is possible that an object does not exist as of the current
-            transaction, but existed in the past.  It may even exist again in
-            the future, if the transaction that removed it is undone.
-
-        Raises ConnectionStateError if the connection is closed.
-        """
-
-    def cacheMinimize():
-        """Deactivate all unmodified objects in the cache.
-
-        Call _p_deactivate() on each cached object, attempting to turn
-        it into a ghost.  It is possible for individual objects to
-        remain active.
-        """
-
-    def cacheGC():
-        """Reduce cache size to target size.
-
-        Call _p_deactivate() on cached objects until the cache size
-        falls under the target size.
-        """
-
-    def onCloseCallback(f):
-        """Register a callable, f, to be called by close().
-
-        f will be called with no arguments before the Connection is closed.
-
-        Parameters:
-        f: method that will be called on `close`
-        """
-
-    def close():
-        """Close the Connection.
-
-        When the Connection is closed, all callbacks registered by
-        onCloseCallback() are invoked and the cache is garbage collected.
-
-        A closed Connection should not be used by client code.  It can't load
-        or store objects.  Objects in the cache are not freed, because
-        Connections are re-used and the cache is expected to be useful to the
-        next client.
-        """
-
-    def db():
-        """Returns a handle to the database this connection belongs to."""
-
-    def isReadOnly():
-        """Returns True if the storage for this connection is read only."""
-
-    def invalidate(tid, oids):
-        """Notify the Connection that transaction 'tid' invalidated oids.
-
-        When the next transaction boundary is reached, objects will be
-        invalidated.  If any of the invalidated objects are accessed by the
-        current transaction, the revision written before Connection.tid will be
-        used.
-
-        The DB calls this method, even when the Connection is closed.
-
-        Parameters:
-        tid: the storage-level id of the transaction that committed
-        oids: a set of oids, represented as a dict with oids as keys.
-        """
-
-    def root():
-        """Return the database root object.
-
-        The root is a persistent.mapping.PersistentMapping.
-        """
-
-    def getVersion():
-        """Returns the version this connection is attached to."""
-
-    # Multi-database support.
-
-    connections = Attribute("""\
-        A mapping from database name to a Connection to that database.
-
-        In multi-database use, the Connections of all members of a database
-        collection share the same .connections object.
-
-        In single-database use, of course this mapping contains a single
-        entry.
-        """)
-
-    # TODO:  should this accept all the arguments one may pass to DB.open()?
-    def get_connection(database_name):
-        """Return a Connection for the named database.
-
-        This is intended to be called from an open Connection associated with
-        a multi-database.  In that case, database_name must be the name of a
-        database within the database collection (probably the name of a
-        different database than is associated with the calling Connection
-        instance, but it's fine to use the name of the calling Connection
-        object's database).  A Connection for the named database is
-        returned.  If no connection to that database is already open, a new
-        Connection is opened.  So long as the multi-database remains open,
-        passing the same name to get_connection() multiple times returns the
-        same Connection object each time.
-        """
-
-    def sync():
-        """Manually update the view on the database.
-
-        This includes aborting the current transaction, getting a fresh
-        and consistent view of the data (synchronizing with the storage
-        if possible), and calling cacheGC() for this connection.
-
-        This method was especially useful in ZODB 3.2 to better support
-        read-only connections that were affected by a couple of problems.
-        """
-
-    # Debug information
-
-    def getDebugInfo():
-        """Returns a tuple with different items for debugging the connection.
-
-        Debug information can be added to a connection by using setDebugInfo.
-        """
-
-    def setDebugInfo(*items):
-        """Add the given items to the debug information of this connection."""
-
-    def getTransferCounts(clear=False):
-        """Returns the number of objects loaded and stored.
-
-        If clear is True, reset the counters.
-        """
-
-class IDatabase(Interface):
-    """ZODB DB.
-
-    TODO: This interface is incomplete.
-    """
-
-    def __init__(storage,
-                 pool_size=7,
-                 cache_size=400,
-                 version_pool_size=3,
-                 version_cache_size=100,
-                 database_name='unnamed',
-                 databases=None,
-                 ):
-        """Create an object database.
-
-        storage: the storage used by the database, e.g. FileStorage
-        pool_size: expected maximum number of open connections
-        cache_size: target size of Connection object cache, in number of
-            objects
-        version_pool_size: expected maximum number of connections (per
-            version)
-        version_cache_size: target size of Connection object cache for
-             version connections, in number of objects
-        database_name: when using a multi-database, the name of this DB
-            within the database group.  It's a (detected) error if databases
-            is specified too and database_name is already a key in it.
-            This becomes the value of the DB's database_name attribute.
-        databases: when using a multi-database, a mapping to use as the
-            binding of this DB's .databases attribute.  It's intended
-            that the second and following DB's added to a multi-database
-            pass the .databases attribute set on the first DB added to the
-            collection.
-        """
-
-    databases = Attribute("""\
-        A mapping from database name to DB (database) object.
-
-        In multi-database use, all DB members of a database collection share
-        the same .databases object.
-
-        In single-database use, of course this mapping contains a single
-        entry.
-        """)
-
-class IStorage(Interface):
-    """A storage is responsible for storing and retrieving data of objects.
-    """
-
-    def load(oid, version):
-        """TODO"""
-
-    def close():
-        """TODO"""
-
-    def cleanup():
-        """TODO"""
-
-    def lastSerial():
-        """TODO"""
-
-    def lastTransaction():
-        """TODO"""
-
-    def lastTid(oid):
-        """Return last serialno committed for object oid."""
-
-    def loadSerial(oid, serial):
-        """TODO"""
-
-    def loadBefore(oid, tid):
-        """TODO"""
-
-    def iterator(start=None, stop=None):
-        """TODO"""
-
-    def sortKey():
-        """TODO"""
-
-    def getName():
-        """TODO"""
-
-    def getSize():
-        """TODO"""
-
-    def history(oid, version, length=1, filter=None):
-        """TODO"""
-
-    def new_oid(last=None):
-        """TODO"""
-
-    def set_max_oid(possible_new_max_oid):
-        """TODO"""
-
-    def registerDB(db, limit):
-        """TODO"""
-
-    def isReadOnly():
-        """TODO"""
-
-    def supportsUndo():
-        """TODO"""
-
-    def supportsVersions():
-        """TODO"""
-
-    def tpc_abort(transaction):
-        """TODO"""
-
-    def tpc_begin(transaction):
-        """TODO"""
-
-    def tpc_vote(transaction):
-        """TODO"""
-
-    def tpc_finish(transaction, f=None):
-        """TODO"""
-
-    def getSerial(oid):
-        """TODO"""
-
-    def getExtensionMethods():
-        """TODO"""
-
-    def copyTransactionsFrom():
-        """TODO"""
-
-    def store(oid, oldserial, data, version, transaction):
-        """
-
-        may return the new serial or not
-        """
-
-class IUndoableStorage(IStorage):
-
-    def undo(transaction_id, txn):
-        """TODO"""
-
-    def undoInfo():
-        """TODO"""
-
-    def undoLog(first, last, filter=None):
-        """TODO"""
-
-    def pack(t, referencesf):
-        """TODO"""
-
-class IVersioningStorage(IStorage):
-
-    def abortVersion(src, transaction):
-        """TODO"""
-
-    def commitVersion(src, dest, transaction):
-        """TODO"""
-
-    def modifiedInVersion(oid):
-        """TODO"""
-
-    def versionEmpty(version):
-        """TODO"""
-
-    def versions(max=None):
-        """TODO"""
-
diff --git a/branches/bug1734/src/ZODB/lock_file.py b/branches/bug1734/src/ZODB/lock_file.py
deleted file mode 100644
index 7e4c2929..00000000
--- a/branches/bug1734/src/ZODB/lock_file.py
+++ /dev/null
@@ -1,75 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import os
-import errno
-import logging
-logger = logging.getLogger("ZODB.lock_file")
-
-try:
-    import fcntl
-except ImportError:
-    try:
-        from winlock import LockFile as _LockFile
-        from winlock import UnlockFile as _UnlockFile
-    except ImportError:
-        # winlock is unavailable too; fall back to no-op locking so that
-        # lock_file and unlock_file are both defined.
-        def lock_file(file):
-            logger.info('No file-locking support on this platform')
-
-        def unlock_file(file):
-            pass
-    else:
-        # Windows
-        def lock_file(file):
-            # Lock just the first byte
-            _LockFile(file.fileno(), 0, 0, 1, 0)
-
-        def unlock_file(file):
-            _UnlockFile(file.fileno(), 0, 0, 1, 0)
-else:
-    # Unix
-    _flags = fcntl.LOCK_EX | fcntl.LOCK_NB
-
-    def lock_file(file):
-        fcntl.flock(file.fileno(), _flags)
-
-    def unlock_file(file):
-        # File is automatically unlocked on close
-        pass
-
-
-
-# This is a better interface to use than the lockfile.lock_file() interface.
-# Creating the instance acquires the lock.  The file remains open.  Calling
-# close both closes and unlocks the lock file.
-class LockFile:
-    def __init__(self, path):
-        self._path = path
-        try:
-            self._fp = open(path, 'r+')
-        except IOError, e:
-            if e.errno != errno.ENOENT: raise
-            self._fp = open(path, 'w+')
-        # Acquire the lock and piss on the hydrant
-        try:
-            lock_file(self._fp)
-        except:
-            logger.exception("Error locking file %s", path)
-            raise
-        print >> self._fp, os.getpid()
-        self._fp.flush()
-
-    def close(self):
-        if self._fp is not None:
-            unlock_file(self._fp)
-            self._fp.close()
-            os.unlink(self._path)
-            self._fp = None
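-
-# Example (illustrative): constructing a LockFile acquires the lock and
-# writes our pid into the file; close() unlocks, closes, and removes it.
-#
-#     lock = LockFile("Data.fs.lock")
-#     try:
-#         pass  # ... exclusive access to the protected resource ...
-#     finally:
-#         lock.close()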
diff --git a/branches/bug1734/src/ZODB/loglevels.py b/branches/bug1734/src/ZODB/loglevels.py
deleted file mode 100644
index f6adb7fe..00000000
--- a/branches/bug1734/src/ZODB/loglevels.py
+++ /dev/null
@@ -1,47 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Supplies custom logging levels BLATHER and TRACE.
-
-$Revision: 1.1 $
-"""
-
-import logging
-
-__all__ = ["BLATHER", "TRACE"]
-
-# In the days of zLOG, there were 7 standard log levels, and ZODB/ZEO used
-# all of them.  Here's how they map to the logging package's 5 standard
-# levels:
-#
-#    zLOG                         logging
-#    -------------                ---------------
-#    PANIC (300)                  FATAL, CRITICAL (50)
-#    ERROR (200)                  ERROR (40)
-#    WARNING, PROBLEM (100)       WARN (30)
-#    INFO (0)                     INFO (20)
-#    BLATHER (-100)               none -- defined here as BLATHER (15)
-#    DEBUG (-200)                 DEBUG (10)
-#    TRACE (-300)                 none -- defined here as TRACE (5)
-#
-# TRACE is used by ZEO for extremely verbose trace output, enabled only
-# when chasing bottom-level communications bugs.  It really should be at
-# a lower level than DEBUG.
-#
-# BLATHER is a harder call, and various instances could probably be folded
-# into INFO or DEBUG without real harm.
-
-BLATHER = 15
-TRACE = 5
-logging.addLevelName("BLATHER", BLATHER)
-logging.addLevelName("TRACE", TRACE)
diff --git a/branches/bug1734/src/ZODB/serialize.py b/branches/bug1734/src/ZODB/serialize.py
deleted file mode 100644
index 59bd4209..00000000
--- a/branches/bug1734/src/ZODB/serialize.py
+++ /dev/null
@@ -1,551 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Support for ZODB object serialization.
-
-ZODB serializes objects using a custom format based on Python pickles.
-When an object is unserialized, it can be loaded as either a ghost or
-a real object.  A ghost is a persistent object of the appropriate type
-but without any state.  The first time a ghost is accessed, the
-persistence machinery traps access and loads the actual state.  A
-ghost allows many persistent objects to be loaded while minimizing the
-memory consumption of referenced but otherwise unused objects.
-
-Pickle format
--------------
-
-ZODB stores serialized objects using a custom format based on pickle.
-Each serialized object has two parts: the class description and the
-object state.  The class description must provide enough information
-to call the class's ``__new__`` and create an empty object.  Once the
-object exists as a ghost, its state is passed to ``__setstate__``.
-
-The class description can be in a variety of formats, in part to
-provide backwards compatibility with earlier versions of Zope.  The
-two current formats for class description are:
-
-    1. type(obj)
-    2. type(obj), obj.__getnewargs__()
-
-The second of these options is used if the object has a __getnewargs__()
-method.  It is intended to support objects like persistent classes that have
-custom C layouts that are determined by arguments to __new__().
-
-The type object is usually stored using the standard pickle mechanism, which
-involves the pickle GLOBAL opcode (giving the type's module and name as
-strings).  The type may itself be a persistent object, in which case a
-persistent reference (see below) is used.
-
-It's unclear what "usually" means in the last paragraph.  There are two
-useful places to concentrate confusion about exactly which formats exist:
-
-- BaseObjectReader.getClassName() below returns a dotted "module.class"
-  string, via actually loading a pickle.  This requires that the
-  implementation of application objects be available.
-
-- ZODB/utils.py's get_pickle_metadata() tries to return the module and
-  class names (as strings) without importing any application modules or
-  classes, via analyzing the pickle.
-
-Earlier versions of Zope supported several other kinds of class
-descriptions.  The current serialization code reads these descriptions, but
-does not write them.  The four earlier formats are:
-
-    3. (module name, class name), None
-    4. (module name, class name), __getinitargs__()
-    5. class, None
-    6. class, __getinitargs__()
-
-Formats 4 and 6 are used only if the class defines a __getinitargs__()
-method.  Formats 5 and 6 are used if the class does not have a __module__
-attribute (I'm not sure when this applies, but I think it occurs for some
-but not all ZClasses).
-
-
-Persistent references
----------------------
-
-A persistent reference is a pair containing an oid and class metadata.
-When one persistent object pickle refers to another persistent object,
-the database uses a persistent reference.  The format allows a
-significant optimization, because ghosts can be created directly from
-persistent references.  If the reference was just an oid, a database
-access would be required to determine the class of the ghost.
-
-Because the persistent reference includes the class, it is not
-possible to change the class of a persistent object.  If a transaction
-changed the class of an object, a new record with new class metadata
-would be written but all the old references would still include the
-old class.
-"""
-
-import cPickle
-import cStringIO
-import logging
-
-
-from persistent import Persistent
-from persistent.wref import WeakRefMarker, WeakRef
-from ZODB import broken
-from ZODB.broken import Broken
-from ZODB.POSException import InvalidObjectReference
-
-# Might need to update or redo coptimizations to reflect weakrefs:
-# from ZODB.coptimizations import new_persistent_id
-
-def myhasattr(obj, name, _marker=object()):
-    """Make sure we don't mask exceptions like hasattr().
-
-    We don't want exceptions other than AttributeError to be masked,
-    since that too often masks other programming errors.
-    Three-argument getattr() doesn't mask those, so we use that to
-    implement our own hasattr() replacement.
-    """
-    return getattr(obj, name, _marker) is not _marker
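
A stand-alone sketch of the failure mode myhasattr() guards against, runnable
with plain Python 2 (the Flaky class is an illustrative assumption).  Built-in
hasattr() returns False here and silently hides the bug; myhasattr() lets the
real error escape:

    class Flaky(object):
        def __getattr__(self, name):
            # A programming error, not a missing attribute:
            raise ValueError("broken attribute hook")

    obj = Flaky()
    print hasattr(obj, "spam")   # False -- the ValueError is swallowed
    myhasattr(obj, "spam")       # raises ValueError, exposing the bug
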
-
-
-class BaseObjectWriter:
-    """Serializes objects for storage in the database.
-
-    The ObjectWriter creates object pickles in the ZODB format.  It
-    also detects new persistent objects reachable from the current
-    object.
-    """
-
-    def __init__(self, jar=None):
-        self._file = cStringIO.StringIO()
-        self._p = cPickle.Pickler(self._file, 1)
-        self._stack = []
-        self._p.persistent_id = self.persistent_id
-        if jar is not None:
-            assert myhasattr(jar, "new_oid")
-        self._jar = jar
-
-    def persistent_id(self, obj):
-        """Return the persistent id for obj.
-
-        >>> from ZODB.tests.util import P
-        >>> class DummyJar:
-        ...     def new_oid(self):
-        ...         return 42
-        >>> jar = DummyJar()
-        >>> writer = BaseObjectWriter(jar)
-
-        Normally, object references include the oid and a cached
-        reference to the class.  Having the class available allows
-        fast creation of the ghost without an additional database
-        lookup.
-
-        >>> bob = P('bob')
-        >>> oid, cls = writer.persistent_id(bob)
-        >>> oid
-        42
-        >>> cls is P
-        True
-
-        If a persistent object does not already have an oid and jar,
-        these will be assigned by persistent_id():
-
-        >>> bob._p_oid
-        42
-        >>> bob._p_jar is jar
-        True
-
-        If the object already has a persistent id, the id is not changed:
-
-        >>> bob._p_oid = 24
-        >>> oid, cls = writer.persistent_id(bob)
-        >>> oid
-        24
-        >>> cls is P
-        True
-
-        If the jar doesn't match that of the writer, an error is raised:
-
-        >>> bob._p_jar = DummyJar()
-        >>> writer.persistent_id(bob)
-        Traceback (most recent call last):
-          ...
-        InvalidObjectReference: Attempt to store an object from a """ \
-               """foreign database connection
-
-        Constructor arguments used by __new__(), as returned by
-        __getnewargs__(), can affect memory allocation, but may also
-        change over the life of the object.  This makes it useless to
-        cache even the object's class.
-
-        >>> class PNewArgs(P):
-        ...     def __getnewargs__(self):
-        ...         return ()
-
-        >>> sam = PNewArgs('sam')
-        >>> writer.persistent_id(sam)
-        42
-        >>> sam._p_oid
-        42
-        >>> sam._p_jar is jar
-        True
-
-        Check that simple objects don't get accused of persistence:
-
-        >>> writer.persistent_id(42)
-        >>> writer.persistent_id(object())
-
-        Check that a classic class doesn't get identified improperly:
-
-        >>> class ClassicClara:
-        ...    pass
-        >>> clara = ClassicClara()
-
-        >>> writer.persistent_id(clara)
-        """
-
-        # Most objects are not persistent. The following cheap test
-        # identifies most of them.  For these, we return None,
-        # signalling that the object should be pickled normally.
-        if not isinstance(obj, (Persistent, type, WeakRef)):
-            # Not persistent, pickle normally
-            return None
-
-        # Any persistent object must have an oid:
-        try:
-            oid = obj._p_oid
-        except AttributeError:
-            # Not persistent, pickle normally
-            return None
-
-        if not (oid is None or isinstance(oid, str)):
-            # Deserves a closer look:
-
-            # Make sure it's not a descriptor
-            if hasattr(oid, '__get__'):
-                # The oid is a descriptor.  That means obj is a non-persistent
-                # class whose instances are persistent, so ...
-                # Not persistent, pickle normally
-                return None
-
-            if oid is WeakRefMarker:
-                # we have a weakref, see weakref.py
-
-                oid = obj.oid
-                if oid is None:
-                    obj = obj() # get the referenced object
-                    oid = obj._p_oid
-                    if oid is None:
-                        # Here we are causing the object to be saved in
-                        # the database. One could argue that we shouldn't
-                        # do this, because a weakref should not cause an object
-                        # to be added.  We'll be optimistic, though, and
-                        # assume that the object will be added eventually.
-
-                        oid = self._jar.new_oid()
-                        obj._p_jar = self._jar
-                        obj._p_oid = oid
-                        self._stack.append(obj)
-                return [oid]
-
-
-        # Since we have an oid, we have either a persistent instance
-        # (an instance of Persistent), or a persistent class.
-
-        # NOTE! Persistent classes don't (and can't) subclass Persistent.
-
-        if oid is None:
-            oid = obj._p_oid = self._jar.new_oid()
-            obj._p_jar = self._jar
-            self._stack.append(obj)
-        elif obj._p_jar is not self._jar:
-            raise InvalidObjectReference(
-                "Attempt to store an object from a foreign "
-                "database connection"
-                )
-
-        klass = type(obj)
-        if hasattr(klass, '__getnewargs__'):
-            # We don't want to save newargs in object refs.
-            # It's possible that __getnewargs__ is degenerate and
-            # returns (), but we don't want to have to deghostify
-            # the object to find out.
-            return oid
-
-        return oid, klass
-
-    def serialize(self, obj):
-        # We don't use __class__ here, because obj could be a persistent proxy.
-        # We don't want to be fooled by proxies.
-        klass = type(obj)
-
-        newargs = getattr(obj, "__getnewargs__", None)
-        if newargs is None:
-            meta = klass
-        else:
-            meta = klass, newargs()
-
-        return self._dump(meta, obj.__getstate__())
-
-    def _dump(self, classmeta, state):
-        # To reuse the existing cStringIO object, we must reset
-        # the file position to 0 and truncate the file after the
-        # new pickle is written.
-        self._file.seek(0)
-        self._p.clear_memo()
-        self._p.dump(classmeta)
-        self._p.dump(state)
-        self._file.truncate()
-        return self._file.getvalue()
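
A hedged sketch of the record layout _dump() produces: two pickles back to
back in one string, the class description first and the state second.  It uses
plain cPickle only; the tuple standing in for real class metadata is an
illustrative assumption (actual records pickle the class itself, or a
(class, newargs) pair, as described in the module docstring):

    import cPickle
    import cStringIO

    def dump_record(classmeta, state):
        # Mirrors BaseObjectWriter._dump(): two consecutive pickles.
        f = cStringIO.StringIO()
        p = cPickle.Pickler(f, 1)
        p.dump(classmeta)
        p.dump(state)
        return f.getvalue()

    def load_record(data):
        u = cPickle.Unpickler(cStringIO.StringIO(data))
        classmeta = u.load()  # first pickle: class description
        state = u.load()      # second pickle: __setstate__() argument
        return classmeta, state

    record = dump_record(("mymodule", "MyClass"), {"_value": 3})
    print load_record(record)
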
-
-class ObjectWriter(BaseObjectWriter):
-
-    def __init__(self, obj):
-        BaseObjectWriter.__init__(self, obj._p_jar)
-        self._stack.append(obj)
-
-    def __iter__(self):
-        return NewObjectIterator(self._stack)
-
-class NewObjectIterator:
-
-    # The pickler is used as a forward iterator when the connection
-    # is looking for new objects to pickle.
-
-    def __init__(self, stack):
-        self._stack = stack
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        if self._stack:
-            elt = self._stack.pop()
-            return elt
-        else:
-            raise StopIteration
-
-class BaseObjectReader:
-
-    def _persistent_load(self, oid):
-        # subclasses must define _persistent_load().
-        raise NotImplementedError
-
-    def _get_class(self, module, name):
-        # subclasses must define _get_class()
-        raise NotImplementedError
-
-    def _get_unpickler(self, pickle):
-        file = cStringIO.StringIO(pickle)
-        unpickler = cPickle.Unpickler(file)
-        unpickler.persistent_load = self._persistent_load
-        return unpickler
-
-    def _new_object(self, klass, args):
-        if not args and not myhasattr(klass, "__getnewargs__"):
-            obj = klass.__new__(klass)
-        else:
-            obj = klass(*args)
-            if not isinstance(klass, type):
-                obj.__dict__.clear()
-
-        return obj
-
-    def getClassName(self, pickle):
-        unpickler = self._get_unpickler(pickle)
-        klass = unpickler.load()
-        if isinstance(klass, tuple):
-            klass, args = klass
-            if isinstance(klass, tuple):
-                # old style reference
-                return "%s.%s" % klass
-        return "%s.%s" % (klass.__module__, klass.__name__)
-
-    def getGhost(self, pickle):
-        unpickler = self._get_unpickler(pickle)
-        klass = unpickler.load()
-        if isinstance(klass, tuple):
-            # Here we have a separate class and args.
-            # This could be an old record, so the class might be given
-            # as a named (module, name) reference rather than a class.
-            klass, args = klass
-            if isinstance(klass, tuple):
-                # Old module_name, class_name tuple
-                klass = self._get_class(*klass)
-
-            if args is None:
-                args = ()
-        else:
-            # Definitely new style direct class reference
-            args = ()
-
-        if issubclass(klass, Broken):
-            # We got a broken class. We might need to make it
-            # PersistentBroken
-            if not issubclass(klass, broken.PersistentBroken):
-                klass = broken.persistentBroken(klass)
-
-        return klass.__new__(klass, *args)
-
-    def getState(self, pickle):
-        unpickler = self._get_unpickler(pickle)
-        try:
-            unpickler.load() # skip the class metadata
-            return unpickler.load()
-        except EOFError, msg:
-            log = logging.getLogger("ZODB.serialize")
-            log.exception("Unpickling error: %r", pickle)
-            raise
-
-    def setGhostState(self, obj, pickle):
-        state = self.getState(pickle)
-        obj.__setstate__(state)
-
-
-class ExternalReference(object):
-    pass
-
-class SimpleObjectReader(BaseObjectReader):
-    """Can be used to inspect a single object pickle.
-
-    It returns an ExternalReference() object for other persistent
-    objects.  It can't instantiate the object.
-    """
-
-    ext_ref = ExternalReference()
-
-    def _persistent_load(self, oid):
-        return self.ext_ref
-
-    def _get_class(self, module, name):
-        return None
-
-class ConnectionObjectReader(BaseObjectReader):
-
-    def __init__(self, conn, cache, factory):
-        self._conn = conn
-        self._cache = cache
-        self._factory = factory
-
-    def _get_class(self, module, name):
-        return self._factory(self._conn, module, name)
-
-    def _get_unpickler(self, pickle):
-        unpickler = BaseObjectReader._get_unpickler(self, pickle)
-        factory = self._factory
-        conn = self._conn
-
-        def find_global(modulename, name):
-            return factory(conn, modulename, name)
-
-        unpickler.find_global = find_global
-
-        return unpickler
-
-    def _persistent_load(self, oid):
-        if isinstance(oid, tuple):
-            # Quick instance reference.  We know all we need to know
-            # to create the instance w/o hitting the db, so go for it!
-            oid, klass = oid
-
-            obj = self._cache.get(oid, None)
-            if obj is not None:
-                return obj
-
-            if isinstance(klass, tuple):
-                klass = self._get_class(*klass)
-
-            if issubclass(klass, Broken):
-                # We got a broken class. We might need to make it
-                # PersistentBroken
-                if not issubclass(klass, broken.PersistentBroken):
-                    klass = broken.persistentBroken(klass)
-
-            try:
-                obj = klass.__new__(klass)
-            except TypeError:
-                # Couldn't create the instance.  Maybe there's more
-                # current data in the object's actual record!
-                return self._conn.get(oid)
-
-            # TODO: should be done by connection
-            obj._p_oid = oid
-            obj._p_jar = self._conn
-            # When an object is created, it is put in the UPTODATE
-            # state.  We must explicitly deactivate it to turn it into
-            # a ghost.
-            obj._p_changed = None
-
-            self._cache[oid] = obj
-            return obj
-
-        elif isinstance(oid, list):
-            # see weakref.py
-            [oid] = oid
-            obj = WeakRef.__new__(WeakRef)
-            obj.oid = oid
-            obj.dm = self._conn
-            return obj
-
-        obj = self._cache.get(oid, None)
-        if obj is not None:
-            return obj
-        return self._conn.get(oid)
-
-def referencesf(p, rootl=None):
-
-    if rootl is None:
-        rootl = []
-
-    u = cPickle.Unpickler(cStringIO.StringIO(p))
-    l = len(rootl)
-    u.persistent_load = rootl
-    u.noload()
-    try:
-        u.noload()
-    except:
-        # Hm.  We failed to do the second load.  Maybe there wasn't a
-        # second pickle.  Let's check:
-        f = cStringIO.StringIO(p)
-        u = cPickle.Unpickler(f)
-        u.persistent_load = []
-        u.noload()
-        if len(p) > f.tell():
-            raise ValueError, 'Error unpickling, %s' % p
-
-
-    # References may be:
-    #
-    # - A tuple, in which case they are an oid and class.
-    #   In this case, just extract the first element, which is
-    #   the oid
-    #
-    # - A list, which is a weak reference. We skip those.
-    #
-    # - Anything else must be an oid. This means that an oid
-    #   may not be a list or a tuple. This is a bit lame.
-    #   We could avoid this lamosity by allowing single-element
-    #   tuples, so that we wrap oids that are lists or tuples in
-    #   tuples.
-    #
-    # - oids may *not* be False.  I'm not sure why.
-
-    out = []
-    for v in rootl:
-        assert v # Let's see if we ever get empty ones
-        if type(v) is list:
-            # skip weakrefs
-            continue
-        if type(v) is tuple:
-            v = v[0]
-        out.append(v)
-
-    rootl[:] = out
-
-    return rootl
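
A hedged sketch of referencesf() in action.  It builds a record whose state
holds two persistent references, one (oid, class-metadata) tuple and one bare
oid, then extracts just the oids.  The Stub class and string oids are
illustrative assumptions; the sketch relies on the same Python 2 cPickle
behavior referencesf() itself uses (a list assigned to persistent_load
collects persistent ids during noload()):

    import cPickle
    import cStringIO

    class Stub:
        def __init__(self, oid):
            self.oid = oid

    f = cStringIO.StringIO()
    p = cPickle.Pickler(f, 1)
    p.persistent_id = lambda obj: getattr(obj, 'oid', None)
    state = {'left': Stub(('oid-1', 'class metadata stand-in')),
             'right': Stub('oid-2')}
    p.dump('class description stand-in')  # first pickle
    p.dump(state)                         # second pickle
    print referencesf(f.getvalue())       # ['oid-1', 'oid-2'] (order may vary)
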
diff --git a/branches/bug1734/src/ZODB/storage.xml b/branches/bug1734/src/ZODB/storage.xml
deleted file mode 100644
index ff459ae0..00000000
--- a/branches/bug1734/src/ZODB/storage.xml
+++ /dev/null
@@ -1,4 +0,0 @@
-<schema>
-<import package="ZODB"/>
-<section type="ZODB.storage" name="*" attribute="storage"/>
-</schema>
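
The schema accepts a single storage section and exposes it as the "storage"
attribute of the loaded configuration.  A hedged sketch of how ZODB.config,
which wraps this schema, turns a short configuration string into an opened
storage; the FileStorage path is an illustrative assumption:

    from ZODB import config

    storage = config.storageFromString("""
    <filestorage>
        path /tmp/example.fs
    </filestorage>
    """)
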
diff --git a/branches/bug1734/src/ZODB/subtransactions.txt b/branches/bug1734/src/ZODB/subtransactions.txt
deleted file mode 100644
index 92f803d9..00000000
--- a/branches/bug1734/src/ZODB/subtransactions.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-=========================
-Subtransactions in ZODB 3
-=========================
-
-ZODB 3 provides limited support for subtransactions. Subtransactions
-are nested to *one* level. There are top-level transactions and
-subtransactions.  When a transaction is committed, a flag is passed
-indicating whether it is a subtransaction or a top-level transaction.
-Consider the following example commit calls:
-
-- commit()
-
-  A regular top-level transaction is committed.
-
-- commit(1)
-
-  A subtransaction is committed. There is now one subtransaction of
-  the current top-level transaction.
-
-- commit(1)
-
-  A subtransaction is committed. There are now two subtransactions of
-  the current top-level transaction.
-
-- abort(1)
-
-  A subtransaction is aborted. There are still two subtransactions of
-  the current top-level transaction; work done since the last
-  commit(1) call is discarded.
-
-- commit()
-
-  We now commit a top-level transaction. The work done in the previous
-  two subtransactions *plus* work done since the last abort(1) call
-  is saved.
-
-- commit(1)
-
-  A subtransaction is committed. There is now one subtransaction of
-  the current top-level transaction.
-
-- commit(1)
-
-  A subtransaction is committed. There are now two subtransactions of
-  the current top-level transaction.
-
-- abort()
-
-  We now abort a top-level transaction. We discard the work done in
-  the previous two subtransactions *plus* work done since the last
-  commit(1) call.
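
A minimal sketch of the sequence of calls just described, assuming an open
database db and the get_transaction() accessor that ZODB 3 of this era
installs as a builtin:

    conn = db.open()
    root = conn.root()

    root['a'] = 1
    get_transaction().commit(1)   # first subtransaction
    root['b'] = 2
    get_transaction().commit(1)   # second subtransaction
    root['c'] = 3
    get_transaction().abort(1)    # discards only the change to 'c'
    get_transaction().commit()    # top-level commit: 'a' and 'b' are saved
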
diff --git a/branches/bug1734/src/ZODB/tests/BasicStorage.py b/branches/bug1734/src/ZODB/tests/BasicStorage.py
deleted file mode 100644
index f10aa907..00000000
--- a/branches/bug1734/src/ZODB/tests/BasicStorage.py
+++ /dev/null
@@ -1,221 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Run the basic tests for a storage as described in the official storage API
-
-The most complete and most out-of-date description of the interface is:
-http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storage_Interface_Info.html
-
-All storages should be able to pass these tests.
-"""
-
-from ZODB import POSException
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase \
-     import zodb_unpickle, zodb_pickle, handle_serials
-
-import transaction
-
-ZERO = '\0'*8
-
-
-
-class BasicStorage:
-    def checkBasics(self):
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        # This should simply return
-        self._storage.tpc_begin(t)
-        # Aborting is easy
-        self._storage.tpc_abort(t)
-        # Test a few expected exceptions from operations that pass a
-        # different Transaction object than the one we began with.
-        self._storage.tpc_begin(t)
-        self.assertRaises(
-            POSException.StorageTransactionError,
-            self._storage.store,
-            0, 0, 0, 0, transaction.Transaction())
-
-        try:
-            self._storage.abortVersion('dummy', transaction.Transaction())
-        except (POSException.StorageTransactionError,
-                POSException.VersionCommitError):
-            pass # test passed ;)
-        else:
-            assert 0, "Should have failed, invalid transaction."
-
-        try:
-            self._storage.commitVersion('dummy', 'dummer',
-                                        transaction.Transaction())
-        except (POSException.StorageTransactionError,
-                POSException.VersionCommitError):
-            pass # test passed ;)
-        else:
-            assert 0, "Should have failed, invalid transaction."
-
-        self.assertRaises(
-            POSException.StorageTransactionError,
-            self._storage.store,
-            0, 1, 2, 3, transaction.Transaction())
-        self._storage.tpc_abort(t)
-
-    def checkSerialIsNoneForInitialRevision(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        txn = transaction.Transaction()
-        self._storage.tpc_begin(txn)
-        # Use None for serial.  Don't use _dostore() here because that coerces
-        # serial=None to serial=ZERO.
-        r1 = self._storage.store(oid, None, zodb_pickle(MinPO(11)),
-                                       '', txn)
-        r2 = self._storage.tpc_vote(txn)
-        self._storage.tpc_finish(txn)
-        newrevid = handle_serials(oid, r1, r2)
-        data, revid = self._storage.load(oid, '')
-        value = zodb_unpickle(data)
-        eq(value, MinPO(11))
-        eq(revid, newrevid)
-
-    def checkNonVersionStore(self):
-        revid = ZERO
-        newrevid = self._dostore(revid=None)
-        # Finish the transaction.
-        self.assertNotEqual(newrevid, revid)
-
-    def checkNonVersionStoreAndLoad(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        self._dostore(oid=oid, data=MinPO(7))
-        data, revid = self._storage.load(oid, '')
-        value = zodb_unpickle(data)
-        eq(value, MinPO(7))
-        # Now do a bunch of updates to an object
-        for i in range(13, 22):
-            revid = self._dostore(oid, revid=revid, data=MinPO(i))
-        # Now get the latest revision of the object
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(21))
-
-    def checkNonVersionModifiedInVersion(self):
-        oid = self._storage.new_oid()
-        self._dostore(oid=oid)
-        self.assertEqual(self._storage.modifiedInVersion(oid), '')
-
-    def checkConflicts(self):
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        self._dostore(oid, revid=revid1, data=MinPO(12))
-        self.assertRaises(POSException.ConflictError,
-                          self._dostore,
-                          oid, revid=revid1, data=MinPO(13))
-
-    def checkWriteAfterAbort(self):
-        oid = self._storage.new_oid()
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
-        # Now abort this transaction
-        self._storage.tpc_abort(t)
-        # Now start all over again
-        oid = self._storage.new_oid()
-        self._dostore(oid=oid, data=MinPO(6))
-
-    def checkAbortAfterVote(self):
-        oid1 = self._storage.new_oid()
-        revid1 = self._dostore(oid=oid1, data=MinPO(-2))
-        oid = self._storage.new_oid()
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
-        # Now vote, then abort this transaction
-        self._storage.tpc_vote(t)
-        self._storage.tpc_abort(t)
-        # Now start all over again
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid=oid, data=MinPO(6))
-
-        for oid, revid in [(oid1, revid1), (oid, revid)]:
-            data, _revid = self._storage.load(oid, '')
-            self.assertEqual(revid, _revid)
-
-    def checkStoreTwoObjects(self):
-        noteq = self.assertNotEqual
-        p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        noteq(oid1, oid2)
-        revid1 = self._dostore(oid1, data=p31)
-        revid2 = self._dostore(oid2, data=p51)
-        noteq(revid1, revid2)
-        revid3 = self._dostore(oid1, revid=revid1, data=p32)
-        revid4 = self._dostore(oid2, revid=revid2, data=p52)
-        noteq(revid3, revid4)
-
-    def checkGetSerial(self):
-        if not hasattr(self._storage, 'getSerial'):
-            return
-        eq = self.assertEqual
-        p41, p42 = map(MinPO, (41, 42))
-        oid = self._storage.new_oid()
-        self.assertRaises(KeyError, self._storage.getSerial, oid)
-        # Now store a revision
-        revid1 = self._dostore(oid, data=p41)
-        eq(revid1, self._storage.getSerial(oid))
-        # And another one
-        revid2 = self._dostore(oid, revid=revid1, data=p42)
-        eq(revid2, self._storage.getSerial(oid))
-
-    def checkTwoArgBegin(self):
-        # Unsure: how standard is two-argument tpc_begin()?
-        t = transaction.Transaction()
-        tid = '\0\0\0\0\0psu'
-        self._storage.tpc_begin(t, tid)
-        oid = self._storage.new_oid()
-        data = zodb_pickle(MinPO(8))
-        self._storage.store(oid, None, data, '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-    def checkLen(self):
-        # len(storage) reports the number of objects.
-        # check it is zero when empty
-        self.assertEqual(len(self._storage), 0)
-        # check it is correct when the storage contains two objects.
-        # len may also be zero, for storages that do not keep track
-        # of this number.
-        self._dostore(data=MinPO(22))
-        self._dostore(data=MinPO(23))
-        self.assert_(len(self._storage) in [0, 2])
-
-    def checkGetSize(self):
-        self._dostore(data=MinPO(25))
-        size = self._storage.getSize()
-        # The storage API doesn't make any claims about what size
-        # means except that it ought to be printable.
-        str(size)
-
-    def checkNote(self):
-        oid = self._storage.new_oid()
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        t.note('this is a test')
-        self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-    def checkGetExtensionMethods(self):
-        m = self._storage.getExtensionMethods()
-        self.assertEqual(type(m), type({}))
-        for k, v in m.items():
-            self.assertEqual(v, None)
-            self.assert_(callable(getattr(self._storage, k)))
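
For reference, a hedged outline of the two-phase commit sequence these tests
exercise over and over; storage, oid, serial and data are placeholders:

    import transaction

    t = transaction.Transaction()
    storage.tpc_begin(t)                     # start the transaction
    storage.store(oid, serial, data, '', t)  # one or more stores
    storage.tpc_vote(t)                      # storage promises it can finish
    storage.tpc_finish(t)                    # second phase: make it permanent
    # ...or storage.tpc_abort(t) at any point to throw the work away
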
diff --git a/branches/bug1734/src/ZODB/tests/ConflictResolution.py b/branches/bug1734/src/ZODB/tests/ConflictResolution.py
deleted file mode 100644
index 145d95a6..00000000
--- a/branches/bug1734/src/ZODB/tests/ConflictResolution.py
+++ /dev/null
@@ -1,183 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for application-level conflict resolution."""
-
-from ZODB.POSException import ConflictError, UndoError
-from persistent import Persistent
-from transaction import Transaction
-
-from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
-
-class PCounter(Persistent):
-
-    _value = 0
-
-    def __repr__(self):
-        return "<PCounter %d>" % self._value
-
-    def inc(self):
-        self._value = self._value + 1
-
-    def _p_resolveConflict(self, oldState, savedState, newState):
-        savedDiff = savedState['_value'] - oldState['_value']
-        newDiff = newState['_value'] - oldState['_value']
-
-        oldState['_value'] = oldState['_value'] + savedDiff + newDiff
-
-        return oldState
-
-    # Insecurity:  What if _p_resolveConflict _thinks_ it resolved the
-    # conflict, but did something wrong?
-
-class PCounter2(PCounter):
-
-    def _p_resolveConflict(self, oldState, savedState, newState):
-        raise ConflictError
-
-class PCounter3(PCounter):
-    def _p_resolveConflict(self, oldState, savedState, newState):
-        raise AttributeError, "no attribute (testing conflict resolution)"
-
-class PCounter4(PCounter):
-    def _p_resolveConflict(self, oldState, savedState):
-        raise RuntimeError, "Can't get here; not enough args"
-
-class ConflictResolvingStorage:
-
-    def checkResolve(self):
-        obj = PCounter()
-        obj.inc()
-
-        oid = self._storage.new_oid()
-
-        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
-
-        obj.inc()
-        obj.inc()
-        # The effect of committing two transactions with the same
-        # pickle is to commit two different transactions relative to
-        # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-
-        data, serialno = self._storage.load(oid, '')
-        inst = zodb_unpickle(data)
-        self.assertEqual(inst._value, 5)
-
-    def checkUnresolvable(self):
-        obj = PCounter2()
-        obj.inc()
-
-        oid = self._storage.new_oid()
-
-        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
-
-        obj.inc()
-        obj.inc()
-        # The effect of committing two transactions with the same
-        # pickle is to commit two different transactions relative to
-        # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        try:
-            self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        except ConflictError, err:
-            self.assert_("PCounter2" in str(err))
-        else:
-            self.fail("Expected ConflictError")
-
-    def checkZClassesArentResolved(self):
-        from ZODB.ConflictResolution import find_global, BadClassName
-        dummy_class_tuple = ('*foobar', ())
-        self.assertRaises(BadClassName, find_global, '*foobar', ())
-
-    def checkBuggyResolve1(self):
-        obj = PCounter3()
-        obj.inc()
-
-        oid = self._storage.new_oid()
-
-        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
-
-        obj.inc()
-        obj.inc()
-        # The effect of committing two transactions with the same
-        # pickle is to commit two different transactions relative to
-        # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        self.assertRaises(ConflictError,
-                          self._dostoreNP,
-                          oid, revid=revid1, data=zodb_pickle(obj))
-
-    def checkBuggyResolve2(self):
-        obj = PCounter4()
-        obj.inc()
-
-        oid = self._storage.new_oid()
-
-        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
-
-        obj.inc()
-        obj.inc()
-        # The effect of committing two transactions with the same
-        # pickle is to commit two different transactions relative to
-        # revid1 that add two to _value.
-        revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        self.assertRaises(ConflictError,
-                          self._dostoreNP,
-                          oid, revid=revid1, data=zodb_pickle(obj))
-
-class ConflictResolvingTransUndoStorage:
-
-    def checkUndoConflictResolution(self):
-        # This test is based on checkNotUndoable in the
-        # TransactionalUndoStorage test suite.  Except here, conflict
-        # resolution should allow us to undo the transaction anyway.
-
-        obj = PCounter()
-        obj.inc()
-        oid = self._storage.new_oid()
-        revid_a = self._dostore(oid, data=obj)
-        obj.inc()
-        revid_b = self._dostore(oid, revid=revid_a, data=obj)
-        obj.inc()
-        revid_c = self._dostore(oid, revid=revid_b, data=obj)
-        # Start the undo
-        info = self._storage.undoInfo()
-        tid = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.undo(tid, t)
-        self._storage.tpc_finish(t)
-
-    def checkUndoUnresolvable(self):
-        # This test is based on checkNotUndoable in the
-        # TransactionalUndoStorage test suite.  Except here, conflict
-        # resolution fails, so the undo must raise UndoError.
-
-        obj = PCounter2()
-        obj.inc()
-        oid = self._storage.new_oid()
-        revid_a = self._dostore(oid, data=obj)
-        obj.inc()
-        revid_b = self._dostore(oid, revid=revid_a, data=obj)
-        obj.inc()
-        revid_c = self._dostore(oid, revid=revid_b, data=obj)
-        # Start the undo
-        info = self._storage.undoInfo()
-        tid = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self.assertRaises(UndoError, self._storage.undo,
-                          tid, t)
-        self._storage.tpc_abort(t)
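
A stand-alone sketch of the merge arithmetic in PCounter._p_resolveConflict(),
applied to plain state dictionaries; the concrete numbers are illustrative:

    old   = {'_value': 1}   # state both transactions started from
    saved = {'_value': 3}   # committed first (two inc() calls)
    new   = {'_value': 2}   # the loser of the race (one inc() call)

    resolved = PCounter()._p_resolveConflict(old, saved, new)
    print resolved   # {'_value': 4}: both sets of increments survive
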
diff --git a/branches/bug1734/src/ZODB/tests/Corruption.py b/branches/bug1734/src/ZODB/tests/Corruption.py
deleted file mode 100644
index 27fede11..00000000
--- a/branches/bug1734/src/ZODB/tests/Corruption.py
+++ /dev/null
@@ -1,79 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Do some minimal tests of data corruption"""
-
-import os
-import random
-import stat
-import tempfile
-
-import ZODB, ZODB.FileStorage
-from StorageTestBase import StorageTestBase
-
-class FileStorageCorruptTests(StorageTestBase):
-
-    def setUp(self):
-        self.path = tempfile.mktemp()
-        self._storage = ZODB.FileStorage.FileStorage(self.path, create=1)
-
-    def tearDown(self):
-        self._storage.close()
-        self._storage.cleanup()
-
-    def _do_stores(self):
-        oids = []
-        for i in range(5):
-            oid = self._storage.new_oid()
-            revid = self._dostore(oid)
-            oids.append((oid, revid))
-        return oids
-
-    def _check_stores(self, oids):
-        for oid, revid in oids:
-            data, s_revid = self._storage.load(oid, '')
-            self.assertEqual(s_revid, revid)
-
-    def checkTruncatedIndex(self):
-        oids = self._do_stores()
-        self._close()
-
-        # truncate the index file
-        path = self.path + '.index'
-        self.failUnless(os.path.exists(path))
-        f = open(path, 'r+')
-        f.seek(0, 2)
-        size = f.tell()
-        f.seek(size / 2)
-        f.truncate()
-        f.close()
-
-        self._storage = ZODB.FileStorage.FileStorage(self.path)
-        self._check_stores(oids)
-
-    def checkCorruptedIndex(self):
-        oids = self._do_stores()
-        self._close()
-
-        # corrupt the index file with random null bytes
-        path = self.path + '.index'
-        self.failUnless(os.path.exists(path))
-        size = os.stat(path)[stat.ST_SIZE]
-        f = open(path, 'r+')
-        while f.tell() < size:
-            f.seek(random.randrange(1, size / 10), 1)
-            f.write('\000')
-        f.close()
-
-        self._storage = ZODB.FileStorage.FileStorage(self.path)
-        self._check_stores(oids)
diff --git a/branches/bug1734/src/ZODB/tests/HistoryStorage.py b/branches/bug1734/src/ZODB/tests/HistoryStorage.py
deleted file mode 100644
index 9d627edd..00000000
--- a/branches/bug1734/src/ZODB/tests/HistoryStorage.py
+++ /dev/null
@@ -1,230 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Run the history() related tests for a storage.
-
-Any storage that supports the history() method should be able to pass
-all these tests.
-"""
-
-from ZODB.tests.MinPO import MinPO
-from transaction import Transaction
-
-class HistoryStorage:
-    def checkSimpleHistory(self):
-        eq = self.assertEqual
-        # Store a couple of non-version revisions of the object
-        oid = self._storage.new_oid()
-        self.assertRaises(KeyError, self._storage.history, oid)
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        # Now get various snapshots of the object's history
-        h = self._storage.history(oid, size=1)
-        eq(len(h), 1)
-        d = h[0]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        # Try to get 2 historical revisions
-        h = self._storage.history(oid, size=2)
-        eq(len(h), 2)
-        d = h[0]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[1]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        # Try to get all 3 historical revisions
-        h = self._storage.history(oid, size=3)
-        eq(len(h), 3)
-        d = h[0]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[1]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        d = h[2]
-        eq(d['tid'], revid1)
-        eq(d['version'], '')
-        # There should be no more than 3 revisions
-        h = self._storage.history(oid, size=4)
-        eq(len(h), 3)
-        d = h[0]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[1]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        d = h[2]
-        eq(d['tid'], revid1)
-        eq(d['version'], '')
-
-    def checkVersionHistory(self):
-        if not self._storage.supportsVersions():
-            return
-        eq = self.assertEqual
-        # Store a couple of non-version revisions
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        # Now store some new revisions in a version
-        version = 'test-version'
-        revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
-                               version=version)
-        revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
-                               version=version)
-        revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
-                               version=version)
-        # Now, try to get the six historical revisions (first three are in
-        # 'test-version', followed by the non-version revisions).
-        h = self._storage.history(oid, version, 100)
-        eq(len(h), 6)
-        d = h[0]
-        eq(d['tid'], revid6)
-        eq(d['version'], version)
-        d = h[1]
-        eq(d['tid'], revid5)
-        eq(d['version'], version)
-        d = h[2]
-        eq(d['tid'], revid4)
-        eq(d['version'], version)
-        d = h[3]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[4]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        d = h[5]
-        eq(d['tid'], revid1)
-        eq(d['version'], '')
-
-    def checkHistoryAfterVersionCommit(self):
-        if not self._storage.supportsVersions():
-            return
-        eq = self.assertEqual
-        # Store a couple of non-version revisions
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        # Now store some new revisions in a version
-        version = 'test-version'
-        revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
-                               version=version)
-        revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
-                               version=version)
-        revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
-                               version=version)
-        # Now commit the version
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion(version, '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # After consultation with Jim, we agreed that the semantics of
-        # revision ids after a version commit are that the committed object
-        # gets a new serial number (a.k.a. revision id).  Note that
-        # FileStorage is broken here; the serial number in the post-commit
-        # non-version revision will be the same as the serial number of the
-        # previous in-version revision.
-        #
-        # BAW: Using load() is the only way to get the serial number of the
-        # current revision of the object.  But at least this works for both
-        # broken and working storages.
-        ign, revid7 = self._storage.load(oid, '')
-        # Now, try to get all seven historical revisions (the post-commit
-        # revision, the three 'test-version' ones, then the non-version ones).
-        h = self._storage.history(oid, version, 100)
-        eq(len(h), 7)
-        d = h[0]
-        eq(d['tid'], revid7)
-        eq(d['version'], '')
-        d = h[1]
-        eq(d['tid'], revid6)
-        eq(d['version'], version)
-        d = h[2]
-        eq(d['tid'], revid5)
-        eq(d['version'], version)
-        d = h[3]
-        eq(d['tid'], revid4)
-        eq(d['version'], version)
-        d = h[4]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[5]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        d = h[6]
-        eq(d['tid'], revid1)
-        eq(d['version'], '')
-
-    def checkHistoryAfterVersionAbort(self):
-        if not self._storage.supportsVersions():
-            return
-        eq = self.assertEqual
-        # Store a couple of non-version revisions
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        # Now store some new revisions in a version
-        version = 'test-version'
-        revid4 = self._dostore(oid, revid=revid3, data=MinPO(14),
-                               version=version)
-        revid5 = self._dostore(oid, revid=revid4, data=MinPO(15),
-                               version=version)
-        revid6 = self._dostore(oid, revid=revid5, data=MinPO(16),
-                               version=version)
-        # Now abort the version
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.abortVersion(version, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # After consultation with Jim, we agreed that the semantics of
-        # revision ids after a version abort are that the aborted object
-        # gets a new serial number (a.k.a. revision id).  Note that
-        # FileStorage is broken here; the serial number in the post-commit
-        # non-version revision will be the same as the serial number of the
-        # previous in-version revision.
-        #
-        # BAW: Using load() is the only way to get the serial number of the
-        # current revision of the object.  But at least this works for both
-        # broken and working storages.
-        ign, revid7 = self._storage.load(oid, '')
-        # Now, try to get all seven historical revisions (the post-abort
-        # revision, the three 'test-version' ones, then the non-version ones).
-        h = self._storage.history(oid, version, 100)
-        eq(len(h), 7)
-        d = h[0]
-        eq(d['tid'], revid7)
-        eq(d['version'], '')
-        d = h[1]
-        eq(d['tid'], revid6)
-        eq(d['version'], version)
-        d = h[2]
-        eq(d['tid'], revid5)
-        eq(d['version'], version)
-        d = h[3]
-        eq(d['tid'], revid4)
-        eq(d['version'], version)
-        d = h[4]
-        eq(d['tid'], revid3)
-        eq(d['version'], '')
-        d = h[5]
-        eq(d['tid'], revid2)
-        eq(d['version'], '')
-        d = h[6]
-        eq(d['tid'], revid1)
-        eq(d['version'], '')
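
A hedged sketch of the history() API these tests exercise: entries come back
newest first, as dictionaries keyed by 'tid', 'version' and friends; storage
and oid are placeholders:

    h = storage.history(oid, size=2)
    for d in h:
        print d['tid'], d['version']
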
diff --git a/branches/bug1734/src/ZODB/tests/IteratorStorage.py b/branches/bug1734/src/ZODB/tests/IteratorStorage.py
deleted file mode 100644
index e2fa4b6b..00000000
--- a/branches/bug1734/src/ZODB/tests/IteratorStorage.py
+++ /dev/null
@@ -1,233 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Run tests against the iterator() interface for storages.
-
-Any storage that supports the iterator() method should be able to pass
-all these tests.
-"""
-
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
-from ZODB.utils import U64, p64
-
-from transaction import Transaction
-
-class IteratorCompare:
-
-    def iter_verify(self, txniter, revids, val0):
-        eq = self.assertEqual
-        oid = self._oid
-        val = val0
-        for reciter, revid in zip(txniter, revids + [None]):
-            eq(reciter.tid, revid)
-            for rec in reciter:
-                eq(rec.oid, oid)
-                eq(rec.tid, revid)
-                eq(rec.version, '')
-                eq(zodb_unpickle(rec.data), MinPO(val))
-                val = val + 1
-        eq(val, val0 + len(revids))
-        txniter.close()
-
-class IteratorStorage(IteratorCompare):
-
-    def checkSimpleIteration(self):
-        # Store a bunch of revisions of a single object
-        self._oid = oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        # Now iterate over all the transactions and compare carefully
-        txniter = self._storage.iterator()
-        self.iter_verify(txniter, [revid1, revid2, revid3], 11)
-
-    def checkClose(self):
-        self._oid = oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        txniter = self._storage.iterator()
-        txniter.close()
-        self.assertRaises(IOError, txniter.__getitem__, 0)
-
-    def checkVersionIterator(self):
-        if not self._storage.supportsVersions():
-            return
-        self._dostore()
-        self._dostore(version='abort')
-        self._dostore()
-        self._dostore(version='abort')
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.abortVersion('abort', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        self._dostore(version='commit')
-        self._dostore()
-        self._dostore(version='commit')
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion('commit', '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        txniter = self._storage.iterator()
-        for trans in txniter:
-            for data in trans:
-                pass
-
-    def checkUndoZombieNonVersion(self):
-        if not hasattr(self._storage, 'supportsTransactionalUndo'):
-            return
-        if not self._storage.supportsTransactionalUndo():
-            return
-
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(94))
-        # Get the undo information
-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        # Undo the creation of the object, rendering it a zombie
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # Now attempt to iterate over the storage
-        iter = self._storage.iterator()
-        for txn in iter:
-            for rec in txn:
-                pass
-
-        # The last transaction performed an undo of the transaction that
-        # created object oid.  (As Barry points out, the object is now in the
-        # George Bailey state.)  Assert that the final data record contains
-        # None in the data attribute.
-        self.assertEqual(rec.oid, oid)
-        self.assertEqual(rec.data, None)
-
-    def checkTransactionExtensionFromIterator(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(1))
-        iter = self._storage.iterator()
-        count = 0
-        for txn in iter:
-            self.assertEqual(txn._extension, {})
-            count += 1
-        self.assertEqual(count, 1)
-
-    def checkIterationIntraTransaction(self):
-        # TODO:  Try this test with logging enabled.  If you see something
-        # like
-        #
-        # ZODB FS FS21 warn: FileStorageTests.fs truncated, possibly due to
-        # damaged records at 4
-        #
-        # Then the code in FileIterator.next() hasn't yet been fixed.
-        # Should automate that check.
-        oid = self._storage.new_oid()
-        t = Transaction()
-        data = zodb_pickle(MinPO(0))
-        try:
-            self._storage.tpc_begin(t)
-            self._storage.store(oid, '\0'*8, data, '', t)
-            self._storage.tpc_vote(t)
-            # Don't do tpc_finish yet
-            it = self._storage.iterator()
-            for x in it:
-                pass
-        finally:
-            self._storage.tpc_finish(t)
-
-    def checkLoadEx(self):
-        oid = self._storage.new_oid()
-        self._dostore(oid, data=42)
-        data, tid, ver = self._storage.loadEx(oid, "")
-        self.assertEqual(zodb_unpickle(data), MinPO(42))
-        match = False
-        for txn in self._storage.iterator():
-            for rec in txn:
-                if rec.oid == oid and rec.tid == tid:
-                    self.assertEqual(txn.tid, tid)
-                    match = True
-        if not match:
-            self.fail("Could not find transaction with matching id")
-
-
-class ExtendedIteratorStorage(IteratorCompare):
-
-    def checkExtendedIteration(self):
-        # Store a bunch of revisions of a single object
-        self._oid = oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(13))
-        revid4 = self._dostore(oid, revid=revid3, data=MinPO(14))
-        # Note that the end points are included
-        # Iterate over all of the transactions with explicit start/stop
-        txniter = self._storage.iterator(revid1, revid4)
-        self.iter_verify(txniter, [revid1, revid2, revid3, revid4], 11)
-        # Iterate over some of the transactions with explicit start
-        txniter = self._storage.iterator(revid3)
-        self.iter_verify(txniter, [revid3, revid4], 13)
-        # Iterate over some of the transactions with explicit stop
-        txniter = self._storage.iterator(None, revid2)
-        self.iter_verify(txniter, [revid1, revid2], 11)
-        # Iterate over some of the transactions with explicit start+stop
-        txniter = self._storage.iterator(revid2, revid3)
-        self.iter_verify(txniter, [revid2, revid3], 12)
-        # Specify an upper bound somewhere in between values
-        revid3a = p64((U64(revid3) + U64(revid4)) / 2)
-        txniter = self._storage.iterator(revid2, revid3a)
-        self.iter_verify(txniter, [revid2, revid3], 12)
-        # Specify a lower bound somewhere in between values.
-        # revid2 == revid1+1 is very likely on Windows.  Adding 1 before
-        # dividing ensures that "the midpoint" we compute is strictly larger
-        # than revid1.
-        revid1a = p64((U64(revid1) + 1 + U64(revid2)) / 2)
-        assert revid1 < revid1a
-        txniter = self._storage.iterator(revid1a, revid3a)
-        self.iter_verify(txniter, [revid2, revid3], 12)
-        # Specify an empty range
-        txniter = self._storage.iterator(revid3, revid2)
-        self.iter_verify(txniter, [], 13)
-        # Specify a singleton range
-        txniter = self._storage.iterator(revid3, revid3)
-        self.iter_verify(txniter, [revid3], 13)
-
-class IteratorDeepCompare:
-    def compare(self, storage1, storage2):
-        eq = self.assertEqual
-        iter1 = storage1.iterator()
-        iter2 = storage2.iterator()
-        for txn1, txn2 in zip(iter1, iter2):
-            eq(txn1.tid,         txn2.tid)
-            eq(txn1.status,      txn2.status)
-            eq(txn1.user,        txn2.user)
-            eq(txn1.description, txn2.description)
-            eq(txn1._extension,  txn2._extension)
-            for rec1, rec2 in zip(txn1, txn2):
-                eq(rec1.oid,     rec2.oid)
-                eq(rec1.tid,  rec2.tid)
-                eq(rec1.version, rec2.version)
-                eq(rec1.data,    rec2.data)
-            # Make sure there are no more records left in txn1 and txn2,
-            # meaning they were the same length.
-            self.assertRaises(IndexError, txn1.next)
-            self.assertRaises(IndexError, txn2.next)
-        # Make sure there are no more transactions left in iter1 and
-        # iter2, meaning they were the same length.
-        self.assertRaises(IndexError, iter1.next)
-        self.assertRaises(IndexError, iter2.next)
-        iter1.close()
-        iter2.close()
diff --git a/branches/bug1734/src/ZODB/tests/LocalStorage.py b/branches/bug1734/src/ZODB/tests/LocalStorage.py
deleted file mode 100644
index 18d2057b..00000000
--- a/branches/bug1734/src/ZODB/tests/LocalStorage.py
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-class LocalStorage:
-    """A single test that only make sense for local storages.
-
-    A local storage is one that doens't use ZEO. The __len__()
-    implementation for ZEO is inexact.
-    """
-    def checkLen(self):
-        eq = self.assertEqual
-        # The length of the database ought to grow by one each time
-        eq(len(self._storage), 0)
-        self._dostore()
-        eq(len(self._storage), 1)
-        self._dostore()
-        eq(len(self._storage), 2)
diff --git a/branches/bug1734/src/ZODB/tests/MTStorage.py b/branches/bug1734/src/ZODB/tests/MTStorage.py
deleted file mode 100644
index d887e7f9..00000000
--- a/branches/bug1734/src/ZODB/tests/MTStorage.py
+++ /dev/null
@@ -1,226 +0,0 @@
-import random
-import sys
-import threading
-import time
-
-from persistent.mapping import PersistentMapping
-import transaction
-
-import ZODB
-from ZODB.tests.StorageTestBase \
-     import zodb_pickle, zodb_unpickle, handle_serials
-from ZODB.tests.MinPO import MinPO
-from ZODB.POSException import ConflictError
-
-SHORT_DELAY = 0.01
-
-def sort(l):
-    "Sort a list in place and return it."
-    l.sort()
-    return l
-
-class TestThread(threading.Thread):
-    """Base class for defining threads that run from unittest.
-
-    If the thread exits with an uncaught exception, catch it and
-    re-raise it when the thread is joined.  The re-raise will cause
-    the test to fail.
-
-    The subclass should define a runtest() method instead of a run()
-    method.
-    """
-
-    def __init__(self):
-        threading.Thread.__init__(self)
-        self._exc_info = None
-
-    def run(self):
-        try:
-            self.runtest()
-        except:
-            self._exc_info = sys.exc_info()
-
-    def join(self, timeout=None):
-        threading.Thread.join(self, timeout)
-        if self._exc_info:
-            raise self._exc_info[0], self._exc_info[1], self._exc_info[2]
-
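-# A minimal sketch (illustrative only; FailingThread is hypothetical, and
-# nothing in the suite calls this) of the contract described above: an
-# exception raised in runtest() is captured and re-raised from join(), so
-# the joining test fails with the worker thread's traceback.
-def _example_testthread_reraise():
-    class FailingThread(TestThread):
-        def runtest(self):
-            raise RuntimeError('boom')
-    t = FailingThread()
-    t.start()
-    t.join()  # re-raises the worker's RuntimeError here
-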
-class ZODBClientThread(TestThread):
-
-    __super_init = TestThread.__init__
-
-    def __init__(self, db, test, commits=10, delay=SHORT_DELAY):
-        self.__super_init()
-        self.setDaemon(1)
-        self.db = db
-        self.test = test
-        self.commits = commits
-        self.delay = delay
-
-    def runtest(self):
-        conn = self.db.open()
-        conn.sync()
-        root = conn.root()
-        d = self.get_thread_dict(root)
-        if d is None:
-            self.test.fail()
-        else:
-            for i in range(self.commits):
-                self.commit(d, i)
-        self.test.assertEqual(sort(d.keys()), range(self.commits))
-
-    def commit(self, d, num):
-        d[num] = time.time()
-        time.sleep(self.delay)
-        transaction.commit()
-        time.sleep(self.delay)
-
-    def get_thread_dict(self, root):
-        name = self.getName()
-        # arbitrarily limit to 10 retries
-        for i in range(10):
-            try:
-                m = PersistentMapping()
-                root[name] = m
-                transaction.commit()
-                break
-            except ConflictError, err:
-                transaction.abort()
-                root._p_jar.sync()
-        for i in range(10):
-            try:
-                return root.get(name)
-            except ConflictError:
-                transaction.abort()
-
-class StorageClientThread(TestThread):
-
-    __super_init = TestThread.__init__
-
-    def __init__(self, storage, test, commits=10, delay=SHORT_DELAY):
-        self.__super_init()
-        self.storage = storage
-        self.test = test
-        self.commits = commits
-        self.delay = delay
-        self.oids = {}
-
-    def runtest(self):
-        for i in range(self.commits):
-            self.dostore(i)
-        self.check()
-
-    def check(self):
-        for oid, revid in self.oids.items():
-            data, serial = self.storage.load(oid, '')
-            self.test.assertEqual(serial, revid)
-            obj = zodb_unpickle(data)
-            self.test.assertEqual(obj.value[0], self.getName())
-
-    def pause(self):
-        time.sleep(self.delay)
-
-    def oid(self):
-        oid = self.storage.new_oid()
-        self.oids[oid] = None
-        return oid
-
-    def dostore(self, i):
-        data = zodb_pickle(MinPO((self.getName(), i)))
-        t = transaction.Transaction()
-        oid = self.oid()
-        self.pause()
-
-        self.storage.tpc_begin(t)
-        self.pause()
-
-        # Always create a new object, signified by None for revid
-        r1 = self.storage.store(oid, None, data, '', t)
-        self.pause()
-
-        r2 = self.storage.tpc_vote(t)
-        self.pause()
-
-        self.storage.tpc_finish(t)
-        self.pause()
-
-        revid = handle_serials(oid, r1, r2)
-        self.oids[oid] = revid
-
-class ExtStorageClientThread(StorageClientThread):
-
-    def runtest(self):
-        # pick some other storage ops to execute, depending in part
-        # on the features provided by the storage.
-        names = ["do_load", "do_modifiedInVersion"]
-        if self.storage.supportsUndo():
-            names += ["do_loadSerial", "do_undoLog", "do_iterator"]
-        ops = [getattr(self, meth) for meth in names]
-        assert ops, "Didn't find an storage ops in %s" % self.storage
-        # do a store to guarantee there's at least one oid in self.oids
-        self.dostore(0)
-
-        for i in range(self.commits - 1):
-            meth = random.choice(ops)
-            meth()
-            self.dostore(i)
-        self.check()
-
-    def pick_oid(self):
-        return random.choice(self.oids.keys())
-
-    def do_load(self):
-        oid = self.pick_oid()
-        self.storage.load(oid, '')
-
-    def do_loadSerial(self):
-        oid = self.pick_oid()
-        self.storage.loadSerial(oid, self.oids[oid])
-
-    def do_modifiedInVersion(self):
-        oid = self.pick_oid()
-        self.storage.modifiedInVersion(oid)
-
-    def do_undoLog(self):
-        self.storage.undoLog(0, -20)
-
-    def do_iterator(self):
-        try:
-            iter = self.storage.iterator()
-        except AttributeError:
-            # It's hard to detect that a ZEO ClientStorage
-            # doesn't have this method, but does have all the others.
-            return
-        for obj in iter:
-            pass
-
-class MTStorage:
-    "Test a storage with multiple client threads executing concurrently."
-
-    def _checkNThreads(self, n, constructor, *args):
-        threads = [constructor(*args) for i in range(n)]
-        for t in threads:
-            t.start()
-        for t in threads:
-            t.join(60)
-        for t in threads:
-            self.failIf(t.isAlive(), "thread failed to finish in 60 seconds")
-
-    def check2ZODBThreads(self):
-        db = ZODB.DB(self._storage)
-        self._checkNThreads(2, ZODBClientThread, db, self)
-        db.close()
-
-    def check7ZODBThreads(self):
-        db = ZODB.DB(self._storage)
-        self._checkNThreads(7, ZODBClientThread, db, self)
-        db.close()
-
-    def check2StorageThreads(self):
-        self._checkNThreads(2, StorageClientThread, self._storage, self)
-
-    def check7StorageThreads(self):
-        self._checkNThreads(7, StorageClientThread, self._storage, self)
-
-    def check4ExtStorageThread(self):
-        self._checkNThreads(4, ExtStorageClientThread, self._storage, self)
diff --git a/branches/bug1734/src/ZODB/tests/MinPO.py b/branches/bug1734/src/ZODB/tests/MinPO.py
deleted file mode 100644
index b71bdbfc..00000000
--- a/branches/bug1734/src/ZODB/tests/MinPO.py
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""A minimal persistent object to use for tests"""
-
-from persistent import Persistent
-
-class MinPO(Persistent):
-    def __init__(self, value=None):
-        self.value = value
-
-    def __cmp__(self, aMinPO):
-        return cmp(self.value, aMinPO.value)
-
-    def __repr__(self):
-        return "MinPO(%s)" % self.value
diff --git a/branches/bug1734/src/ZODB/tests/PackableStorage.py b/branches/bug1734/src/ZODB/tests/PackableStorage.py
deleted file mode 100644
index 31f20ffd..00000000
--- a/branches/bug1734/src/ZODB/tests/PackableStorage.py
+++ /dev/null
@@ -1,700 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Run some tests relevant for storages that support pack()."""
-
-try:
-    import cPickle
-    pickle = cPickle
-except ImportError:
-    import pickle
-
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-import time
-
-from persistent import Persistent
-from persistent.mapping import PersistentMapping
-import transaction
-from ZODB import DB
-from ZODB.serialize import referencesf
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import snooze
-from ZODB.POSException import ConflictError, StorageError
-
-from ZODB.tests.MTStorage import TestThread
-
-ZERO = '\0'*8
-
-
-# This class is for the root object.  It must not contain a getoid() method
-# (really, attribute).  The persistent pickling machinery -- in the dumps()
-# function below -- will pickle Root objects as normal, but any attributes
-# which reference persistent Object instances will get pickled as persistent
-# ids, not as the object's state.  This makes the referencesf machinery
-# work, because it sniffs the pickle for persistent ids (so we have to get
-# those persistent ids into the root object's pickle).
-class Root:
-    pass
-
-
-# This is the persistent Object class.  Because it has a getoid() method, the
-# persistent pickling machinery -- in the dumps() function below -- will
-# pickle the oid string instead of the object's actual state.  Yee haw, this
-# stuff is deep. ;)
-class Object:
-    def __init__(self, oid):
-        self._oid = oid
-
-    def getoid(self):
-        return self._oid
-
-
-class C(Persistent):
-    pass
-
-# Here's where all the magic occurs.  Sadly, the pickle module is a bit
-# underdocumented, but here's what happens: by setting the persistent_id
-# attribute to getpersid() on the pickler, that function gets called for every
-# object being pickled.  By returning None when the object has no getoid
-# attribute, it signals pickle to serialize the object as normal.  That's how
-# the Root instance gets pickled correctly.  But, if the object has a getoid
-# attribute, then by returning that method's value, we tell pickle to
-# serialize the persistent id of the object instead of the object's state.
-# That sets the pickle up for proper sniffing by the referencesf machinery.
-# Fun, huh?
-def dumps(obj):
-    def getpersid(obj):
-        if hasattr(obj, 'getoid'):
-            return obj.getoid()
-        return None
-    s = StringIO()
-    p = pickle.Pickler(s)
-    p.persistent_id = getpersid
-    p.dump(obj)
-    return s.getvalue()
-
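-# A minimal sketch (illustrative only; nothing in the suite calls this) of
-# the mechanism described above: the pickle produced by dumps() stores
-# root.obj as a bare persistent id (the raw oid string), which a matching
-# persistent_load hook can recover.
-def _example_persistent_id_roundtrip(storage):
-    oid = storage.new_oid()
-    root = Root()
-    root.obj = Object(oid)
-    refs = []
-    u = pickle.Unpickler(StringIO(dumps(root)))
-    u.persistent_load = refs.append
-    u.load()
-    assert refs == [oid]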
-
-
-class PackableStorageBase:
-    # We keep a cache of object ids to instances so that the unpickler can
-    # easily return any persistent object.
-    _cache = {}
-
-    def _newobj(self):
-        # This is a convenience method to create a new persistent Object
-        # instance.  It asks the storage for a new object id, creates the
-        # instance with the given oid, populates the cache and returns the
-        # object.
-        oid = self._storage.new_oid()
-        obj = Object(oid)
-        self._cache[obj.getoid()] = obj
-        return obj
-
-    def _makeloader(self):
-        # This is the other side of the persistent pickling magic.  We need a
-        # custom unpickler to mirror our custom pickler above.  By setting the
-        # persistent_load function of the unpickler to self._cache.get(),
-        # whenever a persistent id is unpickled, it will actually return the
-        # Object instance out of the cache.  We return a function with the
-        # cache's get method bound as a default argument because it makes
-        # the code in the tests more succinct.
-        #
-        # BUT!  Be careful in your use of loads() vs. pickle.loads().  loads()
-        # should only be used on the Root object's pickle since it's the only
-        # special one.  All the Object instances should use pickle.loads().
-        def loads(str, persfunc=self._cache.get):
-            fp = StringIO(str)
-            u = pickle.Unpickler(fp)
-            u.persistent_load = persfunc
-            return u.load()
-        return loads
-
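-    def _example_loader_roundtrip(self):
-        # A minimal sketch (illustrative only; not called by any test):
-        # loads() resolves the persistent id embedded in a Root pickle
-        # back to the cached Object instance created by _newobj().
-        obj = self._newobj()
-        root = Root()
-        root.obj = obj
-        loads = self._makeloader()
-        assert loads(dumps(root)).obj is obj
-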
-    def _initroot(self):
-        try:
-            self._storage.load(ZERO, '')
-        except KeyError:
-            from transaction import Transaction
-            file = StringIO()
-            # Use the pickle alias so the ImportError fallback above works.
-            p = pickle.Pickler(file, 1)
-            p.dump((PersistentMapping, None))
-            p.dump({'_container': {}})
-            t = Transaction()
-            t.description = 'initial database creation'
-            self._storage.tpc_begin(t)
-            self._storage.store(ZERO, None, file.getvalue(), '', t)
-            self._storage.tpc_vote(t)
-            self._storage.tpc_finish(t)
-
-class PackableStorage(PackableStorageBase):
-
-    def checkPackEmptyStorage(self):
-        self._storage.pack(time.time(), referencesf)
-
-    def checkPackTomorrow(self):
-        self._initroot()
-        self._storage.pack(time.time() + 10000, referencesf)
-
-    def checkPackYesterday(self):
-        self._initroot()
-        self._storage.pack(time.time() - 10000, referencesf)
-
-    def _PackWhileWriting(self, pack_now):
-        # A storage should allow some reading and writing during
-        # a pack.  This test attempts to exercise locking code
-        # in the storage to test that it is safe.  It generates
-        # a lot of revisions, so that pack takes a long time.
-
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        for i in range(10):
-            root[i] = MinPO(i)
-        transaction.commit()
-
-        snooze()
-        packt = time.time()
-
-        choices = range(10)
-        for dummy in choices:
-            for i in choices:
-                root[i].value = MinPO(i)
-                transaction.commit()
-
-        # How many client threads should we run, and how long should we
-        # wait for them to finish?  Hard to say.  Running 4 threads and
-        # waiting 30 seconds too often left a thread still alive on Tim's
-        # Win98SE box, during ZEO flavors of this test.  Those tend to
-        # run one thread at a time to completion, and take about 10 seconds
-        # per thread.  There doesn't appear to be a compelling reason to
-        # run that many threads.  Running 3 threads and waiting up to a
-        # minute seems to work well in practice.  The ZEO tests normally
-        # finish faster than that, and the non-ZEO tests very much faster
-        # than that.
-        NUM_LOOP_TRIP = 50
-        timer = ElapsedTimer(time.time())
-        threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
-                   for i in range(3)]
-        for t in threads:
-            t.start()
-
-        if pack_now:
-            db.pack(time.time())
-        else:
-            db.pack(packt)
-
-        for t in threads:
-            t.join(60)
-        liveness = [t.isAlive() for t in threads]
-        if True in liveness:
-            # They should have finished by now.
-            print 'Liveness:', liveness
-            # Combine the outcomes, and sort by start time.
-            outcomes = []
-            for t in threads:
-                outcomes.extend(t.outcomes)
-            # each outcome list has as many of these as a loop trip got thru:
-            #     thread_id
-            #     elapsed millis at loop top
-            #     elapsed millis at attempt to assign to self.root[index]
-            #     index into self.root getting replaced
-            #     elapsed millis when outcome known
-            #     'OK' or 'Conflict'
-            #     True if we got beyond this line, False if it raised an
-            #         exception (one possible Conflict cause):
-            #             self.root[index].value = MinPO(j)
-            def cmp_by_time(a, b):
-                return cmp((a[1], a[0]), (b[1], b[0]))
-            outcomes.sort(cmp_by_time)
-            counts = [0] * 4
-            for outcome in outcomes:
-                n = len(outcome)
-                assert n >= 2
-                tid = outcome[0]
-                print 'tid:%d top:%5d' % (tid, outcome[1]),
-                if n > 2:
-                    print 'commit:%5d' % outcome[2],
-                    if n > 3:
-                        print 'index:%2d' % outcome[3],
-                        if n > 4:
-                            print 'known:%5d' % outcome[4],
-                            if n > 5:
-                                print '%8s' % outcome[5],
-                                if n > 6:
-                                    print 'assigned:%5s' % outcome[6],
-                counts[tid] += 1
-                if counts[tid] == NUM_LOOP_TRIP:
-                    print 'thread %d done' % tid,
-                print
-
-            self.fail('a thread is still alive')
-
-        # Iterate over the storage to make sure it's sane, but not every
-        # storage supports iterators.
-        if not hasattr(self._storage, "iterator"):
-            return
-
-        it = self._storage.iterator()
-        for txn in it:
-            for data in txn:
-                pass
-        it.close()
-
-    def checkPackWhileWriting(self):
-        self._PackWhileWriting(pack_now=False)
-
-    def checkPackNowWhileWriting(self):
-        self._PackWhileWriting(pack_now=True)
-
-    def checkPackLotsWhileWriting(self):
-        # This is like the other pack-while-writing tests, except it packs
-        # repeatedly until the client thread is done.  At the time it was
-        # introduced, it reliably provoked
-        #     CorruptedError:  ... transaction with checkpoint flag set
-        # in the ZEO flavor of the FileStorage tests.
-
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        choices = range(10)
-        for i in choices:
-            root[i] = MinPO(i)
-        transaction.commit()
-
-        snooze()
-        packt = time.time()
-
-        for dummy in choices:
-            for i in choices:
-                root[i].value = MinPO(i)
-                transaction.commit()
-
-        NUM_LOOP_TRIP = 100
-        timer = ElapsedTimer(time.time())
-        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
-        thread.start()
-        while thread.isAlive():
-            db.pack(packt)
-            snooze()
-            packt = time.time()
-        thread.join()
-
-        # Iterate over the storage to make sure it's sane.
-        if not hasattr(self._storage, "iterator"):
-            return
-        it = self._storage.iterator()
-        for txn in it:
-            for data in txn:
-                pass
-        it.close()
-
-class PackableUndoStorage(PackableStorageBase):
-
-    def checkPackAllRevisions(self):
-        self._initroot()
-        eq = self.assertEqual
-        raises = self.assertRaises
-        # Create a `persistent' object
-        obj = self._newobj()
-        oid = obj.getoid()
-        obj.value = 1
-        # Commit three different revisions
-        revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
-        obj.value = 2
-        revid2 = self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
-        obj.value = 3
-        revid3 = self._dostoreNP(oid, revid=revid2, data=pickle.dumps(obj))
-        # Now make sure all three revisions can be extracted
-        data = self._storage.loadSerial(oid, revid1)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 1)
-        data = self._storage.loadSerial(oid, revid2)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 2)
-        data = self._storage.loadSerial(oid, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 3)
-        # Now pack all transactions; we need to wait until the clock
-        # advances so the pack time is greater than the last commit time.
-        now = packtime = time.time()
-        while packtime <= now:
-            packtime = time.time()
-        self._storage.pack(packtime, referencesf)
-        # All revisions of the object should be gone, since there is no
-        # reference from the root object to this object.
-        raises(KeyError, self._storage.loadSerial, oid, revid1)
-        raises(KeyError, self._storage.loadSerial, oid, revid2)
-        raises(KeyError, self._storage.loadSerial, oid, revid3)
-
-    def checkPackJustOldRevisions(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        loads = self._makeloader()
-        # Create a root object.  This can't be an instance of Object,
-        # otherwise the pickling machinery will serialize it as a persistent
-        # id and not as an object that contains references (persistent ids) to
-        # other objects.
-        root = Root()
-        # Create a persistent object, with some initial state
-        obj = self._newobj()
-        oid = obj.getoid()
-        # Link the root object to the persistent object, in order to keep the
-        # persistent object alive.  Store the root object.
-        root.obj = obj
-        root.value = 0
-        revid0 = self._dostoreNP(ZERO, data=dumps(root))
-        # Make sure the root can be retrieved
-        data, revid = self._storage.load(ZERO, '')
-        eq(revid, revid0)
-        eq(loads(data).value, 0)
-        # Commit three different revisions of the other object
-        obj.value = 1
-        revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
-        obj.value = 2
-        revid2 = self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
-        obj.value = 3
-        revid3 = self._dostoreNP(oid, revid=revid2, data=pickle.dumps(obj))
-        # Now make sure all three revisions can be extracted
-        data = self._storage.loadSerial(oid, revid1)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 1)
-        data = self._storage.loadSerial(oid, revid2)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 2)
-        data = self._storage.loadSerial(oid, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 3)
-        # Now pack just revisions 1 and 2.  The object's current revision
-        # should stay alive because it's pointed to by the root.
-        now = packtime = time.time()
-        while packtime <= now:
-            packtime = time.time()
-        self._storage.pack(packtime, referencesf)
-        # Make sure the revisions are gone, but that object zero and revision
-        # 3 are still there and correct
-        data, revid = self._storage.load(ZERO, '')
-        eq(revid, revid0)
-        eq(loads(data).value, 0)
-        raises(KeyError, self._storage.loadSerial, oid, revid1)
-        raises(KeyError, self._storage.loadSerial, oid, revid2)
-        data = self._storage.loadSerial(oid, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 3)
-        data, revid = self._storage.load(oid, '')
-        eq(revid, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid)
-        eq(pobj.value, 3)
-
-    def checkPackOnlyOneObject(self):
-        eq = self.assertEqual
-        raises = self.assertRaises
-        loads = self._makeloader()
-        # Create a root object.  This can't be an instance of Object,
-        # otherwise the pickling machinery will serialize it as a persistent
-        # id and not as an object that contains references (persistent ids) to
-        # other objects.
-        root = Root()
-        # Create a persistent object, with some initial state
-        obj1 = self._newobj()
-        oid1 = obj1.getoid()
-        # Create another persistent object, with some initial state.  Make
-        # sure its oid is greater than the first object's oid.
-        obj2 = self._newobj()
-        oid2 = obj2.getoid()
-        self.failUnless(oid2 > oid1)
-        # Link the root object to the persistent objects, in order to keep
-        # them alive.  Store the root object.
-        root.obj1 = obj1
-        root.obj2 = obj2
-        root.value = 0
-        revid0 = self._dostoreNP(ZERO, data=dumps(root))
-        # Make sure the root can be retrieved
-        data, revid = self._storage.load(ZERO, '')
-        eq(revid, revid0)
-        eq(loads(data).value, 0)
-        # Commit three different revisions of the first object
-        obj1.value = 1
-        revid1 = self._dostoreNP(oid1, data=pickle.dumps(obj1))
-        obj1.value = 2
-        revid2 = self._dostoreNP(oid1, revid=revid1, data=pickle.dumps(obj1))
-        obj1.value = 3
-        revid3 = self._dostoreNP(oid1, revid=revid2, data=pickle.dumps(obj1))
-        # Now make sure all three revisions can be extracted
-        data = self._storage.loadSerial(oid1, revid1)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid1)
-        eq(pobj.value, 1)
-        data = self._storage.loadSerial(oid1, revid2)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid1)
-        eq(pobj.value, 2)
-        data = self._storage.loadSerial(oid1, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid1)
-        eq(pobj.value, 3)
-        # Now commit a revision of the second object
-        obj2.value = 11
-        revid4 = self._dostoreNP(oid2, data=pickle.dumps(obj2))
-        # And make sure the revision can be extracted
-        data = self._storage.loadSerial(oid2, revid4)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid2)
-        eq(pobj.value, 11)
-        # Now pack just revisions 1 and 2 of object1.  Object1's current
-        # revision should stay alive because it's pointed to by the root, as
-        # should Object2's current revision.
-        now = packtime = time.time()
-        while packtime <= now:
-            packtime = time.time()
-        self._storage.pack(packtime, referencesf)
-        # Make sure the revisions are gone, but that object zero, object2, and
-        # revision 3 of object1 are still there and correct.
-        data, revid = self._storage.load(ZERO, '')
-        eq(revid, revid0)
-        eq(loads(data).value, 0)
-        raises(KeyError, self._storage.loadSerial, oid1, revid1)
-        raises(KeyError, self._storage.loadSerial, oid1, revid2)
-        data = self._storage.loadSerial(oid1, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid1)
-        eq(pobj.value, 3)
-        data, revid = self._storage.load(oid1, '')
-        eq(revid, revid3)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid1)
-        eq(pobj.value, 3)
-        data, revid = self._storage.load(oid2, '')
-        eq(revid, revid4)
-        eq(loads(data).value, 11)
-        data = self._storage.loadSerial(oid2, revid4)
-        pobj = pickle.loads(data)
-        eq(pobj.getoid(), oid2)
-        eq(pobj.value, 11)
-
-    def checkPackUnlinkedFromRoot(self):
-        eq = self.assertEqual
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        txn = transaction.get()
-        txn.note('root')
-        txn.commit()
-
-        now = packtime = time.time()
-        while packtime <= now:
-            packtime = time.time()
-
-        obj = C()
-        obj.value = 7
-
-        root['obj'] = obj
-        txn = transaction.get()
-        txn.note('root -> o1')
-        txn.commit()
-
-        del root['obj']
-        txn = transaction.get()
-        txn.note('root -x-> o1')
-        txn.commit()
-
-        self._storage.pack(packtime, referencesf)
-
-        log = self._storage.undoLog()
-        tid = log[0]['id']
-        db.undo(tid)
-        txn = transaction.get()
-        txn.note('undo root -x-> o1')
-        txn.commit()
-
-        conn.sync()
-
-        eq(root['obj'].value, 7)
-
-    def checkRedundantPack(self):
-        # It is an error to perform a pack with a packtime earlier
-        # than a previous packtime.  The storage can't do a full
-        # traversal as of the packtime, because the previous pack may
-        # have removed revisions necessary for a full traversal.
-
-        # It should be simple to test that a storage error is raised,
-        # but this test case goes to the trouble of constructing a
-        # scenario that would lose data if the earlier packtime was
-        # honored.
-
-        self._initroot()
-
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        root["d"] = d = PersistentMapping()
-        transaction.commit()
-        snooze()
-
-        obj = d["obj"] = C()
-        obj.value = 1
-        transaction.commit()
-        snooze()
-        packt1 = time.time()
-        lost_oid = obj._p_oid
-
-        obj = d["anotherobj"] = C()
-        obj.value = 2
-        transaction.commit()
-        snooze()
-        packt2 = time.time()
-
-        db.pack(packt2)
-        # BDBStorage allows the second pack, but doesn't lose data.
-        try:
-            db.pack(packt1)
-        except StorageError:
-            pass
-        # This object would be removed by the second pack, even though
-        # it is reachable.
-        self._storage.load(lost_oid, "")
-
-    def checkPackUndoLog(self):
-        self._initroot()
-        # Create a `persistent' object
-        obj = self._newobj()
-        oid = obj.getoid()
-        obj.value = 1
-        # Commit two different revisions
-        revid1 = self._dostoreNP(oid, data=pickle.dumps(obj))
-        obj.value = 2
-        snooze()
-        packtime = time.time()
-        snooze()
-        self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
-        # Now pack the first transaction
-        self.assertEqual(3, len(self._storage.undoLog()))
-        self._storage.pack(packtime, referencesf)
-        # The undo log contains only the most recent transaction
-        self.assertEqual(1, len(self._storage.undoLog()))
-
-    def dont_checkPackUndoLogUndoable(self):
-        # A disabled test. I wanted to test that the content of the
-        # undo log was consistent, but every storage appears to
-        # include something slightly different. If the result of this
-        # method is only used to fill a GUI then this difference
-        # doesn't matter.  Perhaps re-enable this test once we agree
-        # what should be asserted.
-
-        self._initroot()
-        # Create two `persistent' objects
-        obj1 = self._newobj()
-        oid1 = obj1.getoid()
-        obj1.value = 1
-        obj2 = self._newobj()
-        oid2 = obj2.getoid()
-        obj2.value = 2
-
-        # Commit the first revision of each of them
-        revid11 = self._dostoreNP(oid1, data=pickle.dumps(obj1),
-                                  description="1-1")
-        revid22 = self._dostoreNP(oid2, data=pickle.dumps(obj2),
-                                  description="2-2")
-
-        # remember the time. everything above here will be packed away
-        snooze()
-        packtime = time.time()
-        snooze()
-        # Commit two revisions of the first object
-        obj1.value = 3
-        revid13 = self._dostoreNP(oid1, revid=revid11,
-                                  data=pickle.dumps(obj1), description="1-3")
-        obj1.value = 4
-        self._dostoreNP(oid1, revid=revid13,
-                        data=pickle.dumps(obj1), description="1-4")
-        # Commit one revision of the second object
-        obj2.value = 5
-        self._dostoreNP(oid2, revid=revid22,
-                        data=pickle.dumps(obj2), description="2-5")
-        # Now pack
-        self.assertEqual(6,len(self._storage.undoLog()))
-        print '\ninitial undoLog was'
-        for r in self._storage.undoLog(): print r
-        self._storage.pack(packtime, referencesf)
-        # The undo log contains only two undoable transactions.
-        print '\nafter packing undoLog was'
-        for r in self._storage.undoLog(): print r
-        # what can we assert about that?
-
-
-# A number of these threads are kicked off by _PackWhileWriting().  Their
-# purpose is to abuse the database passed to the constructor with lots of
-# random write activity while the main thread is packing it.
-class ClientThread(TestThread):
-
-    def __init__(self, db, choices, loop_trip, timer, thread_id):
-        TestThread.__init__(self)
-        self.root = db.open().root()
-        self.choices = choices
-        self.loop_trip = loop_trip
-        self.millis = timer.elapsed_millis
-        self.thread_id = thread_id
-        # list of lists; each list has as many of these as a loop trip
-        # got thru:
-        #     thread_id
-        #     elapsed millis at loop top
-        #     elapsed millis at attempt
-        #     index into self.root getting replaced
-        #     elapsed millis when outcome known
-        #     'OK' or 'Conflict'
-        #     True if we got beyond this line, False if it raised an exception:
-        #          self.root[index].value = MinPO(j)
-        self.outcomes = []
-
-    def runtest(self):
-        from random import choice
-
-        for j in range(self.loop_trip):
-            assign_worked = False
-            alist = [self.thread_id, self.millis()]
-            self.outcomes.append(alist)
-            try:
-                index = choice(self.choices)
-                alist.extend([self.millis(), index])
-                self.root[index].value = MinPO(j)
-                assign_worked = True
-                transaction.commit()
-                alist.append(self.millis())
-                alist.append('OK')
-            except ConflictError:
-                alist.append(self.millis())
-                alist.append('Conflict')
-                transaction.abort()
-            alist.append(assign_worked)
-
-class ElapsedTimer:
-    def __init__(self, start_time):
-        self.start_time = start_time
-
-    def elapsed_millis(self):
-        return int((time.time() - self.start_time) * 1000)
diff --git a/branches/bug1734/src/ZODB/tests/PersistentStorage.py b/branches/bug1734/src/ZODB/tests/PersistentStorage.py
deleted file mode 100644
index 4009eb06..00000000
--- a/branches/bug1734/src/ZODB/tests/PersistentStorage.py
+++ /dev/null
@@ -1,55 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test that a storage's values persist across open and close."""
-
-class PersistentStorage:
-
-    def checkUpdatesPersist(self):
-        oids = []
-
-        def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
-            oid = new_oid()
-            l.append(oid)
-            return oid
-
-        self._storage.new_oid = new_oid_wrapper
-
-        self._dostore()
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid)
-        if self._storage.supportsVersions():
-            self._dostore(oid, revid, data=8, version='b')
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=1)
-        revid = self._dostore(oid, revid, data=2)
-        self._dostore(oid, revid, data=3)
-
-        # keep copies of all the objects
-        objects = []
-        for oid in oids:
-            p, s = self._storage.load(oid, '')
-            objects.append((oid, '', p, s))
-            ver = self._storage.modifiedInVersion(oid)
-            if ver:
-                p, s = self._storage.load(oid, ver)
-                objects.append((oid, ver, p, s))
-
-        self._storage.close()
-        self.open()
-
-        # make sure all the objects are still there with the same data
-        for oid, ver, p, s in objects:
-            _p, _s = self._storage.load(oid, ver)
-            self.assertEquals(p, _p)
-            self.assertEquals(s, _s)
diff --git a/branches/bug1734/src/ZODB/tests/ReadOnlyStorage.py b/branches/bug1734/src/ZODB/tests/ReadOnlyStorage.py
deleted file mode 100644
index 6eae82f2..00000000
--- a/branches/bug1734/src/ZODB/tests/ReadOnlyStorage.py
+++ /dev/null
@@ -1,64 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-from ZODB.POSException import ReadOnlyError, Unsupported
-import transaction
-
-class ReadOnlyStorage:
-
-    def _create_data(self):
-        # test a read-only storage that already has some data
-        self.oids = {}
-        for i in range(10):
-            oid = self._storage.new_oid()
-            revid = self._dostore(oid)
-            self.oids[oid] = revid
-
-    def _make_readonly(self):
-        self._storage.close()
-        self.open(read_only=True)
-        self.assert_(self._storage.isReadOnly())
-
-    def checkReadMethods(self):
-        self._create_data()
-        self._make_readonly()
-        # Note that this doesn't check _all_ read methods.
-        for oid in self.oids.keys():
-            data, revid = self._storage.load(oid, '')
-            self.assertEqual(revid, self.oids[oid])
-            self.assert_(not self._storage.modifiedInVersion(oid))
-            # Storages without revisions may not have loadSerial().
-            try:
-                _data = self._storage.loadSerial(oid, revid)
-                self.assertEqual(data, _data)
-            except Unsupported:
-                pass
-
-    def checkWriteMethods(self):
-        self._make_readonly()
-        self.assertRaises(ReadOnlyError, self._storage.new_oid)
-        t = transaction.Transaction()
-        self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
-
-        if self._storage.supportsVersions():
-            self.assertRaises(ReadOnlyError, self._storage.abortVersion,
-                              '', t)
-            self.assertRaises(ReadOnlyError, self._storage.commitVersion,
-                              '', '', t)
-
-        self.assertRaises(ReadOnlyError, self._storage.store,
-                          '\000' * 8, None, '', '', t)
-
-        if self._storage.supportsTransactionalUndo():
-            self.assertRaises(ReadOnlyError, self._storage.undo,
-                              '\000' * 8, t)
diff --git a/branches/bug1734/src/ZODB/tests/RecoveryStorage.py b/branches/bug1734/src/ZODB/tests/RecoveryStorage.py
deleted file mode 100644
index 2a056ec9..00000000
--- a/branches/bug1734/src/ZODB/tests/RecoveryStorage.py
+++ /dev/null
@@ -1,288 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""More recovery and iterator tests."""
-
-import transaction
-from transaction import Transaction
-from ZODB.tests.IteratorStorage import IteratorDeepCompare
-from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle, snooze
-from ZODB import DB
-from ZODB.serialize import referencesf
-
-import time
-
-class RecoveryStorage(IteratorDeepCompare):
-    # Requires a setUp() that creates a self._dst destination storage
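-    # A hypothetical concrete subclass (illustrative only; the storage
-    # class, file names, and base class are assumptions) might provide
-    # _storage and _dst like this:
-    #
-    #     from ZODB.FileStorage import FileStorage
-    #
-    #     class FileStorageRecovery(StorageTestBase, RecoveryStorage):
-    #         def setUp(self):
-    #             self._storage = FileStorage('Source.fs', create=True)
-    #             self._dst = FileStorage('Dest.fs', create=True)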
-    def checkSimpleRecovery(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=11)
-        revid = self._dostore(oid, revid=revid, data=12)
-        revid = self._dostore(oid, revid=revid, data=13)
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
-
-    def checkRecoveryAcrossVersions(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=21)
-        revid = self._dostore(oid, revid=revid, data=22)
-        revid = self._dostore(oid, revid=revid, data=23, version='one')
-        revid = self._dostore(oid, revid=revid, data=34, version='one')
-        # Now commit the version
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion('one', '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
-
-    def checkRecoverAbortVersion(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=21, version="one")
-        revid = self._dostore(oid, revid=revid, data=23, version='one')
-        revid = self._dostore(oid, revid=revid, data=34, version='one')
-        # Now abort the version and the creation
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.abortVersion('one', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        self.assertEqual(oids, [oid])
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
-        # Also make sure the last transaction has a data record
-        # with None for its data attribute, because we've undone the
-        # object.
-        for s in self._storage, self._dst:
-            iter = s.iterator()
-            for trans in iter:
-                pass # iterate until we get the last one
-            data = trans[0]
-            self.assertRaises(IndexError, lambda i, t=trans: t[i], 1)
-            self.assertEqual(data.oid, oid)
-            self.assertEqual(data.data, None)
-
-    def checkRecoverUndoInVersion(self):
-        oid = self._storage.new_oid()
-        version = "aVersion"
-        revid_a = self._dostore(oid, data=MinPO(91))
-        revid_b = self._dostore(oid, revid=revid_a, version=version,
-                                data=MinPO(92))
-        revid_c = self._dostore(oid, revid=revid_b, version=version,
-                                data=MinPO(93))
-        self._undo(self._storage.undoInfo()[0]['id'], [oid])
-        self._commitVersion(version, '')
-        self._undo(self._storage.undoInfo()[0]['id'], [oid])
-
-        # now copy the records to a new storage
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
-
-        # The last two transactions were applied directly rather than
-        # copied.  So we can't use compare() to verify that they new
-        # transactions are applied correctly.  (The new transactions
-        # will have different timestamps for each storage.)
-
-        self._abortVersion(version)
-        self.assert_(self._storage.versionEmpty(version))
-        self._undo(self._storage.undoInfo()[0]['id'], [oid])
-        self.assert_(not self._storage.versionEmpty(version))
-
-        # check the data is what we expect it to be
-        data, revid = self._storage.load(oid, version)
-        self.assertEqual(zodb_unpickle(data), MinPO(92))
-        data, revid = self._storage.load(oid, '')
-        self.assertEqual(zodb_unpickle(data), MinPO(91))
-
-        # and swap the storages
-        tmp = self._storage
-        self._storage = self._dst
-        self._abortVersion(version)
-        self.assert_(self._storage.versionEmpty(version))
-        self._undo(self._storage.undoInfo()[0]['id'], [oid])
-        self.assert_(not self._storage.versionEmpty(version))
-
-        # check the data is what we expect it to be
-        data, revid = self._storage.load(oid, version)
-        self.assertEqual(zodb_unpickle(data), MinPO(92))
-        data, revid = self._storage.load(oid, '')
-        self.assertEqual(zodb_unpickle(data), MinPO(91))
-
-        # swap them back
-        self._storage = tmp
-
-        # Now remove _dst and copy all the transactions a second time.
-        # This time we will be able to confirm via compare().
-        self._dst.close()
-        self._dst.cleanup()
-        self._dst = self.new_dest()
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
-
-    def checkRestoreAcrossPack(self):
-        db = DB(self._storage)
-        c = db.open()
-        r = c.root()
-        obj = r["obj1"] = MinPO(1)
-        transaction.commit()
-        obj = r["obj2"] = MinPO(1)
-        transaction.commit()
-
-        self._dst.copyTransactionsFrom(self._storage)
-        self._dst.pack(time.time(), referencesf)
-
-        self._undo(self._storage.undoInfo()[0]['id'])
-
-        # copy the final transaction manually.  even though there
-        # was a pack, the restore() ought to succeed.
-        it = self._storage.iterator()
-        final = list(it)[-1]
-        self._dst.tpc_begin(final, final.tid, final.status)
-        for r in final:
-            self._dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
-                              final)
-        it.close()
-        self._dst.tpc_vote(final)
-        self._dst.tpc_finish(final)
-
-    def checkPackWithGCOnDestinationAfterRestore(self):
-        raises = self.assertRaises
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-        root.obj = obj1 = MinPO(1)
-        txn = transaction.get()
-        txn.note('root -> obj')
-        txn.commit()
-        root.obj.obj = obj2 = MinPO(2)
-        txn = transaction.get()
-        txn.note('root -> obj -> obj')
-        txn.commit()
-        del root.obj
-        txn = transaction.get()
-        txn.note('root -X->')
-        txn.commit()
-        # Now copy the transactions to the destination
-        self._dst.copyTransactionsFrom(self._storage)
-        # Now pack the destination.
-        snooze()
-        self._dst.pack(time.time(),  referencesf)
-        # And check to see that the root object exists, but not the other
-        # objects.
-        data, serial = self._dst.load(root._p_oid, '')
-        raises(KeyError, self._dst.load, obj1._p_oid, '')
-        raises(KeyError, self._dst.load, obj2._p_oid, '')
-
-    def checkRestoreWithMultipleObjectsInUndoRedo(self):
-        from ZODB.FileStorage import FileStorage
-
-        # Undo creates backpointers in (at least) FileStorage.  ZODB 3.2.1
-        # FileStorage._data_find() had an off-by-8 error, neglecting to
-        # account for the size of the backpointer when searching a
-        # transaction with multiple data records.  The results were
-        # unpredictable.  For example, it could raise a Python exception
-        # due to passing a negative offset to file.seek(), or could
-        # claim that a transaction didn't have data for an oid despite
-        # that it actually did.
-        #
-        # The former failure mode was seen in real life, in a ZRS secondary
-        # doing recovery.  On my box today, the second failure mode is
-        # what happens in this test (with an unpatched _data_find, of
-        # course).  Note that the error can only "bite" if more than one
-        # data record is in a transaction, and the oid we're looking for
-        # follows at least one data record with a backpointer.
-        #
-        # Unfortunately, _data_find() is a low-level implementation detail,
-        # and this test does some horrid white-box abuse to test it.
-
-        is_filestorage = isinstance(self._storage, FileStorage)
-
-        db = DB(self._storage)
-        c = db.open()
-        r = c.root()
-
-        # Create some objects.
-        r["obj1"] = MinPO(1)
-        r["obj2"] = MinPO(1)
-        transaction.commit()
-
-        # Add x attributes to them.
-        r["obj1"].x = 'x1'
-        r["obj2"].x = 'x2'
-        transaction.commit()
-
-        r = db.open().root()
-        self.assertEquals(r["obj1"].x, 'x1')
-        self.assertEquals(r["obj2"].x, 'x2')
-
-        # Dirty tricks.
-        if is_filestorage:
-            obj1_oid = r["obj1"]._p_oid
-            obj2_oid = r["obj2"]._p_oid
-            # This will be the offset of the next transaction, which
-            # will contain two backpointers.
-            pos = self._storage.getSize()
-
-        # Undo the attribute creation.
-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        r = db.open().root()
-        self.assertRaises(AttributeError, getattr, r["obj1"], 'x')
-        self.assertRaises(AttributeError, getattr, r["obj2"], 'x')
-
-        if is_filestorage:
-            # _data_find should find data records for both objects in that
-            # transaction.  Without the patch, the second assert failed
-            # (it claimed it couldn't find a data record for obj2) on my
-            # box, but other failure modes were possible.
-            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
-            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
-
-            # The offset of the next ("redo") transaction.
-            pos = self._storage.getSize()
-
-        # Undo the undo (restore the attributes).
-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        r = db.open().root()
-        self.assertEquals(r["obj1"].x, 'x1')
-        self.assertEquals(r["obj2"].x, 'x2')
-
-        if is_filestorage:
-            # Again _data_find should find both objects in this txn, and
-            # again the second assert failed on my box.
-            self.assert_(self._storage._data_find(pos, obj1_oid, '') > 0)
-            self.assert_(self._storage._data_find(pos, obj2_oid, '') > 0)
-
-        # Indirectly provoke .restore().  .restore in turn indirectly
-        # provokes _data_find too, but not usefully for the purposes of
-        # the specific bug this test aims at:  copyTransactionsFrom() uses
-        # storage iterators that chase backpointers themselves, and
-        # return the data they point at instead.  The result is that
-        # _data_find didn't actually see anything dangerous in this
-        # part of the test.
-        self._dst.copyTransactionsFrom(self._storage)
-        self.compare(self._storage, self._dst)
diff --git a/branches/bug1734/src/ZODB/tests/RevisionStorage.py b/branches/bug1734/src/ZODB/tests/RevisionStorage.py
deleted file mode 100644
index 28d47f9f..00000000
--- a/branches/bug1734/src/ZODB/tests/RevisionStorage.py
+++ /dev/null
@@ -1,175 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Check loadSerial() on storages that support historical revisions."""
-
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import \
-     zodb_unpickle, zodb_pickle, snooze, handle_serials
-from ZODB.utils import p64, u64
-
-import transaction
-
-ZERO = '\0'*8
-
-class RevisionStorage:
-
-    def checkLoadSerial(self):
-        oid = self._storage.new_oid()
-        revid = ZERO
-        revisions = {}
-        for i in range(31, 38):
-            revid = self._dostore(oid, revid=revid, data=MinPO(i))
-            revisions[revid] = MinPO(i)
-        # Now make sure all the revisions have the correct value
-        for revid, value in revisions.items():
-            data = self._storage.loadSerial(oid, revid)
-            self.assertEqual(zodb_unpickle(data), value)
-
-    def checkLoadBefore(self):
-        # Store 10 revisions of one object and then make sure that we
-        # can get all the non-current revisions back.
-        oid = self._storage.new_oid()
-        revs = []
-        revid = None
-        for i in range(10):
-            # We need to ensure that successive timestamps are at least
-            # two apart, so that a timestamp exists that's unambiguously
-            # between successive timestamps.  Each call to snooze()
-            # guarantees that the next timestamp will be at least one
-            # larger (and probably much more than that) than the previous
-            # one.
-            snooze()
-            snooze()
-            revid = self._dostore(oid, revid, data=MinPO(i))
-            revs.append(self._storage.loadEx(oid, ""))
-
-        prev = u64(revs[0][1])
-        for i in range(1, 10):
-            tid = revs[i][1]
-            cur = u64(tid)
-            middle = prev + (cur - prev) // 2
-            assert prev < middle < cur  # else the snooze() trick failed
-            prev = cur
-            t = self._storage.loadBefore(oid, p64(middle))
-            self.assert_(t is not None)
-            data, start, end = t
-            self.assertEqual(revs[i-1][0], data)
-            self.assertEqual(tid, end)
-
-    def checkLoadBeforeEdges(self):
-        # Check the edges cases for a non-current load.
-        oid = self._storage.new_oid()
-
-        self.assertRaises(KeyError, self._storage.loadBefore,
-                          oid, p64(0))
-
-        revid1 = self._dostore(oid, data=MinPO(1))
-
-        self.assertEqual(self._storage.loadBefore(oid, p64(0)), None)
-        self.assertEqual(self._storage.loadBefore(oid, revid1), None)
-
-        cur = p64(u64(revid1) + 1)
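-        # loadBefore() returns (data, start_tid, end_tid); end_tid is None
-        # while the revision is still the current one.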
-        data, start, end = self._storage.loadBefore(oid, cur)
-        self.assertEqual(zodb_unpickle(data), MinPO(1))
-        self.assertEqual(start, revid1)
-        self.assertEqual(end, None)
-
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(2))
-        data, start, end = self._storage.loadBefore(oid, cur)
-        self.assertEqual(zodb_unpickle(data), MinPO(1))
-        self.assertEqual(start, revid1)
-        self.assertEqual(end, revid2)
-
-    def checkLoadBeforeOld(self):
-        # Look for a very old revision.  With the BaseStorage implementation
-        # this should require multiple history() calls.
-        oid = self._storage.new_oid()
-        revs = []
-        revid = None
-        for i in range(50):
-            revid = self._dostore(oid, revid, data=MinPO(i))
-            revs.append(revid)
-
-        data, start, end = self._storage.loadBefore(oid, revs[12])
-        self.assertEqual(zodb_unpickle(data), MinPO(11))
-        self.assertEqual(start, revs[11])
-        self.assertEqual(end, revs[12])
-
-
-    # Unsure:  Is it okay to assume everyone testing against RevisionStorage
-    # implements undo?
-
-    def checkLoadBeforeUndo(self):
-        # Do several transactions then undo them.
-        oid = self._storage.new_oid()
-        revid = None
-        for i in range(5):
-            revid = self._dostore(oid, revid, data=MinPO(i))
-        revs = []
-        for i in range(4):
-            info = self._storage.undoInfo()
-            tid = info[0]["id"]
-            # Always undo the most recent txn, so the value will
-            # alternate between 3 and 4.
-            self._undo(tid, [oid], note="undo %d" % i)
-            revs.append(self._storage.loadEx(oid, ""))
-
-        prev_tid = None
-        for i, (data, tid, ver) in enumerate(revs):
-            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
-            self.assertEqual(data, t[0])
-            self.assertEqual(tid, t[1])
-            if prev_tid:
-                self.assert_(prev_tid < t[1])
-            prev_tid = t[1]
-            if i < 3:
-                self.assertEqual(revs[i+1][1], t[2])
-            else:
-                self.assertEqual(None, t[2])
-
-    def checkLoadBeforeConsecutiveTids(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        def helper(tid, revid, x):
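-            # An explicit tid is passed to tpc_begin() below, so the three
-            # txns get the consecutive tids 1, 2, 3.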
-            data = zodb_pickle(MinPO(x))
-            t = transaction.Transaction()
-            try:
-                self._storage.tpc_begin(t, p64(tid))
-                r1 = self._storage.store(oid, revid, data, '', t)
-                # Finish the transaction
-                r2 = self._storage.tpc_vote(t)
-                newrevid = handle_serials(oid, r1, r2)
-                self._storage.tpc_finish(t)
-            except:
-                self._storage.tpc_abort(t)
-                raise
-            return newrevid
-        revid1 = helper(1, None, 1)
-        revid2 = helper(2, revid1, 2)
-        revid3 = helper(3, revid2, 3)
-        data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
-        eq(zodb_unpickle(data), MinPO(1))
-        eq(u64(start_tid), 1)
-        eq(u64(end_tid), 2)
-
-    def checkLoadBeforeCreation(self):
-        eq = self.assertEqual
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        revid1 = self._dostore(oid1)
-        revid2 = self._dostore(oid2)
-        results = self._storage.loadBefore(oid2, revid2)
-        eq(results, None)
-
-    # TODO:  There are other edge cases to handle, including pack.
diff --git a/branches/bug1734/src/ZODB/tests/StorageTestBase.py b/branches/bug1734/src/ZODB/tests/StorageTestBase.py
deleted file mode 100644
index e3c291bc..00000000
--- a/branches/bug1734/src/ZODB/tests/StorageTestBase.py
+++ /dev/null
@@ -1,242 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Provide a mixin base class for storage tests.
-
-The StorageTestBase class provides basic setUp() and tearDown()
-semantics (which you can override), and it also provides a helper
-method _dostore() which performs a complete store transaction for a
-single object revision.
-"""
-
-import sys
-import time
-import types
-import unittest
-from cPickle import Pickler, Unpickler
-from cStringIO import StringIO
-
-import transaction
-
-from ZODB.utils import u64
-from ZODB.tests.MinPO import MinPO
-
-ZERO = '\0'*8
-
-def snooze():
-    # On Windows, it's possible that two successive time.time() calls return
-    # the same value.  Tim guarantees that time never runs backwards.  You
-    # usually want to call this before you pack a storage, or whenever you
-    # must make other guarantees about increasing timestamps.
-    now = time.time()
-    while now == time.time():
-        time.sleep(0.1)
-
-def _persistent_id(obj):
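-    # On the class itself, _p_oid is a descriptor (it has __get__); only a
-    # real persistent instance carries a plain oid value.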
-    oid = getattr(obj, "_p_oid", None)
-    if getattr(oid, "__get__", None) is not None:
-        return None
-    else:
-        return oid
-
-def zodb_pickle(obj):
-    """Create a pickle in the format expected by ZODB."""
-    f = StringIO()
-    p = Pickler(f, 1)
-    p.persistent_id = _persistent_id
-    klass = obj.__class__
-    assert not hasattr(obj, '__getinitargs__'), "not ready for constructors"
-    args = None
-
-    mod = getattr(klass, '__module__', None)
-    if mod is not None:
-        klass = mod, klass.__name__
-
-    state = obj.__getstate__()
-
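-    # A ZODB data record is two consecutive pickles: (class_info, args),
-    # followed by the instance state.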
-    p.dump((klass, args))
-    p.dump(state)
-    return f.getvalue(1)
-
-def persistent_load(pid):
-    # helper for zodb_unpickle
-    return "ref to %s.%s oid=%s" % (pid[1][0], pid[1][1], u64(pid[0]))
-
-def zodb_unpickle(data):
-    """Unpickle an object stored using the format expected by ZODB."""
-    f = StringIO(data)
-    u = Unpickler(f)
-    u.persistent_load = persistent_load
-    klass_info = u.load()
-    if isinstance(klass_info, types.TupleType):
-        if isinstance(klass_info[0], type):
-            # Unclear:  what is the second part of klass_info?
-            klass, xxx = klass_info
-            assert not xxx
-        else:
-            if isinstance(klass_info[0], tuple):
-                modname, klassname = klass_info[0]
-            else:
-                modname, klassname = klass_info
-            if modname == "__main__":
-                ns = globals()
-            else:
-                mod = import_helper(modname)
-                ns = mod.__dict__
-            try:
-                klass = ns[klassname]
-            except KeyError:
-                print >> sys.stderr, "can't find %s in %r" % (klassname, ns)
-        inst = klass()
-    else:
-        raise ValueError, "expected class info: %s" % repr(klass_info)
-    state = u.load()
-    inst.__setstate__(state)
-    return inst
-
-def handle_all_serials(oid, *args):
-    """Return dict of oid to serialno from store() and tpc_vote().
-
-    Raises an exception if one of the calls raised an exception.
-
-    The storage interface got complicated when ZEO was introduced.
-    Any individual store() call can return None or a sequence of
-    2-tuples where the 2-tuple is either oid, serialno or an
-    exception to be raised by the client.
-
-    The original interface just returned the serialno for the
-    object.
-    """
-    d = {}
-    for arg in args:
-        if isinstance(arg, types.StringType):
-            d[oid] = arg
-        elif arg is None:
-            pass
-        else:
-            for oid, serial in arg:
-                if not isinstance(serial, types.StringType):
-                    raise serial # error from ZEO server
-                d[oid] = serial
-    return d
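-
-# For example (with hypothetical serial strings s1 and s2), a store()
-# result and a tpc_vote() result collapse into one mapping:
-#     handle_all_serials(oid, s1, [(oid, s2)])  =>  {oid: s2}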
-
-def handle_serials(oid, *args):
-    """Return the serialno for oid based on multiple return values.
-
-    A convenience wrapper around handle_all_serials().
-    """
-    return handle_all_serials(oid, *args)[oid]
-
-def import_helper(name):
-    __import__(name)
-    return sys.modules[name]
-
-
-class StorageTestBase(unittest.TestCase):
-
-    # It would be simpler if concrete tests didn't need to extend
-    # setUp() and tearDown().
-
-    def setUp(self):
-        # You need to override this with a setUp that creates self._storage
-        self._storage = None
-
-    def _close(self):
-        # You should override this if closing your storage requires additional
-        # shutdown operations.
-        if self._storage is not None:
-            self._storage.close()
-
-    def tearDown(self):
-        self._close()
-
-    def _dostore(self, oid=None, revid=None, data=None, version=None,
-                 already_pickled=0, user=None, description=None):
-        """Do a complete storage transaction.  The defaults are:
-
-         - oid=None, ask the storage for a new oid
-         - revid=None, use a revid of ZERO
-         - data=None, pickle up some arbitrary data (the integer 7)
-         - version=None, use the empty string version
-
-        Returns the object's new revision id.
-        """
-        if oid is None:
-            oid = self._storage.new_oid()
-        if revid is None:
-            revid = ZERO
-        if data is None:
-            data = MinPO(7)
-        if type(data) == types.IntType:
-            data = MinPO(data)
-        if not already_pickled:
-            data = zodb_pickle(data)
-        if version is None:
-            version = ''
-        # Begin the transaction
-        t = transaction.Transaction()
-        if user is not None:
-            t.user = user
-        if description is not None:
-            t.description = description
-        try:
-            self._storage.tpc_begin(t)
-            # Store an object
-            r1 = self._storage.store(oid, revid, data, version, t)
-            # Finish the transaction
-            r2 = self._storage.tpc_vote(t)
-            revid = handle_serials(oid, r1, r2)
-            self._storage.tpc_finish(t)
-        except:
-            self._storage.tpc_abort(t)
-            raise
-        return revid
-
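-    # "NP" is "no pickle": the data argument is stored exactly as given.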
-    def _dostoreNP(self, oid=None, revid=None, data=None, version=None,
-                   user=None, description=None):
-        return self._dostore(oid, revid, data, version, 1, user, description)
-
-    # The following methods depend on optional storage features.
-
-    def _undo(self, tid, expected_oids=None, note=None):
-        # Undo a tid that affects a single object (oid).
-        # This is very specialized.
-        t = transaction.Transaction()
-        t.note(note or "undo")
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        if expected_oids is not None:
-            self.assertEqual(len(oids), len(expected_oids), repr(oids))
-            for oid in expected_oids:
-                self.assert_(oid in oids)
-        return self._storage.lastTransaction()
-
-    def _commitVersion(self, src, dst):
-        t = transaction.Transaction()
-        t.note("commit %r to %r" % (src, dst))
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.commitVersion(src, dst, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        return oids
-
-    def _abortVersion(self, ver):
-        t = transaction.Transaction()
-        t.note("abort %r" % ver)
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.abortVersion(ver, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        return oids
diff --git a/branches/bug1734/src/ZODB/tests/Synchronization.py b/branches/bug1734/src/ZODB/tests/Synchronization.py
deleted file mode 100644
index 250684fa..00000000
--- a/branches/bug1734/src/ZODB/tests/Synchronization.py
+++ /dev/null
@@ -1,145 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the storage's implemenetation of the storage synchronization spec.
-
-The Synchronization spec
-    http://www.zope.org/Documentation/Developer/Models/ZODB/
-    ZODB_Architecture_Storage_Interface_State_Synchronization_Diag.html
-
-It specifies two states, committing and non-committing.  A storage
-starts in the non-committing state.  tpc_begin() transfers it to the
-committing state; tpc_abort() and tpc_finish() transfer it back to
-non-committing.
-
-Several other methods are only allowed in one state or another.  Many
-methods allowed only in the committing state require that they apply
-to the currently committing transaction.
-
-The spec is silent on a variety of methods that don't appear to modify
-the state, e.g. load(), undoLog(), pack().  It's unclear whether there
-is a separate set of synchronization rules that apply to these methods
-or if the synchronization is implementation dependent, i.e. only what
-is needed to guarantee a correct implementation.
-
-The synchronization spec is also silent on whether there is any
-contract implied with the caller.  If the storage can assume that a
-single client is single-threaded and that it will not call, e.g., store()
-until after it calls tpc_begin(), the implementation can be
-substantially simplified.
-
-New and/or unspecified methods:
-
-tpc_vote(): handled like tpc_abort()
-undo(): how's that handled?
-
-Methods that have nothing to do with committing/non-committing:
-load(), loadSerial(), getName(), getSize(), __len__(), history(),
-undoLog(), modifiedInVersion(), versionEmpty(), versions(), pack().
-
-Specific questions:
-
-The spec & docs say that undo() takes three arguments, the second
-being a transaction.  If the specified arg isn't the current
-transaction, the undo() should raise StorageTransactionError.  This
-isn't implemented anywhere.  It looks like undo can be called at
-anytime.
-
-FileStorage does not allow undo() during a pack.  How should this be
-tested?  Is it a general restriction?
-
-"""
-
-from transaction import Transaction
-from ZODB.POSException import StorageTransactionError
-
-VERSION = "testversion"
-OID = "\000" * 8
-SERIALNO = "\000" * 8
-TID = "\000" * 8
-
-class SynchronizedStorage:
-
-##    def verifyCommitting(self, callable, *args):
-##        self.assertRaises(StorageTransactionError, callable *args)
-
-    def verifyNotCommitting(self, callable, *args):
-        self.assertRaises(StorageTransactionError, callable, *args)
-
-    def verifyWrongTrans(self, callable, *args):
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self.assertRaises(StorageTransactionError, callable, *args)
-        self._storage.tpc_abort(t)
-
-    def checkAbortVersionNotCommitting(self):
-        self.verifyNotCommitting(self._storage.abortVersion,
-                                 VERSION, Transaction())
-
-    def checkAbortVersionWrongTrans(self):
-        self.verifyWrongTrans(self._storage.abortVersion,
-                              VERSION, Transaction())
-
-    def checkCommitVersionNotCommitting(self):
-        self.verifyNotCommitting(self._storage.commitVersion,
-                                 VERSION, "", Transaction())
-
-    def checkCommitVersionWrongTrans(self):
-        self.verifyWrongTrans(self._storage.commitVersion,
-                              VERSION, "", Transaction())
-
-
-    def checkStoreNotCommitting(self):
-        self.verifyNotCommitting(self._storage.store,
-                                 OID, SERIALNO, "", "", Transaction())
-
-    def checkStoreWrongTrans(self):
-        self.verifyWrongTrans(self._storage.store,
-                              OID, SERIALNO, "", "", Transaction())
-
-##    def checkNewOidNotCommitting(self):
-##        self.verifyNotCommitting(self._storage.new_oid)
-
-##    def checkNewOidWrongTrans(self):
-##        self.verifyWrongTrans(self._storage.new_oid)
-
-
-    def checkAbortNotCommitting(self):
-        self._storage.tpc_abort(Transaction())
-
-    def checkAbortWrongTrans(self):
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.tpc_abort(Transaction())
-        self._storage.tpc_abort(t)
-
-    def checkFinishNotCommitting(self):
-        t = Transaction()
-        self._storage.tpc_finish(t)
-        self._storage.tpc_abort(t)
-
-    def checkFinishWrongTrans(self):
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.tpc_finish(Transaction())
-        self._storage.tpc_abort(t)
-
-    def checkBeginCommitting(self):
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._storage.tpc_begin(t)
-        self._storage.tpc_abort(t)
-
-    # TODO:  how to check undo?
diff --git a/branches/bug1734/src/ZODB/tests/TransactionalUndoStorage.py b/branches/bug1734/src/ZODB/tests/TransactionalUndoStorage.py
deleted file mode 100644
index cc3e399a..00000000
--- a/branches/bug1734/src/ZODB/tests/TransactionalUndoStorage.py
+++ /dev/null
@@ -1,726 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Check undo().
-
-Any storage that supports undo() must pass these tests.
-"""
-
-import time
-import types
-
-from persistent import Persistent
-import transaction
-from transaction import Transaction
-
-from ZODB import POSException
-from ZODB.serialize import referencesf
-from ZODB.utils import p64
-from ZODB import DB
-
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
-
-ZERO = '\0'*8
-
-class C(Persistent):
-    pass
-
-def snooze():
-    # On Windows, it's possible that two successive time.time() calls return
-    # the same value.  Tim guarantees that time never runs backwards.  You
-    # usually want to call this before you pack a storage, or whenever you
-    # must make other guarantees about increasing timestamps.
-    now = time.time()
-    while now == time.time():
-        time.sleep(0.1)
-
-def listeq(L1, L2):
-    """Return True if L1.sort() == L2.sort()"""
-    c1 = L1[:]
-    c2 = L2[:]
-    c1.sort()
-    c2.sort()
-    return c1 == c2
-
-class TransactionalUndoStorage:
-
-    def _transaction_begin(self):
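-        # Reset the oid -> serial map that _transaction_store() and
-        # _transaction_vote() fill in from the storage's return values.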
-        self.__serials = {}
-
-    def _transaction_store(self, oid, rev, data, vers, trans):
-        r = self._storage.store(oid, rev, data, vers, trans)
-        if r:
-            if type(r) == types.StringType:
-                self.__serials[oid] = r
-            else:
-                for oid, serial in r:
-                    self.__serials[oid] = serial
-
-    def _transaction_vote(self, trans):
-        r = self._storage.tpc_vote(trans)
-        if r:
-            for oid, serial in r:
-                self.__serials[oid] = serial
-
-    def _transaction_newserial(self, oid):
-        return self.__serials[oid]
-
-    def _multi_obj_transaction(self, objs):
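-        # Store the given (oid, rev, data) triples in a single txn and
-        # return a dict mapping each oid to its new serial.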
-        newrevs = {}
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        for oid, rev, data in objs:
-            self._transaction_store(oid, rev, data, '', t)
-            newrevs[oid] = None
-        self._transaction_vote(t)
-        self._storage.tpc_finish(t)
-        for oid in newrevs.keys():
-            newrevs[oid] = self._transaction_newserial(oid)
-        return newrevs
-
-    def _iterate(self):
-        """Iterate over the storage in its final state."""
-        # This is testing that the iterator() code works correctly.
-        # The hasattr() guards against ZEO, which doesn't support iterator.
-        if not hasattr(self._storage, "iterator"):
-            return
-        iter = self._storage.iterator()
-        for txn in iter:
-            for rec in txn:
-                pass
-
-    def undo(self, tid, note):
-        t = Transaction()
-        t.note(note)
-        self._storage.tpc_begin(t)
-        oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        return oids
-
-    def checkSimpleTransactionalUndo(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(23))
-        revid = self._dostore(oid, revid=revid, data=MinPO(24))
-        revid = self._dostore(oid, revid=revid, data=MinPO(25))
-
-        info = self._storage.undoInfo()
-        # Now start an undo transaction
-        self._undo(info[0]["id"], [oid], note="undo1")
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(24))
-
-        # Do another one
-        info = self._storage.undoInfo()
-        self._undo(info[2]["id"], [oid], note="undo2")
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(23))
-
-        # Try to undo the first record
-        info = self._storage.undoInfo()
-        self._undo(info[4]["id"], [oid], note="undo3")
-        # This should fail since we've undone the object's creation
-        self.assertRaises(KeyError,
-                          self._storage.load, oid, '')
-
-        # And now let's try to redo the object's creation
-        info = self._storage.undoInfo()
-        self._undo(info[0]["id"], [oid])
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(23))
-        self._iterate()
-
-    def checkCreationUndoneGetSerial(self):
-        # create an object
-        oid = self._storage.new_oid()
-        self._dostore(oid, data=MinPO(23))
-        # undo its creation
-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        t.note('undo1')
-        self._storage.tpc_begin(t)
-        self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # Check that calling getSerial on an uncreated object raises a KeyError
-        # The current version of FileStorage fails this test
-        self.assertRaises(KeyError, self._storage.getSerial, oid)
-
-    def checkUndoCreationBranch1(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(11))
-        revid = self._dostore(oid, revid=revid, data=MinPO(12))
-        # Undo the last transaction
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(11))
-
-        # Now from here, we can either redo the last undo, or undo the object
-        # creation.  Let's undo the object creation.
-        info = self._storage.undoInfo()
-        self._undo(info[2]['id'], [oid])
-        self.assertRaises(KeyError, self._storage.load, oid, '')
-        self._iterate()
-
-    def checkUndoCreationBranch2(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(11))
-        revid = self._dostore(oid, revid=revid, data=MinPO(12))
-        # Undo the last transaction
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(11))
-        # Now from here, we can either redo the last undo, or undo the object
-        # creation.  Let's redo the last undo
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(12))
-        self._iterate()
-
-    def checkTwoObjectUndo(self):
-        eq = self.assertEqual
-        # Convenience
-        p31, p32, p51, p52 = map(zodb_pickle,
-                                 map(MinPO, (31, 32, 51, 52)))
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        revid1 = revid2 = ZERO
-        # Store two objects in the same transaction
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        self._transaction_store(oid1, revid1, p31, '', t)
-        self._transaction_store(oid2, revid2, p51, '', t)
-        # Finish the transaction
-        self._transaction_vote(t)
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        self._storage.tpc_finish(t)
-        eq(revid1, revid2)
-        # Update those same two objects
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        self._transaction_store(oid1, revid1, p32, '', t)
-        self._transaction_store(oid2, revid2, p52, '', t)
-        # Finish the transaction
-        self._transaction_vote(t)
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        self._storage.tpc_finish(t)
-        eq(revid1, revid2)
-        # Make sure the objects have the current value
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(32))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(52))
-
-        # Now attempt to undo the transaction containing two objects
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid1, oid2])
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(31))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(51))
-        self._iterate()
-
-    def checkTwoObjectUndoAtOnce(self):
-        # Convenience
-        eq = self.assertEqual
-        unless = self.failUnless
-        p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
-                                           map(MinPO,
-                                               (30, 31, 32, 50, 51, 52)))
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        revid1 = revid2 = ZERO
-        # Store two objects in the same transaction
-        d = self._multi_obj_transaction([(oid1, revid1, p30),
-                                         (oid2, revid2, p50),
-                                         ])
-        eq(d[oid1], d[oid2])
-        # Update those same two objects
-        d = self._multi_obj_transaction([(oid1, d[oid1], p31),
-                                         (oid2, d[oid2], p51),
-                                         ])
-        eq(d[oid1], d[oid2])
-        # Update those same two objects
-        d = self._multi_obj_transaction([(oid1, d[oid1], p32),
-                                         (oid2, d[oid2], p52),
-                                         ])
-        eq(d[oid1], d[oid2])
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        eq(revid1, revid2)
-        # Make sure the objects have the current value
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(32))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(52))
-        # Now attempt to undo the transaction containing two objects
-        info = self._storage.undoInfo()
-        tid = info[0]['id']
-        tid1 = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.undo(tid, t)
-        tid, oids1 = self._storage.undo(tid1, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # We get the finalization stuff called an extra time:
-        eq(len(oids), 2)
-        eq(len(oids1), 2)
-        unless(oid1 in oids)
-        unless(oid2 in oids)
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(30))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(50))
-
-        # Now try to undo the one we just did to undo, whew
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid1, oid2])
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(32))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(52))
-        self._iterate()
-
-    def checkTwoObjectUndoAgain(self):
-        eq = self.assertEqual
-        p31, p32, p33, p51, p52, p53 = map(
-            zodb_pickle,
-            map(MinPO, (31, 32, 33, 51, 52, 53)))
-        # Like the above, but the first revision of the objects are stored in
-        # different transactions.
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        revid1 = self._dostore(oid1, data=p31, already_pickled=1)
-        revid2 = self._dostore(oid2, data=p51, already_pickled=1)
-        # Update those same two objects
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        self._transaction_store(oid1, revid1, p32, '', t)
-        self._transaction_store(oid2, revid2, p52, '', t)
-        # Finish the transaction
-        self._transaction_vote(t)
-        self._storage.tpc_finish(t)
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        eq(revid1, revid2)
-        # Now attempt to undo the transaction containing two objects
-        info = self._storage.undoInfo()
-        self._undo(info[0]["id"], [oid1, oid2])
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(31))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(51))
-        # Like the above, but this time, the second transaction contains only
-        # one object.
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        self._transaction_store(oid1, revid1, p33, '', t)
-        self._transaction_store(oid2, revid2, p53, '', t)
-        # Finish the transaction
-        self._transaction_vote(t)
-        self._storage.tpc_finish(t)
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        eq(revid1, revid2)
-        # Update in different transactions
-        revid1 = self._dostore(oid1, revid=revid1, data=MinPO(34))
-        revid2 = self._dostore(oid2, revid=revid2, data=MinPO(54))
-        # Now attempt to undo the transaction containing two objects
-        info = self._storage.undoInfo()
-        tid = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        self.failUnless(oid1 in oids)
-        self.failUnless(not oid2 in oids)
-        data, revid1 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(33))
-        data, revid2 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(54))
-        self._iterate()
-
-
-    def checkNotUndoable(self):
-        eq = self.assertEqual
-        # Set things up so we've got a transaction that can't be undone
-        oid = self._storage.new_oid()
-        revid_a = self._dostore(oid, data=MinPO(51))
-        revid_b = self._dostore(oid, revid=revid_a, data=MinPO(52))
-        revid_c = self._dostore(oid, revid=revid_b, data=MinPO(53))
-        # Start the undo
-        info = self._storage.undoInfo()
-        tid = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self.assertRaises(POSException.UndoError,
-                          self._storage.undo,
-                          tid, t)
-        self._storage.tpc_abort(t)
-        # Now have more fun: object1 and object2 are in the same transaction,
-        # which we'll try to undo to, but one of them has since been modified
-        # in a different transaction, so the undo should fail.
-        oid1 = oid
-        revid1 = revid_c
-        oid2 = self._storage.new_oid()
-        revid2 = ZERO
-        p81, p82, p91, p92 = map(zodb_pickle,
-                                 map(MinPO, (81, 82, 91, 92)))
-
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self._transaction_begin()
-        self._transaction_store(oid1, revid1, p81, '', t)
-        self._transaction_store(oid2, revid2, p91, '', t)
-        self._transaction_vote(t)
-        self._storage.tpc_finish(t)
-        revid1 = self._transaction_newserial(oid1)
-        revid2 = self._transaction_newserial(oid2)
-        eq(revid1, revid2)
-        # Make sure the objects have the expected values
-        data, revid_11 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(81))
-        data, revid_22 = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(91))
-        eq(revid_11, revid1)
-        eq(revid_22, revid2)
-        # Now modify oid2
-        revid2 = self._dostore(oid2, revid=revid2, data=MinPO(92))
-        self.assertNotEqual(revid1, revid2)
-        self.assertNotEqual(revid2, revid_22)
-        info = self._storage.undoInfo()
-        tid = info[1]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        self.assertRaises(POSException.UndoError,
-                          self._storage.undo,
-                          tid, t)
-        self._storage.tpc_abort(t)
-        self._iterate()
-
-    def checkTransactionalUndoAfterPack(self):
-        eq = self.assertEqual
-        # Add a few object revisions
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(51))
-        packtime = time.time()
-        snooze()                # time.time() now distinct from packtime
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
-        self._dostore(oid, revid=revid2, data=MinPO(53))
-        # Now get the undo log
-        info = self._storage.undoInfo()
-        eq(len(info), 3)
-        tid = info[0]['id']
-        # Now pack just the initial revision of the object.  We need the
-        # second revision; otherwise we won't be able to undo the third
-        # revision!
-        self._storage.pack(packtime, referencesf)
-        # Make some basic assertions about the undo information now
-        info2 = self._storage.undoInfo()
-        eq(len(info2), 2)
-        # And now attempt to undo the last transaction
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.undo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-        data, revid = self._storage.load(oid, '')
-        # The object must now be at the second state
-        eq(zodb_unpickle(data), MinPO(52))
-        self._iterate()
-
-    def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
-        eq = self.assertEqual
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        o1 = C()
-        o2 = C()
-        root['obj'] = o1
-        o1.obj = o2
-        txn = transaction.get()
-        txn.note('o1 -> o2')
-        txn.commit()
-        now = packtime = time.time()
-        while packtime <= now:
-            packtime = time.time()
-
-        o3 = C()
-        o2.obj = o3
-        txn = transaction.get()
-        txn.note('o1 -> o2 -> o3')
-        txn.commit()
-
-        o1.obj = o3
-        txn = transaction.get()
-        txn.note('o1 -> o3')
-        txn.commit()
-
-        log = self._storage.undoLog()
-        eq(len(log), 4)
-        for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3',
-                               'o1 -> o2', 'initial database creation')):
-            eq(entry[0]['description'], entry[1])
-
-        self._storage.pack(packtime, referencesf)
-
-        log = self._storage.undoLog()
-        for entry in zip(log, ('o1 -> o3', 'o1 -> o2 -> o3')):
-            eq(entry[0]['description'], entry[1])
-
-        tid = log[0]['id']
-        db.undo(tid)
-        txn = transaction.get()
-        txn.note('undo')
-        txn.commit()
-        # undo does a txn-undo, but doesn't invalidate
-        conn.sync()
-
-        log = self._storage.undoLog()
-        for entry in zip(log, ('undo', 'o1 -> o3', 'o1 -> o2 -> o3')):
-            eq(entry[0]['description'], entry[1])
-
-        eq(o1.obj, o2)
-        eq(o1.obj.obj, o3)
-        self._iterate()
-
-    def checkPackAfterUndoDeletion(self):
-        db = DB(self._storage)
-        cn = db.open()
-        root = cn.root()
-
-        pack_times = []
-        def set_pack_time():
-            pack_times.append(time.time())
-            snooze()
-
-        root["key0"] = MinPO(0)
-        root["key1"] = MinPO(1)
-        root["key2"] = MinPO(2)
-        txn = transaction.get()
-        txn.note("create 3 keys")
-        txn.commit()
-
-        set_pack_time()
-
-        del root["key1"]
-        txn = transaction.get()
-        txn.note("delete 1 key")
-        txn.commit()
-
-        set_pack_time()
-
-        root._p_deactivate()
-        cn.sync()
-        self.assert_(listeq(root.keys(), ["key0", "key2"]))
-
-        L = db.undoInfo()
-        db.undo(L[0]["id"])
-        txn = transaction.get()
-        txn.note("undo deletion")
-        txn.commit()
-
-        set_pack_time()
-
-        root._p_deactivate()
-        cn.sync()
-        self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))
-
-        for t in pack_times:
-            self._storage.pack(t, referencesf)
-
-            root._p_deactivate()
-            cn.sync()
-            self.assert_(listeq(root.keys(), ["key0", "key1", "key2"]))
-            for i in range(3):
-                obj = root["key%d" % i]
-                self.assertEqual(obj.value, i)
-            root.items()
-            self._inter_pack_pause()
-
-    def checkPackAfterUndoManyTimes(self):
-        db = DB(self._storage)
-        cn = db.open()
-        rt = cn.root()
-
-        rt["test"] = MinPO(1)
-        transaction.commit()
-        rt["test2"] = MinPO(2)
-        transaction.commit()
-        rt["test"] = MinPO(3)
-        txn = transaction.get()
-        txn.note("root of undo")
-        txn.commit()
-
-        packtimes = []
-        for i in range(10):
-            L = db.undoInfo()
-            db.undo(L[0]["id"])
-            txn = transaction.get()
-            txn.note("undo %d" % i)
-            txn.commit()
-            rt._p_deactivate()
-            cn.sync()
-
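-            # Each undo flips rt["test"] between its last two states:
-            # 1 after even-numbered undos, 3 after odd-numbered ones.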
-            self.assertEqual(rt["test"].value, i % 2 and 3 or 1)
-            self.assertEqual(rt["test2"].value, 2)
-
-            packtimes.append(time.time())
-            snooze()
-
-        for t in packtimes:
-            self._storage.pack(t, referencesf)
-            cn.sync()
-
-            # TODO:  Is _cache supposed to have a clear() method, or not?
-            # cn._cache.clear()
-
-            # The last undo set the value to 3 and pack should
-            # never change that.
-            self.assertEqual(rt["test"].value, 3)
-            self.assertEqual(rt["test2"].value, 2)
-            self._inter_pack_pause()
-
-    def _inter_pack_pause(self):
-        # DirectoryStorage needs a pause between packs,
-        # most other storages don't.
-        pass
-
-    def checkTransactionalUndoIterator(self):
-        # check that data_txn set in iterator makes sense
-        if not hasattr(self._storage, "iterator"):
-            return
-
-        s = self._storage
-
-        BATCHES = 4
-        OBJECTS = 4
-
-        orig = []
-        for i in range(BATCHES):
-            t = Transaction()
-            tid = p64(i + 1)
-            s.tpc_begin(t, tid)
-            for j in range(OBJECTS):
-                oid = s.new_oid()
-                obj = MinPO(i * OBJECTS + j)
-                revid = s.store(oid, None, zodb_pickle(obj), '', t)
-                orig.append((tid, oid, revid))
-            s.tpc_vote(t)
-            s.tpc_finish(t)
-
-        i = 0
-        for tid, oid, revid in orig:
-            self._dostore(oid, revid=revid, data=MinPO(revid),
-                          description="update %s" % i)
-
-        # Undo the OBJECTS transactions that modified objects created
-        # in the ith original transaction.
-
-        def undo(i):
-            info = s.undoInfo()
-            t = Transaction()
-            s.tpc_begin(t)
-            base = i * OBJECTS + i
-            for j in range(OBJECTS):
-                tid = info[base + j]['id']
-                s.undo(tid, t)
-            s.tpc_vote(t)
-            s.tpc_finish(t)
-
-        for i in range(BATCHES):
-            undo(i)
-
-        # There are now (2 + OBJECTS) * BATCHES transactions:
-        #     BATCHES original transactions, followed by
-        #     OBJECTS * BATCHES modifications, followed by
-        #     BATCHES undos
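-        #     With BATCHES = OBJECTS = 4, that's 4 + 16 + 4 = 24 transactions.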
-
-        iter = s.iterator()
-        offset = 0
-
-        eq = self.assertEqual
-
-        for i in range(BATCHES):
-            txn = iter[offset]
-            offset += 1
-
-            tid = p64(i + 1)
-            eq(txn.tid, tid)
-
-            L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
-            L2 = [(oid, revid, None) for _tid, oid, revid in orig
-                  if _tid == tid]
-
-            eq(L1, L2)
-
-        for i in range(BATCHES * OBJECTS):
-            txn = iter[offset]
-            offset += 1
-            eq(len([rec for rec in txn if rec.data_txn is None]), 1)
-
-        for i in range(BATCHES):
-            txn = iter[offset]
-            offset += 1
-
-            # The undos are performed in reverse order.
-            otid = p64(BATCHES - i)
-            L1 = [(rec.oid, rec.data_txn) for rec in txn]
-            L2 = [(oid, otid) for _tid, oid, revid in orig
-                  if _tid == otid]
-            L1.sort()
-            L2.sort()
-            eq(L1, L2)
-
-        self.assertRaises(IndexError, iter.__getitem__, offset)
-
-    def checkUndoLogMetadata(self):
-        # test that the metadata is correct in the undo log
-        t = transaction.get()
-        t.note('t1')
-        t.setExtendedInfo('k2','this is transaction metadata')
-        t.setUser('u3',path='p3')
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-        o1 = C()
-        root['obj'] = o1
-        txn = transaction.get()
-        txn.commit()
-        l = self._storage.undoLog()
-        self.assertEqual(len(l),2)
-        d = l[0]
-        self.assertEqual(d['description'],'t1')
-        self.assertEqual(d['k2'],'this is transaction metadata')
-        self.assertEqual(d['user_name'],'p3 u3')
diff --git a/branches/bug1734/src/ZODB/tests/TransactionalUndoVersionStorage.py b/branches/bug1734/src/ZODB/tests/TransactionalUndoVersionStorage.py
deleted file mode 100644
index 3efd054e..00000000
--- a/branches/bug1734/src/ZODB/tests/TransactionalUndoVersionStorage.py
+++ /dev/null
@@ -1,198 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-# Check interactions between undo() and versions.  Any storage that
-# supports both undo() and versions must pass these tests.
-
-import time
-
-import transaction
-
-from ZODB.serialize import referencesf
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle
-
-
-class TransactionalUndoVersionStorage:
-
-    def _x_dostore(self, *args, **kwargs):
-        # ugh: backwards compatibility for ZEO 1.0, which runs these
-        # tests but has a _dostore() method that does not support the
-        # description kwarg.
-        try:
-            return self._dostore(*args, **kwargs)
-        except TypeError:
-            # assume that the type error means we've got a _dostore()
-            # without the description kwarg
-            try:
-                del kwargs['description']
-            except KeyError:
-                pass # not expected
-        return self._dostore(*args, **kwargs)
-
-    def checkUndoInVersion(self):
-        eq = self.assertEqual
-        unless = self.failUnless
-
-        def check_objects(nonversiondata, versiondata):
-            data, revid = self._storage.load(oid, version)
-            self.assertEqual(zodb_unpickle(data), MinPO(versiondata))
-            data, revid = self._storage.load(oid, '')
-            self.assertEqual(zodb_unpickle(data), MinPO(nonversiondata))
-
-        oid = self._storage.new_oid()
-        version = 'one'
-        revid_a = self._dostore(oid, data=MinPO(91))
-        revid_b = self._dostore(oid, revid=revid_a, data=MinPO(92),
-                                version=version)
-        revid_c = self._dostore(oid, revid=revid_b, data=MinPO(93),
-                                version=version)
-
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-
-        data, revid = self._storage.load(oid, '')
-##        eq(revid, revid_a)
-        eq(zodb_unpickle(data), MinPO(91))
-        data, revid = self._storage.load(oid, version)
-        unless(revid > revid_b and revid > revid_c)
-        eq(zodb_unpickle(data), MinPO(92))
-
-        # Now commit the version...
-        oids = self._commitVersion(version, "")
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-
-        check_objects(92, 92)
-
-        # ...and undo the commit
-        info = self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-
-        check_objects(91, 92)
-
-        oids = self._abortVersion(version)
-        assert len(oids) == 1
-        assert oids[0] == oid
-
-        check_objects(91, 91)
-
-        # Now undo the abort
-        info=self._storage.undoInfo()
-        self._undo(info[0]['id'], [oid])
-
-        check_objects(91, 92)
-
-    def checkUndoCommitVersion(self):
-        def load_value(oid, version=''):
-            data, revid = self._storage.load(oid, version)
-            return zodb_unpickle(data).value
-
-        # create a bunch of packable transactions
-        oid = self._storage.new_oid()
-        revid = '\000' * 8
-        for i in range(4):
-            revid = self._x_dostore(oid, revid, description='packable%d' % i)
-        pt = time.time()
-        time.sleep(1)
-
-        oid1 = self._storage.new_oid()
-        version = 'version'
-        revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
-        revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
-                                 version=version, description='version1')
-        self._x_dostore(oid1, data=MinPO(2), revid=revid2,
-                        version=version, description='version2')
-        self._x_dostore(description='create2')
-
-        t = transaction.Transaction()
-        t.description = 'commit version'
-        self._storage.tpc_begin(t)
-        self._storage.commitVersion(version, '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-
-        info = self._storage.undoInfo()
-        t_id = info[0]['id']
-
-        self.assertEqual(load_value(oid1), 2)
-        self.assertEqual(load_value(oid1, version), 2)
-
-        self._storage.pack(pt, referencesf)
-
-        self._undo(t_id, note="undo commit version")
-
-        self.assertEqual(load_value(oid1), 0)
-        self.assertEqual(load_value(oid1, version), 2)
-
-        data, tid, ver = self._storage.loadEx(oid1, "")
-        # After undoing the version commit, the non-version data
-        # once again becomes the non-version data from 'create1'.
-        self.assertEqual(tid, self._storage.lastTransaction())
-        self.assertEqual(ver, "")
-
-        # The current version data comes from an undo record, which
-        # means that it gets data via the backpointer but tid from the
-        # current txn.
-        data, tid, ver = self._storage.loadEx(oid1, version)
-        self.assertEqual(ver, version)
-        self.assertEqual(tid, self._storage.lastTransaction())
-
-    def checkUndoAbortVersion(self):
-        def load_value(oid, version=''):
-            data, revid = self._storage.load(oid, version)
-            return zodb_unpickle(data).value
-
-        # create a bunch of packable transactions
-        oid = self._storage.new_oid()
-        revid = '\000' * 8
-        for i in range(3):
-            revid = self._x_dostore(oid, revid, description='packable%d' % i)
-        pt = time.time()
-        time.sleep(1)
-
-        oid1 = self._storage.new_oid()
-        version = 'version'
-        revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
-        revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
-                                 version=version, description='version1')
-        self._x_dostore(oid1, data=MinPO(2), revid=revid2,
-                        version=version, description='version2')
-        self._x_dostore(description='create2')
-
-        self._abortVersion(version)
-
-        info = self._storage.undoInfo()
-        t_id = info[0]['id']
-
-        self.assertEqual(load_value(oid1), 0)
-        # after abort, we should see non-version data
-        self.assertEqual(load_value(oid1, version), 0)
-
-        self._undo(t_id, note="undo abort version")
-
-        self.assertEqual(load_value(oid1), 0)
-        # the undo will re-create the version
-        self.assertEqual(load_value(oid1, version), 2)
-
-        info = self._storage.undoInfo()
-        t_id = info[0]['id']
-
-        self._storage.pack(pt, referencesf)
-
-        self._undo(t_id, note="undo undo")
-
-        # undo of the undo will put us back where we started
-        self.assertEqual(load_value(oid1), 0)
-        # after abort, we should see non-version data
-        self.assertEqual(load_value(oid1, version), 0)
diff --git a/branches/bug1734/src/ZODB/tests/VersionStorage.py b/branches/bug1734/src/ZODB/tests/VersionStorage.py
deleted file mode 100644
index 5cc24f38..00000000
--- a/branches/bug1734/src/ZODB/tests/VersionStorage.py
+++ /dev/null
@@ -1,545 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Run the version related tests for a storage.
-
-Any storage that supports versions should be able to pass all these tests.
-"""
-
-import time
-
-import transaction
-from transaction import Transaction
-
-from ZODB import POSException
-from ZODB.serialize import referencesf
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle, snooze
-from ZODB import DB
-
-class VersionStorage:
-
-    def checkCommitVersionSerialno(self):
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(12))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
-                               version="version")
-        oids = self._commitVersion("version", "")
-        self.assertEqual([oid], oids)
-        data, revid3 = self._storage.load(oid, "")
-        # use repr() to avoid getting binary data in a traceback on error
-        self.assertNotEqual(`revid1`, `revid3`)
-        self.assertNotEqual(`revid2`, `revid3`)
-
-    def checkAbortVersionSerialno(self):
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=MinPO(12))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
-                               version="version")
-        data, tid, ver = self._storage.loadEx(oid, "version")
-        self.assertEqual(revid2, tid)
-        self.assertEqual(zodb_unpickle(data), MinPO(13))
-        oids = self._abortVersion("version")
-        self.assertEqual([oid], oids)
-        data, revid3 = self._storage.load(oid, "")
-        # use repr() to avoid getting binary data in a traceback on error
-        self.assertNotEqual(`revid1`, `revid3`)
-        self.assertNotEqual(`revid2`, `revid3`)
-        data, tid, ver = self._storage.loadEx(oid, "")
-        self.assertEqual(revid3, tid)
-        self.assertEqual(zodb_unpickle(data), MinPO(12))
-        self.assertEqual(tid, self._storage.lastTransaction())
-
-    def checkVersionedStoreAndLoad(self):
-        eq = self.assertEqual
-        # Store a couple of non-version revisions of the object
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(11))
-        revid1 = self._dostore(oid, revid=revid, data=MinPO(12))
-        # And now store some new revisions in a version
-        version = 'test-version'
-        revid = self._dostore(oid, revid=revid1, data=MinPO(13),
-                              version=version)
-        revid = self._dostore(oid, revid=revid, data=MinPO(14),
-                              version=version)
-        revid2 = self._dostore(oid, revid=revid, data=MinPO(15),
-                              version=version)
-        # Now read back the object in both the non-version and version and
-        # make sure the values jive.
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(12))
-        data, vrevid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(15))
-        if hasattr(self._storage, 'getSerial'):
-            s = self._storage.getSerial(oid)
-            eq(s, max(revid, vrevid))
-        data, tid, ver = self._storage.loadEx(oid, version)
-        eq(zodb_unpickle(data), MinPO(15))
-        eq(tid, revid2)
-        data, tid, ver = self._storage.loadEx(oid, "other version")
-        eq(zodb_unpickle(data), MinPO(12))
-        eq(tid, revid2)
-        # loadSerial returns non-version data
-        try:
-            data = self._storage.loadSerial(oid, revid)
-            eq(zodb_unpickle(data), MinPO(12))
-            data = self._storage.loadSerial(oid, revid2)
-            eq(zodb_unpickle(data), MinPO(12))
-        except POSException.Unsupported:
-            pass
-
-    def checkVersionedLoadErrors(self):
-        oid = self._storage.new_oid()
-        version = 'test-version'
-        revid = self._dostore(oid, data=MinPO(11))
-        revid = self._dostore(oid, revid=revid, data=MinPO(12),
-                              version=version)
-        # Try to load a bogus oid
-        self.assertRaises(KeyError,
-                          self._storage.load,
-                          self._storage.new_oid(), '')
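-        # Loading from a non-existent version falls back to the
-        # non-version data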
-        data, revid = self._storage.load(oid, 'bogus')
-        self.assertEqual(zodb_unpickle(data), MinPO(11))
-
-    def checkVersionLock(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(11))
-        version = 'test-version'
-        revid = self._dostore(oid, revid=revid, data=MinPO(12),
-                              version=version)
-        self.assertRaises(POSException.VersionLockError,
-                          self._dostore,
-                          oid, revid=revid, data=MinPO(14),
-                          version='another-version')
-
-    def checkVersionEmpty(self):
-        # Before we store anything, these versions ought to be empty
-        version = 'test-version'
-        self.failUnless(self._storage.versionEmpty(version))
-        # Now store some objects
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(11))
-        revid = self._dostore(oid, revid=revid, data=MinPO(12))
-        revid = self._dostore(oid, revid=revid, data=MinPO(13),
-                              version=version)
-        revid = self._dostore(oid, revid=revid, data=MinPO(14),
-                              version=version)
-        # 'test-version' should no longer be empty
-        self.failUnless(not self._storage.versionEmpty(version))
-        # But this non-existent version should be empty
-        self.failUnless(self._storage.versionEmpty('bogus'))
-
-    def checkVersions(self):
-        unless = self.failUnless
-        # Store some objects in the non-version
-        oid1 = self._storage.new_oid()
-        oid2 = self._storage.new_oid()
-        oid3 = self._storage.new_oid()
-        revid1 = self._dostore(oid1, data=MinPO(11))
-        revid2 = self._dostore(oid2, data=MinPO(12))
-        revid3 = self._dostore(oid3, data=MinPO(13))
-        # Now create some new versions
-        revid1 = self._dostore(oid1, revid=revid1, data=MinPO(14),
-                               version='one')
-        revid2 = self._dostore(oid2, revid=revid2, data=MinPO(15),
-                               version='two')
-        revid3 = self._dostore(oid3, revid=revid3, data=MinPO(16),
-                               version='three')
-        # Ask for the versions
-        versions = self._storage.versions()
-        unless('one' in versions)
-        unless('two' in versions)
-        unless('three' in versions)
-        # Now flex the `max' argument
-        versions = self._storage.versions(1)
-        self.assertEqual(len(versions), 1)
-        unless('one' in versions or 'two' in versions or 'three' in versions)
-
-    def _setup_version(self, version='test-version'):
-        # Store some revisions in the non-version
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(49))
-        revid = self._dostore(oid, revid=revid, data=MinPO(50))
-        revid = self._dostore(oid, revid=revid, data=MinPO(51))
-        # Now do some stores in a version
-        revid = self._dostore(oid, revid=revid, data=MinPO(52),
-                              version=version)
-        revid = self._dostore(oid, revid=revid, data=MinPO(53),
-                              version=version)
-        revid = self._dostore(oid, revid=revid, data=MinPO(54),
-                              version=version)
-        return oid, version
-
-    def checkAbortVersion(self):
-        eq = self.assertEqual
-        oid, version = self._setup_version()
-
-        # Not sure I can write a test for getSerial() in the
-        # presence of aborted versions, because FileStorage and
-        # Berkeley storage give a different answer. I think Berkeley
-        # is right and FS is wrong.
-
-        oids = self._abortVersion(version)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(51))
-
-    def checkAbortVersionNonCurrent(self):
-        # Make sure the non-current serial number is set correctly
-        # after a version is aborted.
-        oid, version = self._setup_version()
-        self._abortVersion(version)
-        data, tid, ver = self._storage.loadEx(oid, "")
-        # write a new revision of oid so that the aborted-version txn
-        # is not current
-        self._dostore(oid, revid=tid, data=MinPO(17))
-        ltid = self._storage.lastTransaction()
-        ncdata, ncstart, end = self._storage.loadBefore(oid, ltid)
-        self.assertEqual(data, ncdata)
-        self.assertEqual(tid, ncstart)
-
-    def checkAbortVersionErrors(self):
-        eq = self.assertEqual
-        oid, version = self._setup_version()
-        # Now try to abort a bogus version
-        t = Transaction()
-        self._storage.tpc_begin(t)
-
-        # Aborting the empty version should raise an error
-        if (hasattr(self._storage, 'supportsTransactionalUndo') and
-                self._storage.supportsTransactionalUndo()):
-            self.assertRaises(POSException.VersionError,
-                              self._storage.abortVersion,
-                              '', t)
-
-        # But now we really try to abort the version
-        tid, oids = self._storage.abortVersion(version, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(51))
-
-    def checkCommitVersionErrors(self):
-        if not (hasattr(self._storage, 'supportsTransactionalUndo') and
-                self._storage.supportsTransactionalUndo()):
-            return
-        eq = self.assertEqual
-        oid1, version1 = self._setup_version('one')
-        data, revid1 = self._storage.load(oid1, version1)
-        eq(zodb_unpickle(data), MinPO(54))
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        try:
-            self.assertRaises(POSException.VersionCommitError,
-                              self._storage.commitVersion,
-                              'one', 'one', t)
-        finally:
-            self._storage.tpc_abort(t)
-
-    def checkNewSerialOnCommitVersionToVersion(self):
-        oid, version = self._setup_version()
-        data, vtid = self._storage.load(oid, version)
-        data, ntid = self._storage.load(oid, '')
-
-        version2 = 'test version 2'
-        self._commitVersion(version, version2)
-        data, tid = self._storage.load(oid, version2)
-
-        self.failUnless(tid != vtid and tid != ntid,
-                        "New tid, %r, should be different from the old "
-                        "version, %r, and non-version, %r, tids."
-                        % (tid, vtid, ntid))
-
-    def checkModifyAfterAbortVersion(self):
-        eq = self.assertEqual
-        oid, version = self._setup_version()
-        self._abortVersion(version)
-        data, revid = self._storage.load(oid, '')
-        # And modify it a few times
-        revid = self._dostore(oid, revid=revid, data=MinPO(52))
-        revid = self._dostore(oid, revid=revid, data=MinPO(53))
-        revid = self._dostore(oid, revid=revid, data=MinPO(54))
-        data, newrevid = self._storage.load(oid, '')
-        eq(newrevid, revid)
-        eq(zodb_unpickle(data), MinPO(54))
-
-    def checkCommitToNonVersion(self):
-        eq = self.assertEqual
-        oid, version = self._setup_version()
-        data, revid = self._storage.load(oid, version)
-        eq(zodb_unpickle(data), MinPO(54))
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(51))
-        self._commitVersion(version, '')
-        data, revid = self._storage.load(oid, '')
-        eq(zodb_unpickle(data), MinPO(54))
-
-    def checkCommitToOtherVersion(self):
-        eq = self.assertEqual
-        oid1, version1 = self._setup_version('one')
-
-        data, revid1 = self._storage.load(oid1, version1)
-        eq(zodb_unpickle(data), MinPO(54))
-        oid2, version2 = self._setup_version('two')
-        data, revid2 = self._storage.load(oid2, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-
-        # make sure we see the non-version data when appropriate
-        data, revid2 = self._storage.load(oid1, version2)
-        eq(zodb_unpickle(data), MinPO(51))
-        data, revid2 = self._storage.load(oid2, version1)
-        eq(zodb_unpickle(data), MinPO(51))
-        data, revid2 = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(51))
-
-        # Okay, now let's commit object1 to version2
-        oids = self._commitVersion(version1, version2)
-        eq(len(oids), 1)
-        eq(oids[0], oid1)
-        data, revid = self._storage.load(oid1, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-        data, revid = self._storage.load(oid2, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-
-        # an object can only exist in one version, so a load from
-        # version1 should now give the non-version data
-        data, revid2 = self._storage.load(oid1, version1)
-        eq(zodb_unpickle(data), MinPO(51))
-
-        # as should a version that has never been used
-        data, revid2 = self._storage.load(oid1, 'bela lugosi')
-        eq(zodb_unpickle(data), MinPO(51))
-
-    def checkAbortOneVersionCommitTheOther(self):
-        eq = self.assertEqual
-        oid1, version1 = self._setup_version('one')
-        data, revid1 = self._storage.load(oid1, version1)
-        eq(zodb_unpickle(data), MinPO(54))
-        oid2, version2 = self._setup_version('two')
-        data, revid2 = self._storage.load(oid2, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-
-        # Let's make sure we can't get object1 in version2
-        data, revid2 = self._storage.load(oid1, version2)
-        eq(zodb_unpickle(data), MinPO(51))
-
-        oids = self._abortVersion(version1)
-        eq(len(oids), 1)
-        eq(oids[0], oid1)
-        data, revid = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(51))
-
-        data, revid = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(51))
-        data, revid = self._storage.load(oid2, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-        # Okay, now let's commit version2 back to the trunk
-        oids = self._commitVersion(version2, '')
-        eq(len(oids), 1)
-        eq(oids[0], oid2)
-        data, revid = self._storage.load(oid1, '')
-        eq(zodb_unpickle(data), MinPO(51))
-
-        # But the trunk should be up to date now
-        data, revid = self._storage.load(oid2, '')
-        eq(zodb_unpickle(data), MinPO(54))
-        data, revid = self._storage.load(oid2, version2)
-        eq(zodb_unpickle(data), MinPO(54))
-
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, revid=revid, data=MinPO(54), version='one')
-        self.assertRaises(KeyError,
-                          self._storage.load, oid, '')
-        self.assertRaises(KeyError,
-                          self._storage.load, oid, 'two')
-
-    def checkCreateObjectInVersionWithAbort(self):
-        oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=21, version="one")
-        revid = self._dostore(oid, revid=revid, data=23, version='one')
-        revid = self._dostore(oid, revid=revid, data=34, version='one')
-        # Now abort the version and the creation
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        tid, oids = self._storage.abortVersion('one', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        self.assertEqual(oids, [oid])
-
-    def checkPackVersions(self):
-        db = DB(self._storage)
-        cn = db.open(version="testversion")
-        root = cn.root()
-
-        obj = root["obj"] = MinPO("obj")
-        root["obj2"] = MinPO("obj2")
-        txn = transaction.get()
-        txn.note("create 2 objs in version")
-        txn.commit()
-
-        obj.value = "77"
-        txn = transaction.get()
-        txn.note("modify obj in version")
-        txn.commit()
-
-        # undo the modification to generate a mix of backpointers
-        # and versions for pack to chase
-        info = db.undoInfo()
-        db.undo(info[0]["id"])
-        txn = transaction.get()
-        txn.note("undo modification")
-        txn.commit()
-
-        snooze()
-        self._storage.pack(time.time(), referencesf)
-
-        db.commitVersion("testversion")
-        txn = transaction.get()
-        txn.note("commit version")
-        txn.commit()
-
-        cn = db.open()
-        root = cn.root()
-        root["obj"] = "no version"
-
-        txn = transaction.get()
-        txn.note("modify obj")
-        txn.commit()
-
-        self._storage.pack(time.time(), referencesf)
-
-    def checkPackVersionsInPast(self):
-        db = DB(self._storage)
-        cn = db.open(version="testversion")
-        root = cn.root()
-
-        obj = root["obj"] = MinPO("obj")
-        root["obj2"] = MinPO("obj2")
-        txn = transaction.get()
-        txn.note("create 2 objs in version")
-        txn.commit()
-
-        obj.value = "77"
-        txn = transaction.get()
-        txn.note("modify obj in version")
-        txn.commit()
-
-        t0 = time.time()
-        snooze()
-
-        # undo the modification to generate a mix of backpointers
-        # and versions for pack to chase
-        info = db.undoInfo()
-        db.undo(info[0]["id"])
-        txn = transaction.get()
-        txn.note("undo modification")
-        txn.commit()
-
-        self._storage.pack(t0, referencesf)
-
-        db.commitVersion("testversion")
-        txn = transaction.get()
-        txn.note("commit version")
-        txn.commit()
-
-        cn = db.open()
-        root = cn.root()
-        root["obj"] = "no version"
-
-        txn = transaction.get()
-        txn.note("modify obj")
-        txn.commit()
-
-        self._storage.pack(time.time(), referencesf)
-
-    def checkPackVersionReachable(self):
-        db = DB(self._storage)
-        cn = db.open()
-        root = cn.root()
-
-        names = "a", "b", "c"
-
-        for name in names:
-            root[name] = MinPO(name)
-            transaction.commit()
-
-        for name in names:
-            cn2 = db.open(version=name)
-            rt2 = cn2.root()
-            obj = rt2[name]
-            obj.value = MinPO("version")
-            transaction.commit()
-            cn2.close()
-
-        root["d"] = MinPO("d")
-        transaction.commit()
-        snooze()
-
-        self._storage.pack(time.time(), referencesf)
-        cn.sync()
-
-        # make sure all the non-version data is there
-        for name, obj in root.items():
-            self.assertEqual(name, obj.value)
-
-        # make sure all the version-data is there,
-        # and create a new revision in the version
-        for name in names:
-            cn2 = db.open(version=name)
-            rt2 = cn2.root()
-            obj = rt2[name].value
-            self.assertEqual(obj.value, "version")
-            obj.value = "still version"
-            transaction.commit()
-            cn2.close()
-
-        db.abortVersion("b")
-        txn = transaction.get()
-        txn.note("abort version b")
-        txn.commit()
-
-        t = time.time()
-        snooze()
-
-        L = db.undoInfo()
-        db.undo(L[0]["id"])
-        txn = transaction.get()
-        txn.note("undo abort")
-        txn.commit()
-
-        self._storage.pack(t, referencesf)
-
-        cn2 = db.open(version="b")
-        rt2 = cn2.root()
-        self.assertEqual(rt2["b"].value.value, "still version")
-
-    def checkLoadBeforeVersion(self):
-        eq = self.assertEqual
-        oid = self._storage.new_oid()
-        revid1 = self._dostore(oid, data=1)
-        revid2 = self._dostore(oid, data=2, revid=revid1, version="kobe")
-        revid3 = self._dostore(oid, data=3, revid=revid2, version="kobe")
-        data, start_tid, end_tid = self._storage.loadBefore(oid, revid3)
-        eq(zodb_unpickle(data), MinPO(1))
-        eq(start_tid, revid1)
-        eq(end_tid, None)
diff --git a/branches/bug1734/src/ZODB/tests/__init__.py b/branches/bug1734/src/ZODB/tests/__init__.py
deleted file mode 100644
index 669dd672..00000000
--- a/branches/bug1734/src/ZODB/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Having this makes debugging better.
diff --git a/branches/bug1734/src/ZODB/tests/dangle.py b/branches/bug1734/src/ZODB/tests/dangle.py
deleted file mode 100755
index 252a3703..00000000
--- a/branches/bug1734/src/ZODB/tests/dangle.py
+++ /dev/null
@@ -1,65 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Functional test to produce a dangling reference."""
-
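-# Run directly (python dangle.py): it creates and then packs a
-# FileStorage named dangle.fs in the current directory.
-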
-import time
-
-import transaction
-from ZODB.FileStorage import FileStorage
-from ZODB import DB
-
-from persistent import Persistent
-
-class P(Persistent):
-    pass
-
-def create_dangling_ref(db):
-    rt = db.open().root()
-
-    rt[1] = o1 = P()
-    transaction.get().note("create o1")
-    transaction.commit()
-
-    rt[2] = o2 = P()
-    transaction.get().note("create o2")
-    transaction.commit()
-
-    c = o1.child = P()
-    transaction.get().note("set child on o1")
-    transaction.commit()
-
-    o1.child = P()
-    transaction.get().note("replace child on o1")
-    transaction.commit()
-
-    time.sleep(2)
-    # The pack should remove the reference to c, because it is no
-    # longer referenced from o1.  But the object still exists and has
-    # an oid, so a new commit of it won't create a new object.
-    db.pack()
-
-    print repr(c._p_oid)
-    o2.child = c
-    transaction.get().note("set child on o2")
-    transaction.commit()
-
-def main():
-    fs = FileStorage("dangle.fs")
-    db = DB(fs)
-    create_dangling_ref(db)
-    db.close()
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/ZODB/tests/dbopen.txt b/branches/bug1734/src/ZODB/tests/dbopen.txt
deleted file mode 100644
index d6cc4874..00000000
--- a/branches/bug1734/src/ZODB/tests/dbopen.txt
+++ /dev/null
@@ -1,278 +0,0 @@
-=====================
-Connection Management
-=====================
-
-
-Here we exercise the connection management done by the DB class.
-
-    >>> from ZODB import DB
-    >>> from ZODB.MappingStorage import MappingStorage as Storage
-
-Capturing log messages from DB is important for some of the examples:
-
-    >>> from zope.testing.loggingsupport import InstalledHandler
-    >>> handler = InstalledHandler('ZODB.DB')
-
-Create a storage, and wrap it in a DB wrapper:
-
-    >>> st = Storage()
-    >>> db = DB(st)
-
-By default, we can open 7 connections without any log messages:
-
-    >>> conns = [db.open() for dummy in range(7)]
-    >>> handler.records
-    []
-
-Open one more, and we get a warning:
-
-    >>> conns.append(db.open())
-    >>> len(handler.records)
-    1
-    >>> msg = handler.records[0]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB WARNING DB.open() has 8 open connections with a pool_size of 7
-
-Open 6 more, and we get 6 more warnings:
-
-    >>> conns.extend([db.open() for dummy in range(6)])
-    >>> len(conns)
-    14
-    >>> len(handler.records)
-    7
-    >>> msg = handler.records[-1]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB WARNING DB.open() has 14 open connections with a pool_size of 7
-
-Add another, so that it's more than twice the default, and the level
-rises to critical:
-
-    >>> conns.append(db.open())
-    >>> len(conns)
-    15
-    >>> len(handler.records)
-    8
-    >>> msg = handler.records[-1]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB CRITICAL DB.open() has 15 open connections with a pool_size of 7
-
-While it's boring, it's important to verify that the same relationships
-hold if the default pool size is overridden.
-
-    >>> handler.clear()
-    >>> st.close()
-    >>> st = Storage()
-    >>> PS = 2 # smaller pool size
-    >>> db = DB(st, pool_size=PS)
-    >>> conns = [db.open() for dummy in range(PS)]
-    >>> handler.records
-    []
-
-A warning for opening one more:
-
-    >>> conns.append(db.open())
-    >>> len(handler.records)
-    1
-    >>> msg = handler.records[0]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB WARNING DB.open() has 3 open connections with a pool_size of 2
-
-More warnings through 4 connections:
-
-    >>> conns.extend([db.open() for dummy in range(PS-1)])
-    >>> len(conns)
-    4
-    >>> len(handler.records)
-    2
-    >>> msg = handler.records[-1]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
-
-And critical for going beyond that:
-
-    >>> conns.append(db.open())
-    >>> len(conns)
-    5
-    >>> len(handler.records)
-    3
-    >>> msg = handler.records[-1]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB CRITICAL DB.open() has 5 open connections with a pool_size of 2
-
-We can change the pool size on the fly:
-
-    >>> handler.clear()
-    >>> db.setPoolSize(6)
-    >>> conns.append(db.open())
-    >>> handler.records  # no log msg -- the pool is bigger now
-    []
-    >>> conns.append(db.open()) # but one more and there's a warning again
-    >>> len(handler.records)
-    1
-    >>> msg = handler.records[0]
-    >>> print msg.name, msg.levelname, msg.getMessage()
-    ZODB.DB WARNING DB.open() has 7 open connections with a pool_size of 6
-
-Enough of that.
-
-    >>> handler.clear()
-    >>> st.close()
-
-More interesting is the stack-like nature of connection reuse.  So long as
-we keep opening new connections, and keep them alive, all connections
-returned are distinct:
-
-    >>> st = Storage()
-    >>> db = DB(st)
-    >>> c1 = db.open()
-    >>> c2 = db.open()
-    >>> c3 = db.open()
-    >>> c1 is c2 or c1 is c3 or c2 is c3
-    False
-
-Let's put some markers on the connections, so we can identify these
-specific objects later:
-
-    >>> c1.MARKER = 'c1'
-    >>> c2.MARKER = 'c2'
-    >>> c3.MARKER = 'c3'
-
-Now explicitly close c1 and c2:
-
-    >>> c1.close()
-    >>> c2.close()
-
-Reaching into the internals, we can see that db's connection pool now has
-two connections available for reuse, and knows about three connections in
-all:
-
-    >>> pool = db._pools['']
-    >>> len(pool.available)
-    2
-    >>> len(pool.all)
-    3
-
-Since we closed c2 last, it's at the top of the available stack, so will
-be reused by the next open():
-
-    >>> c1 = db.open()
-    >>> c1.MARKER
-    'c2'
-    >>> len(pool.available), len(pool.all)
-    (1, 3)
-
-    >>> c3.close()  # now the stack has c3 on top, then c1
-    >>> c2 = db.open()
-    >>> c2.MARKER
-    'c3'
-    >>> len(pool.available), len(pool.all)
-    (1, 3)
-    >>> c3 = db.open()
-    >>> c3.MARKER
-    'c1'
-    >>> len(pool.available), len(pool.all)
-    (0, 3)
-
-What about the 3 in pool.all?  We've seen that closing connections doesn't
-reduce pool.all, and it would be bad if DB kept connections alive forever.
-
-In fact pool.all is a "weak set" of connections -- it holds weak references
-to connections.  That alone doesn't keep connection objects alive.  The
-weak set allows DB's statistics methods to return info about connections
-that are still alive.
-
-
-    >>> len(db.cacheDetailSize())  # one result for each connection's cache
-    3
-
-If a connection object is abandoned (it becomes unreachable), then it
-will vanish from pool.all automatically.  However, connections are
-involved in cycles, so exactly when a connection vanishes from pool.all
-isn't predictable.  It can be forced by running gc.collect():
-
-    >>> import gc
-    >>> dummy = gc.collect()
-    >>> len(pool.all)
-    3
-    >>> c3 = None
-    >>> dummy = gc.collect()  # removes c3 from pool.all
-    >>> len(pool.all)
-    2
-
-Note that c3 is really gone; in particular it didn't get added back to
-the stack of available connections by magic:
-
-    >>> len(pool.available)
-    0
-
-Nothing in that last block should have logged any msgs:
-
-    >>> handler.records
-    []
-
-If "too many" connections are open, then closing one may kick an older
-closed one out of the available connection stack.
-
-    >>> st.close()
-    >>> st = Storage()
-    >>> db = DB(st, pool_size=3)
-    >>> conns = [db.open() for dummy in range(6)]
-    >>> len(handler.records)  # 3 warnings for the "excess" connections
-    3
-    >>> pool = db._pools['']
-    >>> len(pool.available), len(pool.all)
-    (0, 6)
-
-Let's mark them:
-
-    >>> for i, c in enumerate(conns):
-    ...     c.MARKER = i
-
-Closing connections adds them to the stack:
-
-    >>> for i in range(3):
-    ...     conns[i].close()
-    >>> len(pool.available), len(pool.all)
-    (3, 6)
-    >>> del conns[:3]  # leave the ones with MARKERs 3, 4 and 5
-
-Closing another one will purge the one with MARKER 0 from the stack
-(since it was the first added to the stack):
-
-    >>> [c.MARKER for c in pool.available]
-    [0, 1, 2]
-    >>> conns[0].close()  # MARKER 3
-    >>> len(pool.available), len(pool.all)
-    (3, 5)
-    >>> [c.MARKER for c in pool.available]
-    [1, 2, 3]
-
-Similarly for the other two:
-
-    >>> conns[1].close(); conns[2].close()
-    >>> len(pool.available), len(pool.all)
-    (3, 3)
-    >>> [c.MARKER for c in pool.available]
-    [3, 4, 5]
-
-Reducing the pool size may also purge the oldest closed connections:
-
-    >>> db.setPoolSize(2)  # gets rid of MARKER 3
-    >>> len(pool.available), len(pool.all)
-    (2, 2)
-    >>> [c.MARKER for c in pool.available]
-    [4, 5]
-
-Since MARKER 5 is still the last one added to the stack, it will be the
-first popped:
-
-    >>> c1 = db.open(); c2 = db.open()
-    >>> c1.MARKER, c2.MARKER
-    (5, 4)
-    >>> len(pool.available), len(pool.all)
-    (0, 2)
-
-Clean up.
-
-    >>> st.close()
-    >>> handler.uninstall()
diff --git a/branches/bug1734/src/ZODB/tests/multidb.txt b/branches/bug1734/src/ZODB/tests/multidb.txt
deleted file mode 100644
index 5394decf..00000000
--- a/branches/bug1734/src/ZODB/tests/multidb.txt
+++ /dev/null
@@ -1,146 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-Multi-database tests
-====================
-
-Multi-database support adds the ability to tie multiple databases into a
-collection.  The original proposal is in the fishbowl:
-
-    http://www.zope.org/Wikis/ZODB/MultiDatabases/
-
-It was implemented during the PyCon 2005 sprints, but in a simpler form,
-by Jim Fulton, Christian Theune, and Tim Peters.  Overview:
-
-No private attributes were added, and one new method was introduced.
-
-DB:
-
-- a new .database_name attribute holds the name of this database
-
-- a new .databases attribute maps from database name to DB object; all DBs
-  in a multi-database collection share the same .databases object
-
-- the DB constructor has new optional arguments with the same names
-  (database_name= and databases=).
-
-Connection:
-
-- a new .connections attribute maps from database name to a Connection for
-  the database with that name; the .connections mapping object is also
-  shared among databases in a collection
-
-- a new .get_connection(database_name) method returns a Connection for a
-  database in the collection; if a connection is already open, it's returned
-  (this is the value .connections[database_name]), else a new connection is
-  opened (and stored as .connections[database_name])
-
-
-Creating a multi-database starts with creating a named DB:
-
-    >>> from ZODB.tests.test_storage import MinimalMemoryStorage
-    >>> from ZODB import DB
-    >>> dbmap = {}
-    >>> db = DB(MinimalMemoryStorage(), database_name='root', databases=dbmap)
-
-The database name is accessible afterwards and in a newly created collection:
-
-    >>> db.database_name
-    'root'
-    >>> db.databases        # doctest: +ELLIPSIS
-    {'root': <ZODB.DB.DB object at ...>}
-    >>> db.databases is dbmap
-    True
-
-Adding another database to the collection works like this:
-
-    >>> db2 = DB(MinimalMemoryStorage(),
-    ...     database_name='notroot',
-    ...     databases=dbmap)
-
-The new db2 now shares the 'databases' dictionary with db and has two entries:
-
-    >>> db2.databases is db.databases is dbmap
-    True
-    >>> len(db2.databases)
-    2
-    >>> names = dbmap.keys(); names.sort(); print names
-    ['notroot', 'root']
-
-It's an error to try to insert a database with a name already in use:
-
-    >>> db3 = DB(MinimalMemoryStorage(),
-    ...     database_name='root',
-    ...     databases=dbmap)
-    Traceback (most recent call last):
-        ...
-    ValueError: database_name 'root' already in databases
-
-Because that failed, db.databases wasn't changed:
-
-    >>> len(db.databases)  # still 2
-    2
-
-You can (still) get a connection to a database this way:
-
-    >>> cn = db.open()
-    >>> cn                  # doctest: +ELLIPSIS
-    <Connection at ...>
-
-This is the only connection in this collection right now:
-
-    >>> cn.connections      # doctest: +ELLIPSIS
-    {'root': <Connection at ...>}
-
-Getting a connection to a different database from an existing connection in the
-same database collection (this enables 'connection binding' within a given
-thread/transaction/context ...):
-
-    >>> cn2 = cn.get_connection('notroot')
-    >>> cn2                  # doctest: +ELLIPSIS
-    <Connection at ...>
-
-Now there are two connections in that collection:
-
-    >>> cn2.connections is cn.connections
-    True
-    >>> len(cn2.connections)
-    2
-    >>> names = cn.connections.keys(); names.sort(); print names
-    ['notroot', 'root']
-
-So long as this database group remains open, the same Connection objects
-are returned:
-
-    >>> cn.get_connection('root') is cn
-    True
-    >>> cn.get_connection('notroot') is cn2
-    True
-    >>> cn2.get_connection('root') is cn
-    True
-    >>> cn2.get_connection('notroot') is cn2
-    True
-
-Of course trying to get a connection for a database not in the group raises
-an exception:
-
-    >>> cn.get_connection('no way')
-    Traceback (most recent call last):
-      ...
-    KeyError: 'no way'
-
-Clean up:
-
-    >>> for a_db in dbmap.values():
-    ...     a_db.close()
diff --git a/branches/bug1734/src/ZODB/tests/sampledm.py b/branches/bug1734/src/ZODB/tests/sampledm.py
deleted file mode 100644
index af874cb7..00000000
--- a/branches/bug1734/src/ZODB/tests/sampledm.py
+++ /dev/null
@@ -1,412 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Sample objects for use in tests
-
-$Id$
-"""
-
-class DataManager(object):
-    """Sample data manager
-
-       This class provides a trivial data-manager implementation and doc
-       strings to illustrate the protocol and to provide a tool for
-       writing tests.
-
-       Our sample data manager has state that is updated through an inc
-       method and through transaction operations.
-
-       When we create a sample data manager:
-
-       >>> dm = DataManager()
-
-       It has two bits of state, state:
-
-       >>> dm.state
-       0
-
-       and delta:
-
-       >>> dm.delta
-       0
-
-       Both are initialized to 0.  state is meant to model
-       committed state, while delta represents tentative changes within a
-       transaction.  We change the state by calling inc:
-
-       >>> dm.inc()
-
-       which updates delta:
-
-       >>> dm.delta
-       1
-
-       but state isn't changed until we commit the transaction:
-
-       >>> dm.state
-       0
-
-       To commit the changes, we use 2-phase commit. We execute the first
-       stage by calling prepare.  We need to pass a transaction.  Our
-       sample data managers don't really use the transactions for much,
-       so we'll be lazy and use strings for transactions:
-
-       >>> t1 = '1'
-       >>> dm.prepare(t1)
-
-       The sample data manager updates the state when we call prepare:
-
-       >>> dm.state
-       1
-       >>> dm.delta
-       1
-
-       This is mainly so we can detect some effect of calling the methods.
-
-       Now if we call commit:
-
-       >>> dm.commit(t1)
-
-       Our changes are"permanent".  The state reflects the changes and the
-       delta has been reset to 0.
-
-       >>> dm.state
-       1
-       >>> dm.delta
-       0
-       """
-
-    def __init__(self):
-        self.state = 0
-        self.sp = 0
-        self.transaction = None
-        self.delta = 0
-        self.prepared = False
-
-    def inc(self, n=1):
-        self.delta += n
-
-    def prepare(self, transaction):
-        """Prepare to commit data
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> t1 = '1'
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-        >>> dm.state
-        1
-        >>> dm.inc()
-        >>> t2 = '2'
-        >>> dm.prepare(t2)
-        >>> dm.abort(t2)
-        >>> dm.state
-        1
-
-        It is an error to call prepare more than once without an intervening
-        commit or abort:
-
-        >>> dm.prepare(t1)
-
-        >>> dm.prepare(t1)
-        Traceback (most recent call last):
-        ...
-        TypeError: Already prepared
-
-        >>> dm.prepare(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: Already prepared
-
-        >>> dm.abort(t1)
-
-        If there was a preceding savepoint, the transaction must match:
-
-        >>> rollback = dm.savepoint(t1)
-        >>> dm.prepare(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.prepare(t1)
-
-        """
-        if self.prepared:
-            raise TypeError('Already prepared')
-        self._checkTransaction(transaction)
-        self.prepared = True
-        self.transaction = transaction
-        self.state += self.delta
-
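-    # Shared guard used by prepare/abort/savepoint: a data manager may
-    # be joined to at most one transaction at a time; anything else is
-    # reported as a transaction mismatch.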
-    def _checkTransaction(self, transaction):
-        if (transaction is not self.transaction
-            and self.transaction is not None):
-            raise TypeError("Transaction missmatch",
-                            transaction, self.transaction)
-
-    def abort(self, transaction):
-        """Abort a transaction
-
-        The abort method can be called before two-phase commit to
-        throw away work done in the transaction:
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> t1 = '1'
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        The abort method also throws away work done in savepoints:
-
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (0, 2)
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        If savepoints are used, abort must be passed the same
-        transaction:
-
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> t2 = '2'
-        >>> dm.abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.abort(t1)
-
-        The abort method is also used to abort a two-phase commit:
-
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.prepare(t1)
-        >>> dm.state, dm.delta
-        (1, 1)
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        Of course, the transactions passed to prepare and abort must
-        match:
-
-        >>> dm.prepare(t1)
-        >>> dm.abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.abort(t1)
-
-
-        """
-        self._checkTransaction(transaction)
-        if self.transaction is not None:
-            self.transaction = None
-
-        if self.prepared:
-            self.state -= self.delta
-            self.prepared = False
-
-        self.delta = 0
-
-    def commit(self, transaction):
-        """Complete two-phase commit
-
-        >>> dm = DataManager()
-        >>> dm.state
-        0
-        >>> dm.inc()
-
-        We start two-phase commit by calling prepare:
-
-        >>> t1 = '1'
-        >>> dm.prepare(t1)
-
-        We complete it by calling commit:
-
-        >>> dm.commit(t1)
-        >>> dm.state
-        1
-
-        It is an error to call commit without calling prepare first:
-
-        >>> dm.inc()
-        >>> t2 = '2'
-        >>> dm.commit(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: Not prepared to commit
-
-        >>> dm.prepare(t2)
-        >>> dm.commit(t2)
-
-        Of course, the transactions given to prepare and commit must
-        be the same:
-
-        >>> dm.inc()
-        >>> t3 = '3'
-        >>> dm.prepare(t3)
-        >>> dm.commit(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '3')
-
-        """
-        if not self.prepared:
-            raise TypeError('Not prepared to commit')
-        self._checkTransaction(transaction)
-        self.delta = 0
-        self.transaction = None
-        self.prepared = False
-
-    def savepoint(self, transaction):
-        """Provide the ability to rollback transaction state
-
-        Savepoints provide a way to:
-
-        - Save partial transaction work. For some data managers, this
-          could allow resources to be used more efficiently.
-
-        - Provide the ability to revert state to a point in a
-          transaction without aborting the entire transaction.  In
-          other words, savepoints support partial aborts.
-
-        Savepoints don't use two-phase commit. If there are errors in
-        setting or rolling back to savepoints, the application should
-        abort the containing transaction.  This is *not* the
-        responsibility of the data manager.
-
-        Savepoints are always associated with a transaction. Any work
-        done in a savepoint's transaction is tentative until the
-        transaction is committed using two-phase commit.
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> t1 = '1'
-        >>> r = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 2)
-        >>> r.rollback()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        Savepoints must have the same transaction:
-
-        >>> r1 = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (1, 0)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 1)
-        >>> t2 = '2'
-        >>> r2 = dm.savepoint(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> r2 = dm.savepoint(t1)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 2)
-
-        If we rollback to an earlier savepoint, we discard all work
-        done later:
-
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        and we can no longer rollback to the later savepoint:
-
-        >>> r2.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Attempt to roll back to invalid save point', 3, 2)
-
-        We can roll back to a savepoint as often as we like:
-
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        >>> dm.inc()
-        >>> dm.inc()
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 3)
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        But we can't rollback to a savepoint after it has been
-        committed:
-
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-
-        >>> r1.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: Attempt to rollback stale rollback
-
-        """
-        if self.prepared:
-            raise TypeError("Can't get savepoint during two-phase commit")
-        self._checkTransaction(transaction)
-        self.transaction = transaction
-        self.sp += 1
-        return Rollback(self)
-
-class Rollback(object):
-
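-    # Snapshots the data manager's savepoint counter, delta and current
-    # transaction; rollback() restores the delta only while the snapshot
-    # is still valid, i.e. the transaction matches and the data manager
-    # hasn't already been rolled back to an earlier savepoint.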
-    def __init__(self, dm):
-        self.dm = dm
-        self.sp = dm.sp
-        self.delta = dm.delta
-        self.transaction = dm.transaction
-
-    def rollback(self):
-        if self.transaction is not self.dm.transaction:
-            raise TypeError("Attempt to rollback stale rollback")
-        if self.dm.sp < self.sp:
-            raise TypeError("Attempt to roll back to invalid save point",
-                            self.sp, self.dm.sp)
-        self.dm.sp = self.sp
-        self.dm.delta = self.delta
-
-
-def test_suite():
-    from doctest import DocTestSuite
-    return DocTestSuite()
-
-if __name__ == '__main__':
-    import unittest
-    unittest.main()
diff --git a/branches/bug1734/src/ZODB/tests/speed.py b/branches/bug1734/src/ZODB/tests/speed.py
deleted file mode 100644
index 14ce828d..00000000
--- a/branches/bug1734/src/ZODB/tests/speed.py
+++ /dev/null
@@ -1,125 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-usage="""Test speed of a ZODB storage
-
-Options:
-
-    -d file    The data file to use as input.
-               The default is this script.
-
-    -n n       The number of repetitions
-
-    -s module  A module that defines a 'Storage'
-               attribute, which is an open storage.
-               If not specified, a FileStorage will be
-               used.
-
-    -z         Test compressing data
-
-    -D         Run in debug mode
-
-    -L         Test loads as well as stores by minimizing
-               the cache after each run
-
-    -M         Output means only
-"""
-
-import sys, os, getopt, string, time
-sys.path.insert(0, os.getcwd())
-
-import ZODB, ZODB.FileStorage
-import persistent
-import transaction
-
-class P(persistent.Persistent): pass
-
-def main(args):
-
-    opts, args = getopt.getopt(args, 'zd:n:Ds:LM')
-    z=s=None
-    data=sys.argv[0]
-    nrep=5
-    minimize=0
-    detailed=1
-    for o, v in opts:
-        if o=='-n': nrep=string.atoi(v)
-        elif o=='-d': data=v
-        elif o=='-s': s=v
-        elif o=='-z':
-            global zlib
-            import zlib
-            z=compress
-        elif o=='-L':
-            minimize=1
-        elif o=='-M':
-            detailed=0
-        elif o=='-D':
-            os.environ['STUPID_LOG_FILE']=''
-            os.environ['STUPID_LOG_SEVERITY']='-999'
-
-    if s:
-        s=__import__(s, globals(), globals(), ('__doc__',))
-        s=s.Storage
-    else:
-        s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
-
-    data=open(data).read()
-    db=ZODB.DB(s,
-               # disable cache deactivation
-               cache_size=4000,
-               cache_deactivate_after=6000,)
-
-    results={1:0, 10:0, 100:0, 1000:0}
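-    # Each of the nrep passes stores batches of 1, 10, 100 and 1000
-    # objects in separate transactions and accumulates the elapsed
-    # wall-clock time per batch size in results.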
-    for j in range(nrep):
-        for r in 1, 10, 100, 1000:
-            t=time.time()
-            jar=db.open()
-            transaction.begin()
-            rt=jar.root()
-            key='s%s' % r
-            if rt.has_key(key): p=rt[key]
-            else: rt[key]=p=P()
-            for i in range(r):
-                if z is not None: d=z(data)
-                else: d=data
-                v=getattr(p, str(i), P())
-                v.d=d
-                setattr(p,str(i),v)
-            transaction.commit()
-            jar.close()
-            t=time.time()-t
-            if detailed:
-                sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
-                sys.stdout.flush()
-            results[r]=results[r]+t
-            rt=d=p=v=None # release all references
-            if minimize:
-                time.sleep(3)
-                jar.cacheMinimize(3)
-
-    if detailed: print '-'*24
-    for r in 1, 10, 100, 1000:
-        t=results[r]/nrep
-        sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
-
-    db.close()
-
-
-def compress(s):
-    c=zlib.compressobj()
-    o=c.compress(s)
-    return o+c.flush()
-
-if __name__=='__main__': main(sys.argv[1:])
diff --git a/branches/bug1734/src/ZODB/tests/testActivityMonitor.py b/branches/bug1734/src/ZODB/tests/testActivityMonitor.py
deleted file mode 100644
index 671565d3..00000000
--- a/branches/bug1734/src/ZODB/tests/testActivityMonitor.py
+++ /dev/null
@@ -1,107 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the default activity monitor.
-
-See ZODB/ActivityMonitor.py
-
-$Id$
-"""
-
-import unittest
-import time
-
-from ZODB.ActivityMonitor import ActivityMonitor
-
-
-class FakeConnection:
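-    # Minimal stand-in for a ZODB Connection: it implements just the
-    # getTransferCounts() protocol that ActivityMonitor relies on.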
-
-    loads = 0
-    stores = 0
-
-    def _transferred(self, loads, stores):
-        self.loads = self.loads + loads
-        self.stores = self.stores + stores
-
-    def getTransferCounts(self, clear=0):
-        res = self.loads, self.stores
-        if clear:
-            self.loads = self.stores = 0
-        return res
-
-
-class Tests(unittest.TestCase):
-
-    def testAddLogEntries(self):
-        am = ActivityMonitor(history_length=3600)
-        self.assertEqual(len(am.log), 0)
-        c = FakeConnection()
-        c._transferred(1, 2)
-        am.closedConnection(c)
-        c._transferred(3, 7)
-        am.closedConnection(c)
-        self.assertEqual(len(am.log), 2)
-
-    def testTrim(self):
-        am = ActivityMonitor(history_length=0.1)
-        c = FakeConnection()
-        c._transferred(1, 2)
-        am.closedConnection(c)
-        time.sleep(0.2)
-        c._transferred(3, 7)
-        am.closedConnection(c)
-        self.assert_(len(am.log) <= 1)
-
-    def testSetHistoryLength(self):
-        am = ActivityMonitor(history_length=3600)
-        c = FakeConnection()
-        c._transferred(1, 2)
-        am.closedConnection(c)
-        time.sleep(0.2)
-        c._transferred(3, 7)
-        am.closedConnection(c)
-        self.assertEqual(len(am.log), 2)
-        am.setHistoryLength(0.1)
-        self.assertEqual(am.getHistoryLength(), 0.1)
-        self.assert_(len(am.log) <= 1)
-
-    def testActivityAnalysis(self):
-        am = ActivityMonitor(history_length=3600)
-        c = FakeConnection()
-        c._transferred(1, 2)
-        am.closedConnection(c)
-        c._transferred(3, 7)
-        am.closedConnection(c)
-        res = am.getActivityAnalysis(start=0, end=0, divisions=10)
-        lastend = 0
-        for n in range(9):
-            div = res[n]
-            self.assertEqual(div['stores'], 0)
-            self.assertEqual(div['loads'], 0)
-            self.assert_(div['start'] > 0)
-            self.assert_(div['start'] >= lastend)
-            self.assert_(div['start'] < div['end'])
-            lastend = div['end']
-        div = res[9]
-        self.assertEqual(div['stores'], 9)
-        self.assertEqual(div['loads'], 4)
-        self.assert_(div['start'] > 0)
-        self.assert_(div['start'] >= lastend)
-        self.assert_(div['start'] < div['end'])
-
-
-def test_suite():
-    return unittest.makeSuite(Tests)
-
-if __name__=='__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZODB/tests/testBroken.py b/branches/bug1734/src/ZODB/tests/testBroken.py
deleted file mode 100644
index 6666e169..00000000
--- a/branches/bug1734/src/ZODB/tests/testBroken.py
+++ /dev/null
@@ -1,91 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test broken-object suppport
-
-$Id$
-"""
-
-import sys
-import unittest
-import persistent
-import transaction
-from doctest import DocTestSuite
-from ZODB.tests.util import DB
-
-def test_integration():
-    """Test the integration of broken object support with the databse:
-
-    >>> db = DB()
-
-    We'll create a fake module with a class:
-
-    >>> class NotThere:
-    ...     Atall = type('Atall', (persistent.Persistent, ),
-    ...                  {'__module__': 'ZODB.not.there'})
-
-    And stuff this into sys.modules to simulate a regular module:
-
-    >>> sys.modules['ZODB.not.there'] = NotThere
-    >>> sys.modules['ZODB.not'] = NotThere
-
-    Now, we'll create and save an instance, and make sure we can
-    load it in another connection:
-
-    >>> a = NotThere.Atall()
-    >>> a.x = 1
-    >>> conn1 = db.open()
-    >>> conn1.root()['a'] = a
-    >>> transaction.commit()
-
-    >>> conn2 = db.open()
-    >>> a2 = conn2.root()['a']
-    >>> a2.__class__ is a.__class__
-    True
-    >>> a2.x
-    1
-
-    Now, we'll uninstall the module, simulating having the module
-    go away:
-
-    >>> del sys.modules['ZODB.not.there']
-
-    and we'll try to load the object in another connection:
-
-    >>> conn3 = db.open()
-    >>> a3 = conn3.root()['a']
-    >>> a3
-    <persistent broken ZODB.not.there.Atall instance """ \
-       r"""'\x00\x00\x00\x00\x00\x00\x00\x01'>
-
-    >>> a3.__Broken_state__
-    {'x': 1}
-
-    Let's clean up:
-
-    >>> db.close()
-    >>> del sys.modules['ZODB.not']
-
-    Cleanup:
-
-    >>> import ZODB.broken
-    >>> ZODB.broken.broken_cache.clear()
-    """
-
-def test_suite():
-    return unittest.TestSuite((
-        DocTestSuite('ZODB.broken'),
-        DocTestSuite(),
-        ))
-
-if __name__ == '__main__': unittest.main()
diff --git a/branches/bug1734/src/ZODB/tests/testCache.py b/branches/bug1734/src/ZODB/tests/testCache.py
deleted file mode 100644
index 4ddc5409..00000000
--- a/branches/bug1734/src/ZODB/tests/testCache.py
+++ /dev/null
@@ -1,426 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""A few simple tests of the public cache API.
-
-Each DB Connection has a separate PickleCache.  The Cache serves two
-purposes. It acts like a memo for unpickling.  It also keeps recent
-objects in memory under the assumption that they may be used again.
-"""
-
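-# The DB-level helpers exercised below (cacheSize, cacheFullSweep,
-# cacheMinimize, cacheDetail, cacheExtremeDetail) operate on the
-# PickleCache of each pooled connection.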
-import gc
-import time
-import unittest
-import threading
-
-from persistent.cPickleCache import PickleCache
-from persistent.mapping import PersistentMapping
-import transaction
-import ZODB
-import ZODB.MappingStorage
-from ZODB.tests.MinPO import MinPO
-from ZODB.utils import p64
-
-from persistent import Persistent
-
-class CacheTestBase(unittest.TestCase):
-
-    def setUp(self):
-        store = ZODB.MappingStorage.MappingStorage()
-        self.db = ZODB.DB(store,
-                          cache_size = self.CACHE_SIZE)
-        self.conns = []
-
-    def tearDown(self):
-        for conn in self.conns:
-            conn.close()
-        self.db.close()
-
-    CACHE_SIZE = 20
-
-    def noodle_new_connection(self):
-        """Do some reads and writes on a new connection."""
-
-        c = self.db.open()
-        self.conns.append(c)
-        self.noodle_connection(c)
-
-    def noodle_connection(self, c):
-        r = c.root()
-
-        i = len(self.conns)
-        d = r.get(i)
-        if d is None:
-            d = r[i] = PersistentMapping()
-            transaction.commit()
-
-        for i in range(15):
-            o = d.get(i)
-            if o is None:
-                o = d[i] = MinPO(i)
-            o.value += 1
-        transaction.commit()
-
-
-
-# CantGetRidOfMe is used by checkMinimizeTerminates.
-make_trouble = True
-class CantGetRidOfMe(MinPO):
-    def __init__(self, value):
-        MinPO.__init__(self, value)
-        self.an_attribute = 42
-
-    def __del__(self):
-        # Referencing an attribute of self causes self to be
-        # loaded into the cache again, which also resurrects
-        # self.
-        if make_trouble:
-            self.an_attribute
-
-class DBMethods(CacheTestBase):
-
-    __super_setUp = CacheTestBase.setUp
-
-    def setUp(self):
-        self.__super_setUp()
-        for i in range(4):
-            self.noodle_new_connection()
-
-    def checkCacheDetail(self):
-        for name, count in self.db.cacheDetail():
-            self.assert_(isinstance(name, str))
-            self.assert_(isinstance(count, int))
-
-    def checkCacheExtremeDetail(self):
-        expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
-        for dict in self.db.cacheExtremeDetail():
-            for k, v in dict.items():
-                self.assert_(k in expected)
-
-    # TODO:  not really sure how to do a black box test of the cache.
-    # Should the full sweep and minimize calls always remove things?
-
-    def checkFullSweep(self):
-        old_size = self.db.cacheSize()
-        self.db.cacheFullSweep()
-        new_size = self.db.cacheSize()
-        self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
-
-    def checkMinimize(self):
-        old_size = self.db.cacheSize()
-        self.db.cacheMinimize()
-        new_size = self.db.cacheSize()
-        self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
-
-    def checkMinimizeTerminates(self):
-        # This is tricky.  cPickleCache had a case where it could get into
-        # an infinite loop, but we don't want the test suite to hang
-        # if this bug reappears.  So this test spawns a thread to run the
-        # dangerous operation, and the main thread complains if the worker
-        # thread hasn't finished in 30 seconds (arbitrary, but way more
-        # than enough).  In that case, the worker thread will continue
-        # running forever (until killed externally), but at least the
-        # test suite will move on.
-        #
-        # The bug was triggered by having a persistent object whose __del__
-        # method references an attribute of the object.  An attempt to
-        # ghostify such an object will clear the attribute, and if the
-        # cache also releases the last Python reference to the object then
-        # (due to ghostifying it), the __del__ method gets invoked.
-        # Referencing the attribute loads the object again, and also
-        # puts it back into the cPickleCache.  If the cache implementation
-        # isn't looking out for this, it can get into an infinite loop
-        # then, endlessly trying to ghostify an object that in turn keeps
-        # unghostifying itself again.
-        class Worker(threading.Thread):
-
-            def __init__(self, testcase):
-                threading.Thread.__init__(self)
-                self.testcase = testcase
-
-            def run(self):
-                global make_trouble
-                # Make CantGetRidOfMe.__del__ dangerous.
-                make_trouble = True
-
-                conn = self.testcase.conns[0]
-                r = conn.root()
-                d = r[1]
-                for i in range(len(d)):
-                    d[i] = CantGetRidOfMe(i)
-                transaction.commit()
-
-                self.testcase.db.cacheMinimize()
-
-                # Defang the nasty objects.  Else, because they're
-                # immortal now, they hang around and create trouble
-                # for subsequent tests.
-                make_trouble = False
-                self.testcase.db.cacheMinimize()
-
-        w = Worker(self)
-        w.start()
-        w.join(30)
-        if w.isAlive():
-            self.fail("cacheMinimize still running after 30 seconds -- "
-                      "almost certainly in an infinite loop")
-
-    # TODO:  don't have an explicit test for incrgc, because the
-    # connection and database call it internally.
-    # Same for the get and invalidate methods.
-
-    def checkLRUitems(self):
-        # get a cache
-        c = self.conns[0]._cache
-        c.lru_items()
-
-    def checkClassItems(self):
-        c = self.conns[0]._cache
-        c.klass_items()
-
-class LRUCacheTests(CacheTestBase):
-
-    def checkLRU(self):
-        # verify the LRU behavior of the cache
-        dataset_size = 5
-        CACHE_SIZE = dataset_size*2+1
-        # a cache big enough to hold the objects added in two
-        # transactions, plus the root object
-        self.db.setCacheSize(CACHE_SIZE)
-        c = self.db.open()
-        r = c.root()
-        l = {}
-        # the root is the only thing in the cache, because all the
-        # other objects are new
-        self.assertEqual(len(c._cache), 1)
-        # run several transactions
-        for t in range(5):
-            for i in range(dataset_size):
-                l[(t,i)] = r[i] = MinPO(i)
-            transaction.commit()
-            # commit() will register the objects, placing them in the
-            # cache.  at the end of commit, the cache will be reduced
-            # down to CACHE_SIZE items
-            if len(l)>CACHE_SIZE:
-                self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
-        for i in range(dataset_size):
-            # Check objects added in the first two transactions.
-            # They must all be ghostified.
-            self.assertEqual(l[(0,i)]._p_changed, None)
-            self.assertEqual(l[(1,i)]._p_changed, None)
-            # Check objects added in the last two transactions.
-            # They must all still exist in memory, but have
-            # had their changes flushed
-            self.assertEqual(l[(3,i)]._p_changed, 0)
-            self.assertEqual(l[(4,i)]._p_changed, 0)
-            # Of the objects added in the middle transaction, most
-            # will have been ghostified. There is one cache slot
-            # that may be occupied by either one of those objects or
-            # the root, depending on precise order of access. We do
-            # not bother to check this
-
-    def checkSize(self):
-        self.assertEqual(self.db.cacheSize(), 0)
-        self.assertEqual(self.db.cacheDetailSize(), [])
-
-        CACHE_SIZE = 10
-        self.db.setCacheSize(CACHE_SIZE)
-
-        CONNS = 3
-        for i in range(CONNS):
-            self.noodle_new_connection()
-
-        self.assertEquals(self.db.cacheSize(), CACHE_SIZE * CONNS)
-        details = self.db.cacheDetailSize()
-        self.assertEquals(len(details), CONNS)
-        for d in details:
-            self.assertEquals(d['ngsize'], CACHE_SIZE)
-
-            # The assertion below is nonsensical.
-            # The (poorly named) cache size is a target for non-ghosts.
-            # The cache *usually* contains non-ghosts, so the size
-            # normally exceeds the target size.
-
-            #self.assertEquals(d['size'], CACHE_SIZE)
-
-    def checkDetail(self):
-        CACHE_SIZE = 10
-        self.db.setCacheSize(CACHE_SIZE)
-
-        CONNS = 3
-        for i in range(CONNS):
-            self.noodle_new_connection()
-
-        gc.collect()
-
-        # Obscure:  The above gc.collect call is necessary to make this test
-        # pass.
-        #
-        # This test then only works because the order of computations
-        # and object accesses in the "noodle" calls is such that the
-        # persistent mapping containing the MinPO objects is
-        # deactivated before the MinPO objects.
-        #
-        # - Without the gc call, the cache will contain ghost MinPOs
-        #   and the check of the MinPO count below will fail. That's
-        #   because the counts returned by cacheDetail include ghosts.
-        #
-        # - If the mapping object containing the MinPOs isn't
-        #   deactivated, there will be one fewer non-ghost MinPO and
-        #   the test will fail anyway.
-        #
-        # This test really needs to be thought through and documented
-        # better.
-
-
-        for klass, count in self.db.cacheDetail():
-            if klass.endswith('MinPO'):
-                self.assertEqual(count, CONNS * CACHE_SIZE)
-            if klass.endswith('PersistentMapping'):
-                # one root per connection
-                self.assertEqual(count, CONNS)
-
-        for details in self.db.cacheExtremeDetail():
-            # one 'details' dict per object
-            if details['klass'].endswith('PersistentMapping'):
-                self.assertEqual(details['state'], None)
-            else:
-                self.assert_(details['klass'].endswith('MinPO'))
-                self.assertEqual(details['state'], 0)
-            # The cache should never hold an unreferenced ghost.
-            if details['state'] is None:    # i.e., it's a ghost
-                self.assert_(details['rc'] > 0)
-
-class StubDataManager:
-    def setklassstate(self, object):
-        pass
-
-class StubObject(Persistent):
-    pass
-
-class CacheErrors(unittest.TestCase):
-
-    def setUp(self):
-        self.jar = StubDataManager()
-        self.cache = PickleCache(self.jar)
-
-    def checkGetBogusKey(self):
-        self.assertEqual(self.cache.get(p64(0)), None)
-        try:
-            self.cache[12]
-        except KeyError:
-            pass
-        else:
-            self.fail("expected KeyError")
-        try:
-            self.cache[12] = 12
-        except TypeError:
-            pass
-        else:
-            self.fail("expected TyepError")
-        try:
-            del self.cache[12]
-        except TypeError:
-            pass
-        else:
-            self.fail("expected TypeError")
-
-    def checkBogusObject(self):
-        def add(key, obj):
-            self.cache[key] = obj
-
-        key = p64(2)
-        # value isn't persistent
-        self.assertRaises(TypeError, add, key, 12)
-
-        o = StubObject()
-        # o._p_oid == None
-        self.assertRaises(TypeError, add, key, o)
-
-        o._p_oid = p64(3)
-        self.assertRaises(ValueError, add, key, o)
-
-        o._p_oid = key
-        # o._p_jar == None
-        self.assertRaises(Exception, add, key, o)
-
-        o._p_jar = self.jar
-        self.cache[key] = o
-        # make sure it can be added multiple times
-        self.cache[key] = o
-
-        # same object, different keys
-        self.assertRaises(ValueError, add, p64(0), o)
-
-    def checkTwoCaches(self):
-        jar2 = StubDataManager()
-        cache2 = PickleCache(jar2)
-
-        o = StubObject()
-        key = o._p_oid = p64(1)
-        o._p_jar = jar2
-
-        cache2[key] = o
-
-        try:
-            self.cache[key] = o
-        except ValueError:
-            pass
-        else:
-            self.fail("expected ValueError because object already in cache")
-
-    def checkReadOnlyAttrsWhenCached(self):
-        o = StubObject()
-        key = o._p_oid = p64(1)
-        o._p_jar = self.jar
-        self.cache[key] = o
-        try:
-            o._p_oid = p64(2)
-        except ValueError:
-            pass
-        else:
-            self.fail("expect that you can't change oid of cached object")
-        try:
-            del o._p_jar
-        except ValueError:
-            pass
-        else:
-            self.fail("expect that you can't delete jar of cached object")
-
-    def checkTwoObjsSameOid(self):
-        # Try to add two distinct objects with the same oid to the cache.
-        # This has always been an error, but the error message prior to
-        # ZODB 3.2.6 didn't make sense.  This test verifies that (a) an
-        # exception is raised; and, (b) the error message is the intended
-        # one.
-        obj1 = StubObject()
-        key = obj1._p_oid = p64(1)
-        obj1._p_jar = self.jar
-        self.cache[key] = obj1
-
-        obj2 = StubObject()
-        obj2._p_oid = key
-        obj2._p_jar = self.jar
-        try:
-            self.cache[key] = obj2
-        except ValueError, detail:
-            self.assertEqual(str(detail),
-                             "A different object already has the same oid")
-        else:
-            self.fail("two objects with the same oid should have failed")
-
-def test_suite():
-    s = unittest.makeSuite(DBMethods, 'check')
-    s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
-    s.addTest(unittest.makeSuite(CacheErrors, 'check'))
-    return s
diff --git a/branches/bug1734/src/ZODB/tests/testConfig.py b/branches/bug1734/src/ZODB/tests/testConfig.py
deleted file mode 100644
index 3f135596..00000000
--- a/branches/bug1734/src/ZODB/tests/testConfig.py
+++ /dev/null
@@ -1,124 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-import tempfile
-import unittest
-
-import transaction
-import ZODB.config
-from ZODB.POSException import ReadOnlyError
-
-
-class ConfigTestBase(unittest.TestCase):
-    def _opendb(self, s):
-        return ZODB.config.databaseFromString(s)
-
-    def tearDown(self):
-        if getattr(self, "storage", None) is not None:
-            self.storage.cleanup()
-
-    def _test(self, s):
-        db = self._opendb(s)
-        self.storage = db._storage
-        # Do something with the database to make sure it works
-        cn = db.open()
-        rt = cn.root()
-        rt["test"] = 1
-        transaction.commit()
-        db.close()
-
-
-class ZODBConfigTest(ConfigTestBase):
-    def test_map_config1(self):
-        self._test(
-            """
-            <zodb>
-              <mappingstorage/>
-            </zodb>
-            """)
-
-    def test_map_config2(self):
-        self._test(
-            """
-            <zodb>
-              <mappingstorage/>
-              cache-size 1000
-            </zodb>
-            """)
-
-    def test_file_config1(self):
-        path = tempfile.mktemp()
-        self._test(
-            """
-            <zodb>
-              <filestorage>
-                path %s
-              </filestorage>
-            </zodb>
-            """ % path)
-
-    def test_file_config2(self):
-        path = tempfile.mktemp()
-        cfg = """
-        <zodb>
-          <filestorage>
-            path %s
-            create false
-            read-only true
-          </filestorage>
-        </zodb>
-        """ % path
-        self.assertRaises(ReadOnlyError, self._test, cfg)
-
-    def test_demo_config(self):
-        cfg = """
-        <zodb unused-name>
-          <demostorage>
-            name foo
-            <mappingstorage/>
-          </demostorage>
-        </zodb>
-        """
-        self._test(cfg)
-
-
-class ZEOConfigTest(ConfigTestBase):
-    def test_zeo_config(self):
-        # We're looking for a port with nothing listening on it, so
-        # that the connection attempt will fail.  Instead of elaborate
-        # logic to probe for a free port, we just pick a "random" port
-        # number that is unlikely to be in use, and add this elaborate
-        # comment explaining it instead.  Go ahead,
-        # grep for 9.
-        from ZEO.ClientStorage import ClientDisconnected
-        cfg = """
-        <zodb>
-          <zeoclient>
-            server localhost:56897
-            wait false
-          </zeoclient>
-        </zodb>
-        """
-        self.assertRaises(ClientDisconnected, self._test, cfg)
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(ZODBConfigTest))
-    suite.addTest(unittest.makeSuite(ZEOConfigTest))
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/ZODB/tests/testConnection.py b/branches/bug1734/src/ZODB/tests/testConnection.py
deleted file mode 100644
index 0a392303..00000000
--- a/branches/bug1734/src/ZODB/tests/testConnection.py
+++ /dev/null
@@ -1,660 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Unit tests for the Connection class."""
-
-import doctest
-import unittest
-import warnings
-
-from persistent import Persistent
-import transaction
-from ZODB.config import databaseFromString
-from ZODB.utils import p64, u64
-from ZODB.tests.warnhook import WarningsHook
-from zope.interface.verify import verifyObject
-
-class ConnectionDotAdd(unittest.TestCase):
-
-    def setUp(self):
-        from ZODB.Connection import Connection
-        self.datamgr = Connection()
-        self.db = StubDatabase()
-        self.datamgr._setDB(self.db)
-        self.transaction = StubTransaction()
-
-    def tearDown(self):
-        transaction.abort()
-
-    def check_add(self):
-        from ZODB.POSException import InvalidObjectReference
-        obj = StubObject()
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.datamgr.add(obj)
-        self.assert_(obj._p_oid is not None)
-        self.assert_(obj._p_jar is self.datamgr)
-        self.assert_(self.datamgr.get(obj._p_oid) is obj)
-
-        # Only first-class persistent objects may be added.
-        self.assertRaises(TypeError, self.datamgr.add, object())
-
-        # Adding to the same connection does not fail. Object keeps the
-        # same oid.
-        oid = obj._p_oid
-        self.datamgr.add(obj)
-        self.assertEqual(obj._p_oid, oid)
-
-        # Cannot add an object from a different connection.
-        obj2 = StubObject()
-        obj2._p_jar = object()
-        self.assertRaises(InvalidObjectReference, self.datamgr.add, obj2)
-
-    def checkResetOnAbort(self):
-        # Check that _p_oid and _p_jar are reset when a transaction is
-        # aborted.
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-
-    def checkResetOnTpcAbort(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-
-        # Simulate an error while committing some other object.
-
-        self.datamgr.tpc_begin(self.transaction)
-        # Let's pretend something bad happens here.
-        # Call tpc_abort, clearing everything.
-        self.datamgr.tpc_abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-
-    def checkTpcAbortAfterCommit(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        # Let's pretend something bad happened here.
-        self.datamgr.tpc_abort(self.transaction)
-        self.assert_(obj._p_oid is None)
-        self.assert_(obj._p_jar is None)
-        self.assertRaises(KeyError, self.datamgr.get, oid)
-        self.assertEquals(self.db._storage._stored, [oid])
-
-    def checkCommit(self):
-        obj = StubObject()
-        self.datamgr.add(obj)
-        oid = obj._p_oid
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        self.assert_(obj._p_oid is oid)
-        self.assert_(obj._p_jar is self.datamgr)
-
-        # This next assert_ is covered by an assert in tpc_finish.
-        ##self.assert_(not self.datamgr._added)
-
-        self.assertEquals(self.db._storage._stored, [oid])
-        self.assertEquals(self.db._storage._finished, [oid])
-
-    def checkModifyOnGetstate(self):
-        member = StubObject()
-        subobj = StubObject()
-        subobj.member = member
-        obj = ModifyOnGetStateObject(subobj)
-        self.datamgr.add(obj)
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.commit(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        storage = self.db._storage
-        self.assert_(obj._p_oid in storage._stored, "object was not stored")
-        self.assert_(subobj._p_oid in storage._stored,
-                "subobject was not stored")
-        self.assert_(member._p_oid in storage._stored, "member was not stored")
-        self.assert_(self.datamgr._added_during_commit is None)
-
-    def checkUnusedAddWorks(self):
-        # When an object is added but not committed, it shouldn't be stored,
-        # but it also shouldn't be an error.
-        obj = StubObject()
-        self.datamgr.add(obj)
-        self.datamgr.tpc_begin(self.transaction)
-        self.datamgr.tpc_finish(self.transaction)
-        self.assert_(obj._p_oid not in self.datamgr._storage._stored)
-
-class UserMethodTests(unittest.TestCase):
-
-    # add isn't tested here, because there are a bunch of traditional
-    # unit tests for it.
-
-    # The version tests would require a storage that supports versions
-    # which is a bit more work.
-
-    def test_root(self):
-        r"""doctest of root() method
-
-        The root() method is simple, and the tests are pretty minimal.
-        Ensure that a new database has a root and that it is a
-        PersistentMapping.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> root = cn.root()
-        >>> type(root).__name__
-        'PersistentMapping'
-        >>> root._p_oid
-        '\x00\x00\x00\x00\x00\x00\x00\x00'
-        >>> root._p_jar is cn
-        True
-        >>> db.close()
-        """
-
-    def test_get(self):
-        r"""doctest of get() method
-
-        The get() method returns the persistent object corresponding to
-        an oid.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> obj = cn.get(p64(0))
-        >>> obj._p_oid
-        '\x00\x00\x00\x00\x00\x00\x00\x00'
-
-        The object is a ghost.
-
-        >>> obj._p_state
-        -1
-
-        And multiple calls with the same oid return the same object.
-
-        >>> obj2 = cn.get(p64(0))
-        >>> obj is obj2
-        True
-
-        If all references to the object are released, then a new
-        object will be returned. The cache doesn't keep unreferenced
-        ghosts alive.  (The next object returned may still have the
-        same id, because Python may re-use the same memory.)
-
-        >>> del obj, obj2
-        >>> cn._cache.get(p64(0), None)
-
-        If the object is unghosted, then it will stay in the cache
-        after the last reference is released.  (This is true only if
-        there is room in the cache and the object is recently used.)
-
-        >>> obj = cn.get(p64(0))
-        >>> obj._p_activate()
-        >>> y = id(obj)
-        >>> del obj
-        >>> obj = cn.get(p64(0))
-        >>> id(obj) == y
-        True
-        >>> obj._p_state
-        0
-
-        A request for an object that doesn't exist will raise a KeyError.
-
-        >>> cn.get(p64(1))
-        Traceback (most recent call last):
-          ...
-        KeyError: '\x00\x00\x00\x00\x00\x00\x00\x01'
-        """
-
-    def test_close(self):
-        r"""doctest of close() method
-
-        This is a minimal test, because most of the interesting
-        effects of closing a connection involve its interaction with the
-        database and the transaction.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-
-        It's safe to close a connection multiple times.
-        >>> cn.close()
-        >>> cn.close()
-        >>> cn.close()
-
-        It's not possible to load or store objects once the connection is closed.
-
-        >>> cn.get(p64(0))
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-        >>> p = Persistent()
-        >>> cn.add(p)
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-        """
-
-    def test_close_with_pending_changes(self):
-        r"""doctest to ensure close() w/ pending changes complains
-
-        >>> import transaction
-
-        Just opening and closing is fine.
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.close()
-
-        Opening, making a change, committing, and closing is fine.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 1
-        >>> transaction.commit()
-        >>> cn.close()
-
-        Opening, making a change, and aborting is fine.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 1
-        >>> transaction.abort()
-        >>> cn.close()
-
-        But trying to close with a change pending complains.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 10
-        >>> cn.close()
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: Cannot close a connection joined to a transaction
-
-        This leaves the connection as it was, so we can still commit
-        the change.
-        >>> transaction.commit()
-        >>> cn2 = db.open()
-        >>> cn2.root()['a']
-        10
-        >>> cn.close(); cn2.close()
-
-        Bug:  We weren't catching the case where the only changes pending
-        were in a subtransaction.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 100
-        >>> transaction.commit(True)
-        >>> cn.close()  # this was succeeding
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: Cannot close a connection with a pending subtransaction
-
-        Again this leaves the connection as it was.
-        >>> transaction.commit()
-        >>> cn2 = db.open()
-        >>> cn2.root()['a']
-        100
-        >>> cn.close(); cn2.close()
-
-        Make sure we can still close a connection after aborting a pending
-        subtransaction.
-        >>> cn = db.open()
-        >>> cn.root()['a'] = 1000
-        >>> transaction.commit(True)
-        >>> cn.root()['a']
-        1000
-        >>> transaction.abort()
-        >>> cn.root()['a']
-        100
-        >>> cn.close()
-
-        >>> db.close()
-        """
-
-    def test_onCloseCallbacks(self):
-        r"""doctest of onCloseCallback() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-
-        Every function registered is called, even if it raises an
-        exception.  They are only called once.
-
-        >>> L = []
-        >>> def f():
-        ...     L.append("f")
-        >>> def g():
-        ...     L.append("g")
-        ...     return 1 / 0
-        >>> cn.onCloseCallback(g)
-        >>> cn.onCloseCallback(f)
-        >>> cn.close()
-        >>> L
-        ['g', 'f']
-        >>> del L[:]
-        >>> cn.close()
-        >>> L
-        []
-
-        The implementation keeps a list of callbacks that is reset
-        to a class variable (which is bound to None) after the connection
-        is closed.
-
-        >>> cn._Connection__onCloseCallbacks
-        """
-
-    def test_db(self):
-        r"""doctest of db() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.db() is db
-        True
-        >>> cn.close()
-        >>> cn.db()
-        """
-
-    def test_isReadOnly(self):
-        r"""doctest of isReadOnly() method
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> cn.isReadOnly()
-        False
-        >>> cn.close()
-        >>> cn.isReadOnly()
-        Traceback (most recent call last):
-          ...
-        ConnectionStateError: The database connection is closed
-
-        An expedient way to create a read-only storage:
-
-        >>> db._storage._is_read_only = True
-        >>> cn = db.open()
-        >>> cn.isReadOnly()
-        True
-        """
-
-    def test_cache(self):
-        r"""doctest of cacheMinimize() and cacheFullSweep() methods.
-
-        These tests are fairly minimal, just verifying that the
-        methods can be called and have some effect.  We need other
-        tests that verify the cache works as intended.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> r = cn.root()
-        >>> cn.cacheMinimize()
-        >>> r._p_state
-        -1
-
-        The next couple of tests are involved because they have to
-        cater to backwards compatibility issues.  The cacheMinimize()
-        method used to take an argument, but now ignores it.
-        cacheFullSweep() used to do something different than
-        cacheMinimize(), but it doesn't anymore.  We want to verify
-        that these methods do something, but all cause deprecation
-        warnings.  To do that, we need a warnings hook.
-
-        >>> hook = WarningsHook()
-        >>> hook.install()
-
-        >>> r._p_activate()
-        >>> cn.cacheMinimize(12)
-        >>> r._p_state
-        -1
-        >>> len(hook.warnings)
-        1
-        >>> message, category, filename, lineno = hook.warnings[0]
-        >>> print message
-        This will be removed in ZODB 3.6:
-        cacheMinimize() dt= is ignored.
-        >>> category.__name__
-        'DeprecationWarning'
-        >>> hook.clear()
-
-        cacheFullSweep() is a doozy.  It generates two deprecation
-        warnings, one from the Connection and one from the
-        cPickleCache.  Maybe we should drop the cPickleCache warning,
-        but it's there for now.  When passed an argument, it acts like
-        cacheGC().  When it isn't passed an argument it acts like
-        cacheMinimize().
-
-        >>> r._p_activate()
-        >>> cn.cacheFullSweep(12)
-        >>> r._p_state
-        0
-        >>> len(hook.warnings)
-        2
-        >>> message, category, filename, lineno = hook.warnings[0]
-        >>> print message
-        This will be removed in ZODB 3.6:
-        cacheFullSweep is deprecated. Use cacheMinimize instead.
-        >>> category.__name__
-        'DeprecationWarning'
-        >>> message, category, filename, lineno = hook.warnings[1]
-        >>> message
-        'No argument expected'
-        >>> category.__name__
-        'DeprecationWarning'
-
-        We have to uninstall the hook so that other warnings don't get lost.
-
-        >>> hook.uninstall()
-
-        """
-
-class InvalidationTests(unittest.TestCase):
-
-    # It's harder to write serious tests, because some of the critical
-    # correctness issues relate to concurrency.  We'll have to depend
-    # on the various concurrent updates and NZODBThreads tests to
-    # handle these.
-
-    def test_invalidate(self):
-        r"""
-
-        This test initializes the database with several persistent
-        objects, then manually delivers invalidations and verifies that
-        they have the expected effect.
-
-        >>> db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        >>> cn = db.open()
-        >>> p1 = Persistent()
-        >>> p2 = Persistent()
-        >>> p3 = Persistent()
-        >>> r = cn.root()
-        >>> r.update(dict(p1=p1, p2=p2, p3=p3))
-        >>> transaction.commit()
-
-        Transaction ids are 8-byte strings, just like oids; p64() will
-        create one from an int.
-
-        >>> cn.invalidate(p64(1), {p1._p_oid: 1})
-        >>> cn._txn_time
-        '\x00\x00\x00\x00\x00\x00\x00\x01'
-        >>> p1._p_oid in cn._invalidated
-        True
-        >>> p2._p_oid in cn._invalidated
-        False
-
-        >>> cn.invalidate(p64(10), {p2._p_oid: 1, p64(76): 1})
-        >>> cn._txn_time
-        '\x00\x00\x00\x00\x00\x00\x00\x01'
-        >>> p1._p_oid in cn._invalidated
-        True
-        >>> p2._p_oid in cn._invalidated
-        True
-
-        Calling invalidate() doesn't affect the object state until
-        a transaction boundary.
-
-        >>> p1._p_state
-        0
-        >>> p2._p_state
-        0
-        >>> p3._p_state
-        0
-
-        The sync() method will abort the current transaction and
-        process any pending invalidations.
-
-        >>> cn.sync()
-        >>> p1._p_state
-        -1
-        >>> p2._p_state
-        -1
-        >>> p3._p_state
-        0
-        >>> cn._invalidated
-        {}
-
-        """
-
-# ---- stubs
-
-class StubObject(Persistent):
-    pass
-
-class StubTransaction:
-    pass
-
-class ErrorOnGetstateException(Exception):
-    pass
-
-class ErrorOnGetstateObject(Persistent):
-
-    def __getstate__(self):
-        raise ErrorOnGetstateException
-
-class ModifyOnGetStateObject(Persistent):
-
-    def __init__(self, p):
-        self._v_p = p
-
-    def __getstate__(self):
-        self._p_jar.add(self._v_p)
-        self.p = self._v_p
-        return Persistent.__getstate__(self)
-
-
-class StubStorage:
-    """Very simple in-memory storage that does *just* enough to support tests.
-
-    Only one concurrent transaction is supported.
-    Voting is not supported.
-    Versions are not supported.
-
-    Inspect self._stored and self._finished to see how the storage has been
-    used during a unit test. Whenever an object is stored in the store()
-    method, its oid is appended to self._stored. When a transaction is
-    finished, the oids that have been stored during the transaction are
-    appended to self._finished.
-    """
-
-    # internal
-    _oid = 1
-    _transaction = None
-
-    def __init__(self):
-        # internal
-        self._stored = []
-        self._finished = []
-        self._data = {}
-        self._transdata = {}
-        self._transstored = []
-
-    def new_oid(self):
-        oid = str(self._oid)
-        self._oid += 1
-        return oid
-
-    def sortKey(self):
-        return 'StubStorage sortKey'
-
-    def tpc_begin(self, transaction):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction is None:
-            self._transaction = transaction
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-
-    def tpc_abort(self, transaction):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        del self._transaction
-        self._transdata.clear()
-
-    def tpc_finish(self, transaction, callback):
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        self._finished.extend(self._transstored)
-        self._data.update(self._transdata)
-        callback(transaction)
-        del self._transaction
-        self._transdata.clear()
-        self._transstored = []
-
-    def load(self, oid, version):
-        if version != '':
-            raise TypeError('StubStorage does not support versions.')
-        return self._data[oid]
-
-    def store(self, oid, serial, p, version, transaction):
-        if version != '':
-            raise TypeError('StubStorage does not support versions.')
-        if transaction is None:
-            raise TypeError('transaction may not be None')
-        elif self._transaction != transaction:
-            raise RuntimeError(
-                'StubStorage uses only one transaction at a time')
-        self._stored.append(oid)
-        self._transstored.append(oid)
-        self._transdata[oid] = (p, serial)
-        # Explicitly returning None, as we're not pretending to be a ZEO
-        # storage
-        return None
-
-
-class TestConnectionInterface(unittest.TestCase):
-
-    def test_connection_interface(self):
-        from ZODB.interfaces import IConnection
-        db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
-        cn = db.open()
-        verifyObject(IConnection, cn)
-
-
-class StubDatabase:
-
-    def __init__(self):
-        self._storage = StubStorage()
-
-    classFactory = None
-    database_name = 'stubdatabase'
-    databases = {'stubdatabase': database_name}
-
-    def invalidate(self, transaction, dict_with_oid_keys, connection):
-        pass
-
-def test_suite():
-    s = unittest.makeSuite(ConnectionDotAdd, 'check')
-    s.addTest(doctest.DocTestSuite())
-    s.addTest(unittest.makeSuite(TestConnectionInterface))
-    return s
diff --git a/branches/bug1734/src/ZODB/tests/testDB.py b/branches/bug1734/src/ZODB/tests/testDB.py
deleted file mode 100644
index c0286012..00000000
--- a/branches/bug1734/src/ZODB/tests/testDB.py
+++ /dev/null
@@ -1,143 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import os
-import time
-import unittest
-import warnings
-
-import transaction
-
-import ZODB
-import ZODB.FileStorage
-
-from ZODB.tests.MinPO import MinPO
-
-# Return total number of connections across all pools in a db._pools.
-def nconn(pools):
-    return sum([len(pool.all) for pool in pools.values()])
-
-class DBTests(unittest.TestCase):
-
-    def setUp(self):
-        self.__path = os.path.abspath('test.fs')
-        store = ZODB.FileStorage.FileStorage(self.__path)
-        self.db = ZODB.DB(store)
-
-    def tearDown(self):
-        self.db.close()
-        for s in ('', '.index', '.lock', '.tmp'):
-            if os.path.exists(self.__path+s):
-                os.remove(self.__path+s)
-
-    def dowork(self, version=''):
-        c = self.db.open(version)
-        r = c.root()
-        o = r[time.time()] = MinPO(0)
-        transaction.commit()
-        for i in range(25):
-            o.value = MinPO(i)
-            transaction.commit()
-            o = o.value
-        c.close()
-
-    # make sure the basic methods are callable
-
-    def testSets(self):
-        # test set methods that have non-trivial implementations
-        warnings.filterwarnings("error", category=DeprecationWarning)
-        self.assertRaises(DeprecationWarning,
-                          self.db.setCacheDeactivateAfter, 12)
-        self.assertRaises(DeprecationWarning,
-                          self.db.setVersionCacheDeactivateAfter, 12)
-        # Obscure:  There is no API call for removing the warning we just
-        # added, but filters appears to be a public variable.
-        del warnings.filters[0]
-        self.db.setCacheSize(15)
-        self.db.setVersionCacheSize(15)
-
-    def test_removeVersionPool(self):
-        # Test that we can remove a version pool
-
-        # This is white box because we check some internal data structures
-
-        self.dowork()
-        self.dowork('v2')
-        c1 = self.db.open('v1')
-        c1.close() # return to pool
-        c12 = self.db.open('v1')
-        c12.close() # return to pool
-        self.assert_(c1 is c12) # should be same
-
-        pools = self.db._pools
-
-        self.assertEqual(len(pools), 3)
-        self.assertEqual(nconn(pools), 3)
-
-        self.db.removeVersionPool('v1')
-
-        self.assertEqual(len(pools), 2)
-        self.assertEqual(nconn(pools), 2)
-
-        c12 = self.db.open('v1')
-        c12.close() # return to pool
-        self.assert_(c1 is not c12) # should be different
-
-        self.assertEqual(len(pools), 3)
-        self.assertEqual(nconn(pools), 3)
-
-    def _test_for_leak(self):
-        self.dowork()
-        self.dowork('v2')
-        while 1:
-            c1 = self.db.open('v1')
-            self.db.removeVersionPool('v1')
-            c1.close() # return to pool
-
-    def test_removeVersionPool_while_connection_open(self):
-        # Test that we can remove a version pool
-
-        # This is white box because we check some internal data structures
-
-        self.dowork()
-        self.dowork('v2')
-        c1 = self.db.open('v1')
-        c1.close() # return to pool
-        c12 = self.db.open('v1')
-        self.assert_(c1 is c12) # should be same
-
-        pools = self.db._pools
-
-        self.assertEqual(len(pools), 3)
-        self.assertEqual(nconn(pools), 3)
-
-        self.db.removeVersionPool('v1')
-
-        self.assertEqual(len(pools), 2)
-        self.assertEqual(nconn(pools), 2)
-
-        c12.close() # should leave pools alone
-
-        self.assertEqual(len(pools), 2)
-        self.assertEqual(nconn(pools), 2)
-
-        c12 = self.db.open('v1')
-        c12.close() # return to pool
-        self.assert_(c1 is not c12) # should be different
-
-        self.assertEqual(len(pools), 3)
-        self.assertEqual(nconn(pools), 3)
-
-
-def test_suite():
-    return unittest.makeSuite(DBTests)
diff --git a/branches/bug1734/src/ZODB/tests/testDemoStorage.py b/branches/bug1734/src/ZODB/tests/testDemoStorage.py
deleted file mode 100644
index 06931589..00000000
--- a/branches/bug1734/src/ZODB/tests/testDemoStorage.py
+++ /dev/null
@@ -1,65 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import ZODB.DemoStorage
-import unittest
-
-from ZODB.tests import StorageTestBase, BasicStorage, \
-     VersionStorage, Synchronization
-
-class DemoStorageTests(StorageTestBase.StorageTestBase,
-                       BasicStorage.BasicStorage,
-                       VersionStorage.VersionStorage,
-                       Synchronization.SynchronizedStorage,
-                       ):
-
-    def setUp(self):
-        self._storage = ZODB.DemoStorage.DemoStorage()
-
-    def tearDown(self):
-        self._storage.close()
-
-    def checkOversizeNote(self):
-        # This base class test checks for the common case where a storage
-        # doesn't support huge transaction metadata.  This storage doesn't
-        # have that limit, so we inhibit this test here.
-        pass
-
-    def checkAbortVersionNonCurrent(self):
-        # TODO:  Need to implement a real loadBefore for DemoStorage?
-        pass
-
-    def checkLoadBeforeVersion(self):
-        # TODO:  Need to implement a real loadBefore for DemoStorage?
-        pass
-
-    # the next three pack tests depend on undo
-
-    def checkPackVersionReachable(self):
-        pass
-
-    def checkPackVersions(self):
-        pass
-
-    def checkPackVersionsInPast(self):
-        pass
-
-
-def test_suite():
-    suite = unittest.makeSuite(DemoStorageTests, 'check')
-    return suite
-
-if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    loader.testMethodPrefix = "check"
-    unittest.main(testLoader=loader)
diff --git a/branches/bug1734/src/ZODB/tests/testFileStorage.py b/branches/bug1734/src/ZODB/tests/testFileStorage.py
deleted file mode 100644
index fcb03e9d..00000000
--- a/branches/bug1734/src/ZODB/tests/testFileStorage.py
+++ /dev/null
@@ -1,498 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import os, unittest
-import transaction
-import ZODB.FileStorage
-from ZODB import POSException
-
-from ZODB.tests import StorageTestBase, BasicStorage, \
-     TransactionalUndoStorage, VersionStorage, \
-     TransactionalUndoVersionStorage, PackableStorage, \
-     Synchronization, ConflictResolution, HistoryStorage, \
-     IteratorStorage, Corruption, RevisionStorage, PersistentStorage, \
-     MTStorage, ReadOnlyStorage, RecoveryStorage
-from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
-
-class BaseFileStorageTests(StorageTestBase.StorageTestBase):
-
-    def open(self, **kwargs):
-        self._storage = ZODB.FileStorage.FileStorage('FileStorageTests.fs',
-                                                     **kwargs)
-
-    def setUp(self):
-        self.open(create=1)
-
-    def tearDown(self):
-        self._storage.close()
-        self._storage.cleanup()
-
-class FileStorageTests(
-    BaseFileStorageTests,
-    BasicStorage.BasicStorage,
-    TransactionalUndoStorage.TransactionalUndoStorage,
-    RevisionStorage.RevisionStorage,
-    VersionStorage.VersionStorage,
-    TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
-    PackableStorage.PackableStorage,
-    PackableStorage.PackableUndoStorage,
-    Synchronization.SynchronizedStorage,
-    ConflictResolution.ConflictResolvingStorage,
-    ConflictResolution.ConflictResolvingTransUndoStorage,
-    HistoryStorage.HistoryStorage,
-    IteratorStorage.IteratorStorage,
-    IteratorStorage.ExtendedIteratorStorage,
-    PersistentStorage.PersistentStorage,
-    MTStorage.MTStorage,
-    ReadOnlyStorage.ReadOnlyStorage
-    ):
-
-    def checkLongMetadata(self):
-        s = "X" * 75000
-        try:
-            self._dostore(user=s)
-        except POSException.StorageError:
-            pass
-        else:
-            self.fail("expect long user field to raise error")
-        try:
-            self._dostore(description=s)
-        except POSException.StorageError:
-            pass
-        else:
-            self.fail("expect long user field to raise error")
-
-    def check_use_fsIndex(self):
-        from ZODB.fsIndex import fsIndex
-
-        self.assertEqual(self._storage._index.__class__, fsIndex)
-
-    # A helper for checking that when an .index contains a dict for the
-    # index, it's converted to an fsIndex when the file is opened.
-    def convert_index_to_dict(self):
-        # Convert the index in the current .index file to a Python dict.
-        # Return the index originally found.
-        import cPickle as pickle
-
-        f = open('FileStorageTests.fs.index', 'r+b')
-        p = pickle.Unpickler(f)
-        data = p.load()
-        index = data['index']
-
-        newindex = dict(index)
-        data['index'] = newindex
-
-        f.seek(0)
-        f.truncate()
-        p = pickle.Pickler(f, 1)
-        p.dump(data)
-        f.close()
-        return index
-
-    def check_conversion_to_fsIndex(self, read_only=False):
-        from ZODB.fsIndex import fsIndex
-
-        # Create some data, and remember the index.
-        for i in range(10):
-            self._dostore()
-        oldindex_as_dict = dict(self._storage._index)
-
-        # Save the index.
-        self._storage.close()
-
-        # Convert it to a dict.  The first call rewrites the .index file
-        # and returns the fsIndex it found there; the second call then
-        # returns the plain dict written by the first.
-        old_index = self.convert_index_to_dict()
-        self.assert_(isinstance(old_index, fsIndex))
-        new_index = self.convert_index_to_dict()
-        self.assert_(isinstance(new_index, dict))
-
-        # Verify it's converted to fsIndex in memory upon open.
-        self.open(read_only=read_only)
-        self.assert_(isinstance(self._storage._index, fsIndex))
-
-        # Verify it has the right content.
-        newindex_as_dict = dict(self._storage._index)
-        self.assertEqual(oldindex_as_dict, newindex_as_dict)
-
-        # Check that the type on disk has changed iff read_only is False.
-        self._storage.close()
-        current_index = self.convert_index_to_dict()
-        if read_only:
-            self.assert_(isinstance(current_index, dict))
-        else:
-            self.assert_(isinstance(current_index, fsIndex))
-
-    def check_conversion_to_fsIndex_readonly(self):
-        # Same thing, but the disk .index should continue to hold a
-        # Python dict.
-        self.check_conversion_to_fsIndex(read_only=True)
-
-    def check_conversion_from_dict_to_btree_data_in_fsIndex(self):
-        # To support efficient range searches on its keys as part of
-        # implementing a record iteration protocol in FileStorage, we
-        # converted the fsIndex class from using a dictionary as its
-        # self._data attribute to using an OOBTree in its stead.
-
-        from ZODB.fsIndex import fsIndex
-        from BTrees.OOBTree import OOBTree
-
-        # Create some data, and remember the index.
-        for i in range(10):
-            self._dostore()
-        data_dict = dict(self._storage._index._data)
-
-        # Replace the OOBTree with a dictionary and commit it.
-        self._storage._index._data = data_dict
-        transaction.commit()
-
-        # Save the index.
-        self._storage.close()
-
-        # Verify it's converted to fsIndex in memory upon open.
-        self.open()
-        self.assert_(isinstance(self._storage._index, fsIndex))
-        self.assert_(isinstance(self._storage._index._data, OOBTree))
-
-        # Verify it has the right content.
-        new_data_dict = dict(self._storage._index._data)
-        self.assertEqual(len(data_dict), len(new_data_dict))
-
-        for k in data_dict:
-            old_tree = data_dict[k]
-            new_tree = new_data_dict[k]
-            self.assertEqual(list(old_tree.items()), list(new_tree.items()))
-
-    def check_save_after_load_with_no_index(self):
-        for i in range(10):
-            self._dostore()
-        self._storage.close()
-        os.remove('FileStorageTests.fs.index')
-        self.open()
-        self.assertEqual(self._storage._saved, 1)
-
-    def check_index_oid_ignored(self):
-        # Prior to ZODB 3.2.6, the 'oid' value stored in the .index file
-        # was believed.  But there were cases where adding larger oids
-        # didn't update the FileStorage ._oid attribute -- the restore()
-        # method in particular didn't update it, and that's about the only
-        # method copyTransactionsFrom() uses.  A database copy created that
-        # way then stored an 'oid' of z64 in the .index file.  This created
-        # torturous problems, as when that file was opened, "new" oids got
-        # generated starting over from 0 again.
-        # Now the cached 'oid' value is ignored:  verify that this is so.
-        import cPickle as pickle
-        from ZODB.utils import z64
-        from ZODB.DB import DB
-
-        # Create some data.
-        db = DB(self._storage)
-        conn = db.open()
-        conn.root()['xyz'] = 1
-        transaction.commit()
-        true_max_oid = self._storage._oid
-
-        # Save away the index, and poke in a bad 'oid' value by hand.
-        db.close()
-        f = open('FileStorageTests.fs.index', 'r+b')
-        p = pickle.Unpickler(f)
-        data = p.load()
-        saved_oid = data['oid']
-        self.assertEqual(true_max_oid, saved_oid)
-        data['oid'] = z64
-        f.seek(0)
-        f.truncate()
-        p = pickle.Pickler(f, 1)
-        p.dump(data)
-        f.close()
-
-        # Verify that we get the correct oid again when we reopen, despite
-        # that we stored nonsense in the .index file's 'oid'.
-        self.open()
-        self.assertEqual(self._storage._oid, true_max_oid)
-
-    # This would make the unit tests too slow
-    # check_save_after_load_that_worked_hard(self)
-
-    def check_periodic_save_index(self):
-
-        # Check the basic algorithm
-        oldsaved = self._storage._saved
-        self._storage._records_before_save = 10
-        for i in range(4):
-            self._dostore()
-        self.assertEqual(self._storage._saved, oldsaved)
-        self._dostore()
-        self.assertEqual(self._storage._saved, oldsaved+1)
-
-        # Now make sure the parameter changes as we get bigger
-        for i in range(20):
-            self._dostore()
-
-        self.failUnless(self._storage._records_before_save > 20)
-
-    def checkStoreBumpsOid(self):
-        # If .store() is handed an oid bigger than the storage knows
-        # about already, it's crucial that the storage bump its notion
-        # of the largest oid in use.
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        giant_oid = '\xee' * 8
-        # Store an object.
-        # oid, serial, data, version, transaction
-        r1 = self._storage.store(giant_oid, '\0'*8, 'data', '', t)
-        # Finish the transaction.
-        r2 = self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # Before ZODB 3.2.6, this failed, with ._oid == z64.
-        self.assertEqual(self._storage._oid, giant_oid)
-
-    def checkRestoreBumpsOid(self):
-        # As above, if .restore() is handed an oid bigger than the storage
-        # knows about already, it's crucial that the storage bump its notion
-        # of the largest oid in use.  Because copyTransactionsFrom(), and
-        # ZRS recovery, use the .restore() method, this is plain critical.
-        t = transaction.Transaction()
-        self._storage.tpc_begin(t)
-        giant_oid = '\xee' * 8
-        # Store an object.
-        # oid, serial, data, version, prev_txn, transaction
-        r1 = self._storage.restore(giant_oid, '\0'*8, 'data', '', None, t)
-        # Finish the transaction.
-        r2 = self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        # Before ZODB 3.2.6, this failed, with ._oid == z64.
-        self.assertEqual(self._storage._oid, giant_oid)
-
-    def checkCorruptionInPack(self):
-        # This sets up a corrupt .fs file, with a redundant transaction
-        # length mismatch.  The implementation of pack in many releases of
-        # ZODB blew up if the .fs file had such damage:  it detected the
-        # damage, but the code to raise CorruptedError referenced an undefined
-        # global.
-        import time
-
-        from ZODB.DB import DB
-        from ZODB.utils import U64, p64
-        from ZODB.FileStorage.format import CorruptedError
-
-        db = DB(self._storage)
-        conn = db.open()
-        conn.root()['xyz'] = 1
-        transaction.commit()
-
-        # Ensure it's all on disk.
-        db.close()
-        self._storage.close()
-
-        # Reopen before damaging.
-        self.open()
-
-        # Open .fs directly, and damage content.
-        f = open('FileStorageTests.fs', 'r+b')
-        f.seek(0, 2)
-        pos2 = f.tell() - 8
-        f.seek(pos2)
-        tlen2 = U64(f.read(8))  # length-8 of the last transaction
-        pos1 = pos2 - tlen2 + 8 # skip over the tid at the start
-        f.seek(pos1)
-        tlen1 = U64(f.read(8))  # should be redundant length-8
-        self.assertEqual(tlen1, tlen2)  # verify that it is redundant
-
-        # Now damage the second copy.
-        f.seek(pos2)
-        f.write(p64(tlen2 - 1))
-        f.close()
-
-        # Try to pack.  This used to yield
-        #     NameError: global name 's' is not defined
-        try:
-            self._storage.pack(time.time(), None)
-        except CorruptedError, detail:
-            self.assert_("redundant transaction length does not match "
-                         "initial transaction length" in str(detail))
-        else:
-            self.fail("expected CorruptedError")
-
-    def check_record_iternext(self):
-        from ZODB.DB import DB
-
-        db = DB(self._storage)
-        conn = db.open()
-        conn.root()['abc'] = MinPO('abc')
-        conn.root()['xyz'] = MinPO('xyz')
-        transaction.commit()
-
-        # Ensure it's all on disk.
-        db.close()
-        self._storage.close()
-
-        self.open()
-
-        key = None
-        for x in ('\000', '\001', '\002'):
-            oid, tid, data, next_oid = self._storage.record_iternext(key)
-            self.assertEqual(oid, ('\000' * 7) + x)
-            key = next_oid
-            expected_data, expected_tid = self._storage.load(oid, '')
-            self.assertEqual(expected_data, data)
-            self.assertEqual(expected_tid, tid)
-            if x == '\002':
-                self.assertEqual(next_oid, None)
-            else:
-                self.assertNotEqual(next_oid, None)
-
-
-class FileStorageRecoveryTest(
-    StorageTestBase.StorageTestBase,
-    RecoveryStorage.RecoveryStorage,
-    ):
-
-    def setUp(self):
-        self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
-        self._dst = ZODB.FileStorage.FileStorage("Dest.fs", create=True)
-
-    def tearDown(self):
-        self._storage.close()
-        self._dst.close()
-        self._storage.cleanup()
-        self._dst.cleanup()
-
-    def new_dest(self):
-        return ZODB.FileStorage.FileStorage('Dest.fs')
-
-class SlowFileStorageTest(BaseFileStorageTests):
-
-    level = 2
-
-    def check10Kstores(self):
-        # The _get_cached_serial() method has a special case
-        # every 8000 calls.  Make sure it gets minimal coverage.
-        oids = [[self._storage.new_oid(), None] for i in range(100)]
-        for i in range(100):
-            t = transaction.Transaction()
-            self._storage.tpc_begin(t)
-            for j in range(100):
-                o = MinPO(j)
-                oid, revid = oids[j]
-                serial = self._storage.store(oid, revid, zodb_pickle(o), "", t)
-                oids[j][1] = serial
-            self._storage.tpc_vote(t)
-            self._storage.tpc_finish(t)
-
-# Raise an exception if the tids in FileStorage fs aren't
-# strictly increasing.
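-# Raw tids are 8-byte big-endian timestamps, so plain string comparison
-# matches chronological order.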
-def checkIncreasingTids(fs):
-    lasttid = '\0' * 8
-    for txn in fs.iterator():
-        if lasttid >= txn.tid:
-            raise ValueError("tids out of order %r >= %r" % (lasttid, tid))
-        lasttid = txn.tid
-
-# Return a TimeStamp object 'minutes' minutes in the future.
-def timestamp(minutes):
-    import time
-    from persistent.TimeStamp import TimeStamp
-
-    t = time.time() + 60 * minutes
-    return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
-
-def testTimeTravelOnOpen():
-    """
-    >>> from ZODB.FileStorage import FileStorage
-    >>> from ZODB.DB import DB
-    >>> import transaction
-    >>> from zope.testing.loggingsupport import InstalledHandler
-
-    Arrange to capture log messages -- they're an important part of
-    this test!
-
-    >>> handler = InstalledHandler('ZODB.FileStorage')
-
-    Create a new file storage.
-
-    >>> st = FileStorage('temp.fs', create=True)
-    >>> db = DB(st)
-    >>> db.close()
-
-    First check the normal case:  transactions are recorded with
-    increasing tids, and time doesn't run backwards.
-
-    >>> st = FileStorage('temp.fs')
-    >>> db = DB(st)
-    >>> conn = db.open()
-    >>> conn.root()['xyz'] = 1
-    >>> transaction.get().commit()
-    >>> checkIncreasingTids(st)
-    >>> db.close()
-    >>> st.cleanup() # remove .fs, .index, etc files
-    >>> handler.records   # i.e., no log messages
-    []
-
-    Now force the database to have transaction records with tids from
-    the future.
-
-    >>> st = FileStorage('temp.fs', create=True)
-    >>> st._ts = timestamp(15)  # 15 minutes in the future
-    >>> db = DB(st)
-    >>> db.close()
-
-    >>> st = FileStorage('temp.fs') # this should log a warning
-    >>> db = DB(st)
-    >>> conn = db.open()
-    >>> conn.root()['xyz'] = 1
-    >>> transaction.get().commit()
-    >>> checkIncreasingTids(st)
-    >>> db.close()
-    >>> st.cleanup()
-
-    >>> [record.levelname for record in handler.records]
-    ['WARNING']
-    >>> handler.clear()
-
-    And one more time, with transaction records far in the future.
-    We expect to log a critical error then, as a time so far in the
-    future probably indicates a real problem with the system.  Shorter
-    spans may be due to clock drift.
-
-    >>> st = FileStorage('temp.fs', create=True)
-    >>> st._ts = timestamp(60)  # an hour in the future
-    >>> db = DB(st)
-    >>> db.close()
-
-    >>> st = FileStorage('temp.fs') # this should log a critical error
-    >>> db = DB(st)
-    >>> conn = db.open()
-    >>> conn.root()['xyz'] = 1
-    >>> transaction.get().commit()
-    >>> checkIncreasingTids(st)
-    >>> db.close()
-    >>> st.cleanup()
-
-    >>> [record.levelname for record in handler.records]
-    ['CRITICAL']
-    >>> handler.clear()
-    >>> handler.uninstall()
-    """
-
-def test_suite():
-    import doctest
-
-    suite = unittest.TestSuite()
-    for klass in [FileStorageTests, Corruption.FileStorageCorruptTests,
-                  FileStorageRecoveryTest, SlowFileStorageTest]:
-        suite.addTest(unittest.makeSuite(klass, "check"))
-    suite.addTest(doctest.DocTestSuite())
-    return suite
-
-if __name__=='__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/ZODB/tests/testMappingStorage.py b/branches/bug1734/src/ZODB/tests/testMappingStorage.py
deleted file mode 100644
index e21cc09c..00000000
--- a/branches/bug1734/src/ZODB/tests/testMappingStorage.py
+++ /dev/null
@@ -1,47 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import ZODB.MappingStorage
-import unittest
-
-from ZODB.tests import StorageTestBase
-from ZODB.tests \
-     import BasicStorage, MTStorage, Synchronization, PackableStorage
-
-class MappingStorageTests(StorageTestBase.StorageTestBase,
-                          BasicStorage.BasicStorage,
-                          MTStorage.MTStorage,
-                          PackableStorage.PackableStorage,
-                          Synchronization.SynchronizedStorage,
-                          ):
-
-    def setUp(self):
-        self._storage = ZODB.MappingStorage.MappingStorage()
-
-    def tearDown(self):
-        self._storage.close()
-
-    def checkOversizeNote(self):
-        # This base class test checks for the common case where a storage
-        # doesn't support huge transaction metadata. This storage doesn't
-        # have this limit, so we disable the test here.
-        pass
-
-def test_suite():
-    suite = unittest.makeSuite(MappingStorageTests, 'check')
-    return suite
-
-if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    loader.testMethodPrefix = "check"
-    unittest.main(testLoader=loader)
diff --git a/branches/bug1734/src/ZODB/tests/testPersistentList.py b/branches/bug1734/src/ZODB/tests/testPersistentList.py
deleted file mode 100644
index 233a0070..00000000
--- a/branches/bug1734/src/ZODB/tests/testPersistentList.py
+++ /dev/null
@@ -1,223 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the list interface to PersistentList
-"""
-
-import unittest
-from persistent.list import PersistentList
-
-l0 = []
-l1 = [0]
-l2 = [0, 1]
-
-class TestPList(unittest.TestCase):
-    def checkTheWorld(self):
-        # Test constructors
-        u = PersistentList()
-        u0 = PersistentList(l0)
-        u1 = PersistentList(l1)
-        u2 = PersistentList(l2)
-
-        uu = PersistentList(u)
-        uu0 = PersistentList(u0)
-        uu1 = PersistentList(u1)
-        uu2 = PersistentList(u2)
-
-        v = PersistentList(tuple(u))
-        class OtherList:
-            def __init__(self, initlist):
-                self.__data = initlist
-            def __len__(self):
-                return len(self.__data)
-            def __getitem__(self, i):
-                return self.__data[i]
-        v0 = PersistentList(OtherList(u0))
-        vv = PersistentList("this is also a sequence")
-
-        # Test __repr__
-        eq = self.assertEqual
-
-        eq(str(u0), str(l0), "str(u0) == str(l0)")
-        eq(repr(u1), repr(l1), "repr(u1) == repr(l1)")
-        eq(`u2`, `l2`, "`u2` == `l2`")
-
-        # Test __cmp__ and __len__
-
-        def mycmp(a, b):
-            r = cmp(a, b)
-            if r < 0: return -1
-            if r > 0: return 1
-            return r
-
-        all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
-        for a in all:
-            for b in all:
-                eq(mycmp(a, b), mycmp(len(a), len(b)),
-                      "mycmp(a, b) == mycmp(len(a), len(b))")
-
-        # Test __getitem__
-
-        for i in range(len(u2)):
-            eq(u2[i], i, "u2[i] == i")
-
-        # Test __setitem__
-
-        uu2[0] = 0
-        uu2[1] = 100
-        try:
-            uu2[2] = 200
-        except IndexError:
-            pass
-        else:
-            raise TestFailed("uu2[2] shouldn't be assignable")
-
-        # Test __delitem__
-
-        del uu2[1]
-        del uu2[0]
-        try:
-            del uu2[0]
-        except IndexError:
-            pass
-        else:
-            raise TestFailed("uu2[0] shouldn't be deletable")
-
-        # Test __getslice__
-
-        for i in range(-3, 4):
-            eq(u2[:i], l2[:i], "u2[:i] == l2[:i]")
-            eq(u2[i:], l2[i:], "u2[i:] == l2[i:]")
-            for j in range(-3, 4):
-                eq(u2[i:j], l2[i:j], "u2[i:j] == l2[i:j]")
-
-        # Test __setslice__
-
-        for i in range(-3, 4):
-            u2[:i] = l2[:i]
-            eq(u2, l2, "u2 == l2")
-            u2[i:] = l2[i:]
-            eq(u2, l2, "u2 == l2")
-            for j in range(-3, 4):
-                u2[i:j] = l2[i:j]
-                eq(u2, l2, "u2 == l2")
-
-        uu2 = u2[:]
-        uu2[:0] = [-2, -1]
-        eq(uu2, [-2, -1, 0, 1], "uu2 == [-2, -1, 0, 1]")
-        uu2[0:] = []
-        eq(uu2, [], "uu2 == []")
-
-        # Test __contains__
-        for i in u2:
-            self.failUnless(i in u2, "i in u2")
-        for i in min(u2)-1, max(u2)+1:
-            self.failUnless(i not in u2, "i not in u2")
-
-        # Test __delslice__
-
-        uu2 = u2[:]
-        del uu2[1:2]
-        del uu2[0:1]
-        eq(uu2, [], "uu2 == []")
-
-        uu2 = u2[:]
-        del uu2[1:]
-        del uu2[:1]
-        eq(uu2, [], "uu2 == []")
-
-        # Test __add__, __radd__, __mul__ and __rmul__
-
-        #self.failUnless(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
-        self.failUnless(u1 + [1] == u2, "u1 + [1] == u2")
-        #self.failUnless([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
-        self.failUnless(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
-        self.failUnless(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
-        self.failUnless(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
-
-        # Test append
-
-        u = u1[:]
-        u.append(1)
-        eq(u, u2, "u == u2")
-
-        # Test insert
-
-        u = u2[:]
-        u.insert(0, -1)
-        eq(u, [-1, 0, 1], "u == [-1, 0, 1]")
-
-        # Test pop
-
-        u = PersistentList([0, -1, 1])
-        u.pop()
-        eq(u, [0, -1], "u == [0, -1]")
-        u.pop(0)
-        eq(u, [-1], "u == [-1]")
-
-        # Test remove
-
-        u = u2[:]
-        u.remove(1)
-        eq(u, u1, "u == u1")
-
-        # Test count
-        u = u2*3
-        eq(u.count(0), 3, "u.count(0) == 3")
-        eq(u.count(1), 3, "u.count(1) == 3")
-        eq(u.count(2), 0, "u.count(2) == 0")
-
-
-        # Test index
-
-        eq(u2.index(0), 0, "u2.index(0) == 0")
-        eq(u2.index(1), 1, "u2.index(1) == 1")
-        try:
-            u2.index(2)
-        except ValueError:
-            pass
-        else:
-            raise TestFailed("expected ValueError")
-
-        # Test reverse
-
-        u = u2[:]
-        u.reverse()
-        eq(u, [1, 0], "u == [1, 0]")
-        u.reverse()
-        eq(u, u2, "u == u2")
-
-        # Test sort
-
-        u = PersistentList([1, 0])
-        u.sort()
-        eq(u, u2, "u == u2")
-
-        # Test extend
-
-        u = u1[:]
-        u.extend(u2)
-        eq(u, u1 + u2, "u == u1 + u2")
-
-    def checkBackwardCompat(self):
-        # Verify that the sanest of the ZODB 3.2 dotted paths still works.
-        from ZODB.PersistentList import PersistentList as oldPath
-        self.assert_(oldPath is PersistentList)
-
-def test_suite():
-    return unittest.makeSuite(TestPList, 'check')
-
-if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    loader.testMethodPrefix = "check"
-    unittest.main(testLoader=loader)
diff --git a/branches/bug1734/src/ZODB/tests/testPersistentMapping.py b/branches/bug1734/src/ZODB/tests/testPersistentMapping.py
deleted file mode 100644
index adddb4db..00000000
--- a/branches/bug1734/src/ZODB/tests/testPersistentMapping.py
+++ /dev/null
@@ -1,108 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Verify that PersistentMapping works with old versions of Zope.
-
-The comments in PersistentMapping.py address the issue in some detail.
-The pickled form of a PersistentMapping must use _container to store
-the actual mapping, because old versions of Zope used this attribute.
-If the new code doesn't generate pickles that are consistent with the
-old code, developers will have a hard time testing the new code.
-"""
-
-import unittest
-
-import transaction
-from transaction import Transaction
-import ZODB
-from ZODB.MappingStorage import MappingStorage
-import cPickle
-import cStringIO
-import sys
-
-# This pickle contains a persistent mapping pickle created from the
-# old code.
-pickle = ('((U\x0bPersistenceq\x01U\x11PersistentMappingtq\x02Nt.}q\x03U\n'
-          '_containerq\x04}q\x05U\x07versionq\x06U\x03oldq\x07ss.\n')
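-
-# The string above is really two pickles back to back, the way ZODB
-# stores records: first the class description, then the state.  A rough
-# way to eyeball both (assuming the stdlib's pickletools, Python 2.3+):
-#
-#     import cStringIO, pickletools
-#     f = cStringIO.StringIO(pickle)
-#     pickletools.dis(f)  # class part: (Persistence, PersistentMapping)
-#     pickletools.dis(f)  # state part: {'_container': {'version': 'old'}}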
-
-class PMTests(unittest.TestCase):
-
-    def checkOldStyleRoot(self):
-        # insert the pickle in place of the root
-        s = MappingStorage()
-        t = Transaction()
-        s.tpc_begin(t)
-        s.store('\000' * 8, None, pickle, '', t)
-        s.tpc_vote(t)
-        s.tpc_finish(t)
-
-        db = ZODB.DB(s)
-        # If the root can be loaded successfully, we should be okay.
-        r = db.open().root()
-        # But make sure it looks like a new mapping
-        self.assert_(hasattr(r, 'data'))
-        self.assert_(not hasattr(r, '_container'))
-
-    # TODO:  This test fails in ZODB 3.3a1.  It's making some assumption(s)
-    # about pickles that aren't true.  Hard to say when it stopped working,
-    # because this entire test suite hasn't been run for a long time, due to
-    # a mysterious "return None" at the start of the test_suite() function
-    # below.  I noticed that when the new checkBackwardCompat() test wasn't
-    # getting run.
-    def TODO_checkNewPicklesAreSafe(self):
-        s = MappingStorage()
-        db = ZODB.DB(s)
-        r = db.open().root()
-        r[1] = 1
-        r[2] = 2
-        r[3] = r
-        transaction.commit()
-        # MappingStorage stores serialno + pickle in its _index.
-        root_pickle = s._index['\000' * 8][8:]
-
-        f = cStringIO.StringIO(root_pickle)
-        u = cPickle.Unpickler(f)
-        klass_info = u.load()
-        klass = find_global(*klass_info[0])
-        inst = klass.__new__(klass)
-        state = u.load()
-        inst.__setstate__(state)
-
-        self.assert_(hasattr(inst, '_container'))
-        self.assert_(not hasattr(inst, 'data'))
-
-    def checkBackwardCompat(self):
-        # Verify that the sanest of the ZODB 3.2 dotted paths still works.
-        from persistent.mapping import PersistentMapping as newPath
-        from ZODB.PersistentMapping import PersistentMapping as oldPath
-
-        self.assert_(oldPath is newPath)
-
-def find_global(modulename, classname):
-    """Helper for this test suite to get special PersistentMapping"""
-
-    if classname == "PersistentMapping":
-        class PersistentMapping(object):
-            def __setstate__(self, state):
-                self.__dict__.update(state)
-        return PersistentMapping
-    else:
-        __import__(modulename)
-        mod = sys.modules[modulename]
-        return getattr(mod, classname)
-
-def test_suite():
-    return unittest.makeSuite(PMTests, 'check')
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/branches/bug1734/src/ZODB/tests/testRecover.py b/branches/bug1734/src/ZODB/tests/testRecover.py
deleted file mode 100644
index 14846a11..00000000
--- a/branches/bug1734/src/ZODB/tests/testRecover.py
+++ /dev/null
@@ -1,185 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the file storage recovery script."""
-
-import base64
-import os
-import random
-import sys
-import tempfile
-import unittest
-import StringIO
-
-import ZODB
-from ZODB.FileStorage import FileStorage
-from ZODB.fsrecover import recover
-
-from persistent.mapping import PersistentMapping
-import transaction
-
-class RecoverTest(unittest.TestCase):
-
-    level = 2
-
-    path = None
-
-    def setUp(self):
-        self.path = tempfile.mktemp(suffix=".fs")
-        self.storage = FileStorage(self.path)
-        self.populate()
-        self.dest = tempfile.mktemp(suffix=".fs")
-        self.recovered = None
-
-    def tearDown(self):
-        self.storage.close()
-        if self.recovered is not None:
-            self.recovered.close()
-        self.storage.cleanup()
-        temp = FileStorage(self.dest)
-        temp.close()
-        temp.cleanup()
-
-    def populate(self):
-        db = ZODB.DB(self.storage)
-        cn = db.open()
-        rt = cn.root()
-
-        # Create a bunch of objects; the Data.fs is about 100KB.
-        for i in range(50):
-            d = rt[i] = PersistentMapping()
-            transaction.commit()
-            for j in range(50):
-                d[j] = "a" * j
-            transaction.commit()
-
-    def damage(self, num, size):
-        self.storage.close()
-        # Drop size null bytes into num random spots.
-        for i in range(num):
-            offset = random.randint(0, self.storage._pos - size)
-            f = open(self.path, "a+b")
-            f.seek(offset)
-            f.write("\0" * size)
-            f.close()
-
-    ITERATIONS = 5
-
-    # Run recovery, from self.path to self.dest.  Return whatever
-    # recovery printed to stdout, as a string.
-    def recover(self):
-        orig_stdout = sys.stdout
-        faux_stdout = StringIO.StringIO()
-        try:
-            sys.stdout = faux_stdout
-            try:
-                recover(self.path, self.dest,
-                        verbose=0, partial=True, force=False, pack=1)
-            except SystemExit:
-                raise RuntimeError, "recover tried to exit"
-        finally:
-            sys.stdout = orig_stdout
-        return faux_stdout.getvalue()
-
-    # Caution:  because recovery is robust against many kinds of damage,
-    # it's almost impossible for a call to self.recover() to raise an
-    # exception.  As a result, these tests may pass even if fsrecover.py
-    # is broken badly.  testNoDamage() tries to ensure that at least
-    # recovery doesn't produce any error msgs if the input .fs is in
-    # fact not damaged.
-    def testNoDamage(self):
-        output = self.recover()
-        self.assert_('error' not in output, output)
-        self.assert_('\n0 bytes removed during recovery' in output, output)
-
-        # Verify that the recovered database is identical to the original.
-        before = file(self.path, 'rb')
-        before_guts = before.read()
-        before.close()
-
-        after = file(self.dest, 'rb')
-        after_guts = after.read()
-        after.close()
-
-        self.assertEqual(before_guts, after_guts,
-                         "recovery changed a non-damaged .fs file")
-
-    def testOneBlock(self):
-        for i in range(self.ITERATIONS):
-            self.damage(1, 1024)
-            output = self.recover()
-            self.assert_('error' in output, output)
-            self.recovered = FileStorage(self.dest)
-            self.recovered.close()
-            os.remove(self.path)
-            os.rename(self.dest, self.path)
-
-    def testFourBlocks(self):
-        for i in range(self.ITERATIONS):
-            self.damage(4, 512)
-            output = self.recover()
-            self.assert_('error' in output, output)
-            self.recovered = FileStorage(self.dest)
-            self.recovered.close()
-            os.remove(self.path)
-            os.rename(self.dest, self.path)
-
-    def testBigBlock(self):
-        for i in range(self.ITERATIONS):
-            self.damage(1, 32 * 1024)
-            output = self.recover()
-            self.assert_('error' in output, output)
-            self.recovered = FileStorage(self.dest)
-            self.recovered.close()
-            os.remove(self.path)
-            os.rename(self.dest, self.path)
-
-    def testBadTransaction(self):
-        # Find transaction headers and blast them.
-
-        L = self.storage.undoLog()
-        r = L[3]
-        tid = base64.decodestring(r["id"] + "\n")
-        pos1 = self.storage._txn_find(tid, 0)
-
-        r = L[8]
-        tid = base64.decodestring(r["id"] + "\n")
-        pos2 = self.storage._txn_find(tid, 0)
-
-        self.storage.close()
-
-        # Overwrite the entire header.
-        f = open(self.path, "a+b")
-        f.seek(pos1 - 50)
-        f.write("\0" * 100)
-        f.close()
-        output = self.recover()
-        self.assert_('error' in output, output)
-        self.recovered = FileStorage(self.dest)
-        self.recovered.close()
-        os.remove(self.path)
-        os.rename(self.dest, self.path)
-
-        # Overwrite part of the header.
-        f = open(self.path, "a+b")
-        f.seek(pos2 + 10)
-        f.write("\0" * 100)
-        f.close()
-        output = self.recover()
-        self.assert_('error' in output, output)
-        self.recovered = FileStorage(self.dest)
-        self.recovered.close()
-
-
-def test_suite():
-    return unittest.makeSuite(RecoverTest)
diff --git a/branches/bug1734/src/ZODB/tests/testSerialize.py b/branches/bug1734/src/ZODB/tests/testSerialize.py
deleted file mode 100644
index 44252088..00000000
--- a/branches/bug1734/src/ZODB/tests/testSerialize.py
+++ /dev/null
@@ -1,124 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests of the serializer."""
-
-import cPickle
-import cStringIO as StringIO
-import sys
-import unittest
-
-from ZODB import serialize
-
-
-class ClassWithNewargs(int):
-    def __new__(cls, value):
-        return int.__new__(cls, value)
-
-    def __getnewargs__(self):
-        return int(self),
-
-class ClassWithoutNewargs(object):
-    def __init__(self, value):
-        self.value = value
-
-def make_pickle(ob):
-    sio = StringIO.StringIO()
-    p = cPickle.Pickler(sio, 1)
-    p.dump(ob)
-    return sio.getvalue()
-
-
-class SerializerTestCase(unittest.TestCase):
-
-    # old format:  (module, name), None
-    old_style_without_newargs = make_pickle(
-        ((__name__, "ClassWithoutNewargs"), None))
-
-    # old format:  (module, name), argtuple
-    old_style_with_newargs = make_pickle(
-        ((__name__, "ClassWithNewargs"), (1,)))
-
-    # new format:  klass
-    new_style_without_newargs = make_pickle(
-        ClassWithoutNewargs)
-
-    # new format:  klass, argtuple
-    new_style_with_newargs = make_pickle(
-        (ClassWithNewargs, (1,)))
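-
-    # Note: a full ZODB data record appends a state pickle after the
-    # class pickle.  The constants here carry only the class pickle,
-    # which is all that getClassName() and getGhost() need to read.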
-
-    def test_getClassName(self):
-        r = serialize.BaseObjectReader()
-        eq = self.assertEqual
-        eq(r.getClassName(self.old_style_with_newargs),
-           __name__ + ".ClassWithNewargs")
-        eq(r.getClassName(self.new_style_with_newargs),
-           __name__ + ".ClassWithNewargs")
-        eq(r.getClassName(self.old_style_without_newargs),
-           __name__ + ".ClassWithoutNewargs")
-        eq(r.getClassName(self.new_style_without_newargs),
-           __name__ + ".ClassWithoutNewargs")
-
-    def test_getGhost(self):
-        # Use a TestObjectReader since we need _get_class() to be
-        # implemented; otherwise this is just a BaseObjectReader.
-
-        class TestObjectReader(serialize.BaseObjectReader):
-            # A production object reader would optimize this, but we
-            # don't need to in a test
-            def _get_class(self, module, name):
-                __import__(module)
-                return getattr(sys.modules[module], name)
-
-        r = TestObjectReader()
-        g = r.getGhost(self.old_style_with_newargs)
-        self.assert_(isinstance(g, ClassWithNewargs))
-        self.assertEqual(g, 1)
-        g = r.getGhost(self.old_style_without_newargs)
-        self.assert_(isinstance(g, ClassWithoutNewargs))
-        g = r.getGhost(self.new_style_with_newargs)
-        self.assert_(isinstance(g, ClassWithNewargs))
-        g = r.getGhost(self.new_style_without_newargs)
-        self.assert_(isinstance(g, ClassWithoutNewargs))
-
-    def test_myhasattr(self):
-
-        class OldStyle:
-            bar = "bar"
-            def __getattr__(self, name):
-                if name == "error":
-                    raise ValueError("whee!")
-                else:
-                    raise AttributeError, name
-
-        class NewStyle(object):
-            bar = "bar"
-            def _raise(self):
-                raise ValueError("whee!")
-            error = property(_raise)
-
-        self.assertRaises(ValueError,
-                          serialize.myhasattr, OldStyle(), "error")
-        self.assertRaises(ValueError,
-                          serialize.myhasattr, NewStyle(), "error")
-        self.assert_(serialize.myhasattr(OldStyle(), "bar"))
-        self.assert_(serialize.myhasattr(NewStyle(), "bar"))
-        self.assert_(not serialize.myhasattr(OldStyle(), "rat"))
-        self.assert_(not serialize.myhasattr(NewStyle(), "rat"))
-
-
-def test_suite():
-    import doctest
-    suite = unittest.makeSuite(SerializerTestCase)
-    suite.addTest(doctest.DocTestSuite("ZODB.serialize"))
-    return suite
diff --git a/branches/bug1734/src/ZODB/tests/testSubTransaction.py b/branches/bug1734/src/ZODB/tests/testSubTransaction.py
deleted file mode 100644
index 15c288c3..00000000
--- a/branches/bug1734/src/ZODB/tests/testSubTransaction.py
+++ /dev/null
@@ -1,138 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-r"""
-ZODB subtransaction tests
-=========================
-
-Subtransactions are provided by a generic transaction interface, but
-only supported by ZODB.  These tests verify that some of the important
-cases work as expected.
-
->>> import transaction
->>> from ZODB import DB
->>> from ZODB.tests.test_storage import MinimalMemoryStorage
->>> from ZODB.tests.MinPO import MinPO
-
-First create a few objects in the database root with a normal commit.
-We're going to make a series of modifications to these objects.
-
->>> db = DB(MinimalMemoryStorage())
->>> cn = db.open()
->>> rt = cn.root()
->>> def init():
-...     global a, b, c
-...     a = rt["a"] = MinPO("a0")
-...     b = rt["b"] = MinPO("b0")
-...     c = rt["c"] = MinPO("c0")
-...     transaction.commit()
->>> init()
-
-We'll also open a second database connection and use it to verify that
-the intermediate results of subtransactions are not visible to other
-connections.
-
->>> cn2 = db.open(synch=False)
->>> rt2 = cn2.root()
->>> shadow_a = rt2["a"]
->>> shadow_b = rt2["b"]
->>> shadow_c = rt2["c"]
-
-Subtransaction commit
----------------------
-
-We'll make a series of modifications in subtransactions.
-
->>> a.value = "a1"
->>> b.value = "b1"
->>> transaction.commit(1)
->>> a.value, b.value
-('a1', 'b1')
->>> shadow_a.value, shadow_b.value
-('a0', 'b0')
-
->>> a.value = "a2"
->>> c.value = "c1"
->>> transaction.commit(1)
->>> a.value, c.value
-('a2', 'c1')
->>> shadow_a.value, shadow_c.value
-('a0', 'c0')
-
->>> a.value = "a3"
->>> transaction.commit(1)
->>> a.value
-'a3'
->>> shadow_a.value
-'a0'
-
->>> transaction.commit()
-
->>> a.value, b.value, c.value
-('a3', 'b1', 'c1')
-
-Subtransaction with nested abort
---------------------------------
-
->>> init()
->>> a.value = "a1"
->>> transaction.commit(1)
-
->>> b.value = "b1"
->>> transaction.commit(1)
-
-A sub-transaction abort will undo current changes, reverting to the
-database state as of the last sub-transaction commit.  There is
-(apparently) no way to abort an already-committed subtransaction.
-
->>> c.value = "c1"
->>> transaction.abort(1)
-
-Multiple aborts have no extra effect.
-
->>> transaction.abort(1)
-
->>> a.value, b.value, c.value
-('a1', 'b1', 'c0')
-
->>> transaction.commit()
->>> a.value, b.value, c.value
-('a1', 'b1', 'c0')
-
-Subtransaction with top-level abort
------------------------------------
-
->>> init()
->>> a.value = "a1"
->>> transaction.commit(1)
-
->>> b.value = "b1"
->>> transaction.commit(1)
-
-A sub-transaction abort will undo current changes, reverting to the
-database state as of the last sub-transaction commit.  There is
-(apparently) no way to abort an already-committed subtransaction.
-
->>> c.value = "c1"
->>> transaction.abort(1)
-
->>> transaction.abort()
->>> a.value, b.value, c.value
-('a0', 'b0', 'c0')
-
-"""
-
-import doctest
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/ZODB/tests/testTimeStamp.py b/branches/bug1734/src/ZODB/tests/testTimeStamp.py
deleted file mode 100644
index 3bd6ecf0..00000000
--- a/branches/bug1734/src/ZODB/tests/testTimeStamp.py
+++ /dev/null
@@ -1,144 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the TimeStamp utility type"""
-
-import time
-import unittest
-
-from persistent.TimeStamp import TimeStamp
-
-EPSILON = 0.000001
-
-class TimeStampTests(unittest.TestCase):
-
-    def checkYMDTimeStamp(self):
-        self._check_ymd(2001, 6, 3)
-
-    def _check_ymd(self, yr, mo, dy):
-        ts = TimeStamp(yr, mo, dy)
-        self.assertEqual(ts.year(), yr)
-        self.assertEqual(ts.month(), mo)
-        self.assertEqual(ts.day(), dy)
-
-        self.assertEquals(ts.hour(), 0)
-        self.assertEquals(ts.minute(), 0)
-        self.assertEquals(ts.second(), 0)
-
-        t = time.gmtime(ts.timeTime())
-        self.assertEquals(yr, t[0])
-        self.assertEquals(mo, t[1])
-        self.assertEquals(dy, t[2])
-
-    def checkFullTimeStamp(self):
-        native_ts = int(time.time()) # fractional seconds get in the way
-        t = time.gmtime(native_ts)   # the corresponding GMT struct tm
-        ts = TimeStamp(*t[:6])
-
-        # Seconds are stored internally via (conceptually) multiplying by
-        # 2**32 then dividing by 60, ending up with a 32-bit integer.
-        # While this gives a lot of room for cramming many distinct
-        # TimeStamps into a second, it's not good at roundtrip accuracy.
-        # For example, 1 second is stored as int(2**32/60) == 71582788.
-        # Converting back gives 71582788*60.0/2**32 == 0.9999999962747097.
-        # In general, we can lose up to 0.999... to truncation during
-        # storing, creating an absolute error up to about 1*60.0/2**32 ==
-        # 0.000000014 on the seconds value we get back.  This is so even
-        # when we have an exact integral second value going in (as we
-        # do in this test), so we can't expect equality in any comparison
-        # involving seconds.  Minutes (etc) are stored exactly, so we
-        # can expect equality for those.
-
-        self.assert_(abs(ts.timeTime() - native_ts) < EPSILON)
-        self.assertEqual(ts.year(), t[0])
-        self.assertEqual(ts.month(), t[1])
-        self.assertEqual(ts.day(), t[2])
-        self.assertEquals(ts.hour(), t[3])
-        self.assertEquals(ts.minute(), t[4])
-        self.assert_(abs(ts.second() - t[5]) < EPSILON)
-
-    def checkRawTimestamp(self):
-        t = time.gmtime()
-        ts1 = TimeStamp(*t[:6])
-        ts2 = TimeStamp(`ts1`)
-
-        self.assertEquals(ts1, ts2)
-        self.assertEquals(ts1.timeTime(), ts2.timeTime())
-        self.assertEqual(ts1.year(), ts2.year())
-        self.assertEqual(ts1.month(), ts2.month())
-        self.assertEqual(ts1.day(), ts2.day())
-        self.assertEquals(ts1.hour(), ts2.hour())
-        self.assertEquals(ts1.minute(), ts2.minute())
-        self.assert_(abs(ts1.second() - ts2.second()) < EPSILON)
-
-    def checkDictKey(self):
-        t = time.gmtime()
-        ts1 = TimeStamp(*t[:6])
-        ts2 = TimeStamp(2000, *t[1:6])
-
-        d = {}
-        d[ts1] = 1
-        d[ts2] = 2
-
-        self.assertEquals(len(d), 2)
-
-    def checkCompare(self):
-        ts1 = TimeStamp(1972, 6, 27)
-        ts2 = TimeStamp(1971, 12, 12)
-        self.assert_(ts1 > ts2)
-        self.assert_(ts2 <= ts1)
-
-    def checkLaterThan(self):
-        t = time.gmtime()
-        ts = TimeStamp(*t[:6])
-        ts2 = ts.laterThan(ts)
-        self.assert_(ts2 > ts)
-
-    # TODO:  should test for bogus inputs to TimeStamp constructor
-
-    def checkTimeStamp(self):
-        # Alternate test suite
-        t = TimeStamp(2002, 1, 23, 10, 48, 5) # GMT
-        self.assertEquals(str(t), '2002-01-23 10:48:05.000000')
-        self.assertEquals(repr(t), '\x03B9H\x15UUU')
-        self.assertEquals(TimeStamp('\x03B9H\x15UUU'), t)
-        self.assertEquals(t.year(), 2002)
-        self.assertEquals(t.month(), 1)
-        self.assertEquals(t.day(), 23)
-        self.assertEquals(t.hour(), 10)
-        self.assertEquals(t.minute(), 48)
-        self.assertEquals(round(t.second()), 5)
-        self.assertEquals(t.timeTime(), 1011782885)
-        t1 = TimeStamp(2002, 1, 23, 10, 48, 10)
-        self.assertEquals(str(t1), '2002-01-23 10:48:10.000000')
-        self.assert_(t == t)
-        self.assert_(t != t1)
-        self.assert_(t < t1)
-        self.assert_(t <= t1)
-        self.assert_(t1 >= t)
-        self.assert_(t1 > t)
-        self.failIf(t == t1)
-        self.failIf(t != t)
-        self.failIf(t > t1)
-        self.failIf(t >= t1)
-        self.failIf(t1 < t)
-        self.failIf(t1 <= t)
-        self.assertEquals(cmp(t, t), 0)
-        self.assertEquals(cmp(t, t1), -1)
-        self.assertEquals(cmp(t1, t), 1)
-        self.assertEquals(t1.laterThan(t), t1)
-        self.assert_(t.laterThan(t1) > t1)
-        self.assertEquals(TimeStamp(2002,1,23), TimeStamp(2002,1,23,0,0,0))
-
-def test_suite():
-    return unittest.makeSuite(TimeStampTests, 'check')
diff --git a/branches/bug1734/src/ZODB/tests/testUtils.py b/branches/bug1734/src/ZODB/tests/testUtils.py
deleted file mode 100644
index 2f5a6ab6..00000000
--- a/branches/bug1734/src/ZODB/tests/testUtils.py
+++ /dev/null
@@ -1,97 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the routines to convert between long and 64-bit strings"""
-
-import random
-import unittest
-from persistent import Persistent
-
-NUM = 100
-
-from ZODB.utils import U64, p64, u64
-
-class TestUtils(unittest.TestCase):
-
-    small = [random.randrange(1, 1L<<32, int=long)
-             for i in range(NUM)]
-    large = [random.randrange(1L<<32, 1L<<64, int=long)
-             for i in range(NUM)]
-    all = small + large
-
-    def checkLongToStringToLong(self):
-        for num in self.all:
-            s = p64(num)
-            n = U64(s)
-            self.assertEquals(num, n, "U64() failed")
-            n2 = u64(s)
-            self.assertEquals(num, n2, "u64() failed")
-
-    def checkKnownConstants(self):
-        self.assertEquals("\000\000\000\000\000\000\000\001", p64(1))
-        self.assertEquals("\000\000\000\001\000\000\000\000", p64(1L<<32))
-        self.assertEquals(u64("\000\000\000\000\000\000\000\001"), 1)
-        self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
-        self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L<<32)
-        self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L<<32)
-
-    def checkPersistentIdHandlesDescriptor(self):
-        from ZODB.serialize import BaseObjectWriter
-        class P(Persistent):
-            pass
-
-        writer = BaseObjectWriter(None)
-        self.assertEqual(writer.persistent_id(P), None)
-
-    # It's hard to know where to put this test.  We're checking that the
-    # ConflictError constructor uses utils.py's get_pickle_metadata() to
-    # deduce the class path from a pickle, instead of actually loading
-    # the pickle (and so also trying to import application module and
-    # class objects, which isn't a good idea on a ZEO server when avoidable).
-    def checkConflictErrorDoesntImport(self):
-        from ZODB.serialize import BaseObjectWriter
-        from ZODB.POSException import ConflictError
-        from ZODB.tests.MinPO import MinPO
-        import cPickle as pickle
-
-        obj = MinPO()
-        data = BaseObjectWriter().serialize(obj)
-
-        # The pickle contains a GLOBAL ('c') opcode resolving to MinPO's
-        # module and class.
-        self.assert_('cZODB.tests.MinPO\nMinPO\n' in data)
-
-        # Fiddle the pickle so it points to something "impossible" instead.
-        data = data.replace('cZODB.tests.MinPO\nMinPO\n',
-                            'cpath.that.does.not.exist\nlikewise.the.class\n')
-        # Pickle can't resolve that GLOBAL opcode -- gets ImportError.
-        self.assertRaises(ImportError, pickle.loads, data)
-
-        # Verify that building ConflictError doesn't get ImportError.
-        try:
-            raise ConflictError(object=obj, data=data)
-        except ConflictError, detail:
-            # And verify that the msg names the impossible path.
-            self.assert_('path.that.does.not.exist.likewise.the.class' in
-                         str(detail))
-        else:
-            self.fail("expected ConflictError, but no exception raised")
-
-
-def test_suite():
-    return unittest.makeSuite(TestUtils, 'check')
-
-if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    loader.testMethodPrefix = "check"
-    unittest.main(testLoader=loader)
diff --git a/branches/bug1734/src/ZODB/tests/testZODB.py b/branches/bug1734/src/ZODB/tests/testZODB.py
deleted file mode 100644
index d283ea4a..00000000
--- a/branches/bug1734/src/ZODB/tests/testZODB.py
+++ /dev/null
@@ -1,646 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import unittest
-
-import ZODB
-import ZODB.FileStorage
-from ZODB.POSException import ReadConflictError, ConflictError
-from ZODB.POSException import TransactionFailedError
-from ZODB.tests.warnhook import WarningsHook
-
-from persistent import Persistent
-from persistent.mapping import PersistentMapping
-import transaction
-
-class P(Persistent):
-    pass
-
-class Independent(Persistent):
-
-    def _p_independent(self):
-        return 1
-
-class DecoyIndependent(Persistent):
-
-    def _p_independent(self):
-        return 0
-
-class ZODBTests(unittest.TestCase):
-
-    def setUp(self):
-        self._storage = ZODB.FileStorage.FileStorage(
-            'ZODBTests.fs', create=1)
-        self._db = ZODB.DB(self._storage)
-
-    def populate(self):
-        transaction.begin()
-        conn = self._db.open()
-        root = conn.root()
-        root['test'] = pm = PersistentMapping()
-        for n in range(100):
-            pm[n] = PersistentMapping({0: 100 - n})
-        transaction.get().note('created test data')
-        transaction.commit()
-        conn.close()
-
-    def tearDown(self):
-        self._db.close()
-        self._storage.cleanup()
-
-    def checkExportImport(self, abort_it=False):
-        self.populate()
-        conn = self._db.open()
-        try:
-            self.duplicate(conn, abort_it)
-        finally:
-            conn.close()
-        conn = self._db.open()
-        try:
-            self.verify(conn, abort_it)
-        finally:
-            conn.close()
-
-    def duplicate(self, conn, abort_it):
-        transaction.begin()
-        transaction.get().note('duplication')
-        root = conn.root()
-        ob = root['test']
-        assert len(ob) > 10, 'Insufficient test data'
-        try:
-            import tempfile
-            f = tempfile.TemporaryFile()
-            ob._p_jar.exportFile(ob._p_oid, f)
-            assert f.tell() > 0, 'Did not export correctly'
-            f.seek(0)
-            new_ob = ob._p_jar.importFile(f)
-            self.assertEqual(new_ob, ob)
-            root['dup'] = new_ob
-            f.close()
-            if abort_it:
-                transaction.abort()
-            else:
-                transaction.commit()
-        except:
-            transaction.abort()
-            raise
-
-    def verify(self, conn, abort_it):
-        transaction.begin()
-        root = conn.root()
-        ob = root['test']
-        try:
-            ob2 = root['dup']
-        except KeyError:
-            if abort_it:
-                # Passed the test.
-                return
-            else:
-                raise
-        else:
-            self.failUnless(not abort_it, 'Did not abort duplication')
-        l1 = list(ob.items())
-        l1.sort()
-        l2 = list(ob2.items())
-        l2.sort()
-        l1 = map(lambda (k, v): (k, v[0]), l1)
-        l2 = map(lambda (k, v): (k, v[0]), l2)
-        self.assertEqual(l1, l2)
-        self.assert_(ob._p_oid != ob2._p_oid)
-        self.assertEqual(ob._p_jar, ob2._p_jar)
-        oids = {}
-        for v in ob.values():
-            oids[v._p_oid] = 1
-        for v in ob2.values():
-            assert not oids.has_key(v._p_oid), (
-                'Did not fully separate duplicate from original')
-        transaction.commit()
-
-    def checkExportImportAborted(self):
-        self.checkExportImport(abort_it=True)
-
-    def checkVersionOnly(self):
-        # Make sure the changes to make empty transactions a no-op
-        # still allow things like abortVersion().  This should work
-        # because abortVersion() calls tpc_begin() itself.
-        conn = self._db.open("version")
-        try:
-            r = conn.root()
-            r[1] = 1
-            transaction.commit()
-        finally:
-            conn.close()
-        self._db.abortVersion("version")
-        transaction.commit()
-
-    def checkResetCache(self):
-        # The cache size after a reset should be 0.  Note that
-        # _resetCache is not a public API, but the resetCaches()
-        # function is, and resetCaches() causes _resetCache() to be
-        # called.
-        self.populate()
-        conn = self._db.open()
-        conn.root()
-        self.assert_(len(conn._cache) > 0)  # Precondition
-        conn._resetCache()
-        self.assertEqual(len(conn._cache), 0)
-
-    def checkResetCachesAPI(self):
-        # Checks the resetCaches() API.
-        # (resetCaches used to be called updateCodeTimestamp.)
-        self.populate()
-        conn = self._db.open()
-        conn.root()
-        self.assert_(len(conn._cache) > 0)  # Precondition
-        ZODB.Connection.resetCaches()
-        conn.close()
-        self.assert_(len(conn._cache) > 0)  # Still not flushed
-        conn._setDB(self._db)  # simulate the connection being reopened
-        self.assertEqual(len(conn._cache), 0)
-
-    def checkExplicitTransactionManager(self):
-        # Test of transactions that apply to only the connection,
-        # not the thread.
-        tm1 = transaction.TransactionManager()
-        conn1 = self._db.open(txn_mgr=tm1)
-        tm2 = transaction.TransactionManager()
-        conn2 = self._db.open(txn_mgr=tm2)
-        try:
-            r1 = conn1.root()
-            r2 = conn2.root()
-            if r1.has_key('item'):
-                del r1['item']
-                tm1.get().commit()
-            r1.get('item')
-            r2.get('item')
-            r1['item'] = 1
-            tm1.get().commit()
-            self.assertEqual(r1['item'], 1)
-            # r2 has not seen a transaction boundary,
-            # so it should be unchanged.
-            self.assertEqual(r2.get('item'), None)
-            conn2.sync()
-            # Now r2 is updated.
-            self.assertEqual(r2['item'], 1)
-
-            # Now, for good measure, send an update in the other direction.
-            r2['item'] = 2
-            tm2.get().commit()
-            self.assertEqual(r1['item'], 1)
-            self.assertEqual(r2['item'], 2)
-            conn1.sync()
-            conn2.sync()
-            self.assertEqual(r1['item'], 2)
-            self.assertEqual(r2['item'], 2)
-        finally:
-            conn1.close()
-            conn2.close()
-
-    def checkLocalTransactions(self):
-        # Test of transactions that apply to only the connection,
-        # not the thread.
-        conn1 = self._db.open()
-        conn2 = self._db.open()
-        hook = WarningsHook()
-        hook.install()
-        try:
-            conn1.setLocalTransaction()
-            conn2.setLocalTransaction()
-            r1 = conn1.root()
-            r2 = conn2.root()
-            if r1.has_key('item'):
-                del r1['item']
-                conn1.getTransaction().commit()
-            r1.get('item')
-            r2.get('item')
-            r1['item'] = 1
-            conn1.getTransaction().commit()
-            self.assertEqual(r1['item'], 1)
-            # r2 has not seen a transaction boundary,
-            # so it should be unchanged.
-            self.assertEqual(r2.get('item'), None)
-            conn2.sync()
-            # Now r2 is updated.
-            self.assertEqual(r2['item'], 1)
-
-            # Now, for good measure, send an update in the other direction.
-            r2['item'] = 2
-            conn2.getTransaction().commit()
-            self.assertEqual(r1['item'], 1)
-            self.assertEqual(r2['item'], 2)
-            conn1.sync()
-            conn2.sync()
-            self.assertEqual(r1['item'], 2)
-            self.assertEqual(r2['item'], 2)
-            for msg, obj, filename, lineno in hook.warnings:
-                self.assert_(msg in [
-                    "This will be removed in ZODB 3.6:\n"
-                        "setLocalTransaction() is deprecated. "
-                        "Use the txn_mgr argument to DB.open() instead.",
-                    "This will be removed in ZODB 3.6:\n"
-                        "getTransaction() is deprecated. "
-                        "Use the txn_mgr argument to DB.open() instead."])
-        finally:
-            conn1.close()
-            conn2.close()
-            hook.uninstall()
-
-    def checkReadConflict(self):
-        self.obj = P()
-        self.readConflict()
-
-    def readConflict(self, shouldFail=True):
-        # Two transactions run concurrently.  Each reads some object,
-        # then one commits and the other tries to read an object
-        # modified by the first.  This read should fail with a conflict
-        # error because the object state read is not necessarily
-        # consistent with the objects read earlier in the transaction.
-
-        tm1 = transaction.TransactionManager()
-        conn = self._db.open(mvcc=False, txn_mgr=tm1)
-        r1 = conn.root()
-        r1["p"] = self.obj
-        self.obj.child1 = P()
-        tm1.get().commit()
-
-        # start a new transaction with a new connection
-        tm2 = transaction.TransactionManager()
-        cn2 = self._db.open(mvcc=False, txn_mgr=tm2)
-        # start a new transaction with the other connection
-        r2 = cn2.root()
-
-        self.assertEqual(r1._p_serial, r2._p_serial)
-
-        self.obj.child2 = P()
-        tm1.get().commit()
-
-        # resume the transaction using cn2
-        obj = r2["p"]
-        # An attempt to access obj should fail, because r2 was read
-        # earlier in the transaction and obj was modified by the other
-        # transaction.
-        if shouldFail:
-            self.assertRaises(ReadConflictError, lambda: obj.child1)
-            # And since ReadConflictError was raised, attempting to commit
-            # the transaction should re-raise it.  checkNotIndependent()
-            # failed this part of the test for a long time.
-            self.assertRaises(ReadConflictError, tm2.get().commit)
-
-            # And since that commit failed, trying to commit again should
-            # fail again.
-            self.assertRaises(TransactionFailedError, tm2.get().commit)
-            # And again.
-            self.assertRaises(TransactionFailedError, tm2.get().commit)
-            # Etc.
-            self.assertRaises(TransactionFailedError, tm2.get().commit)
-
-        else:
-            # make sure that accessing the object succeeds
-            obj.child1
-        tm2.get().abort()
-
-    def checkReadConflictIgnored(self):
-        # Test that an application that catches a read conflict and
-        # continues can not commit the transaction later.
-        root = self._db.open(mvcc=False).root()
-        root["real_data"] = real_data = PersistentMapping()
-        root["index"] = index = PersistentMapping()
-
-        real_data["a"] = PersistentMapping({"indexed_value": 0})
-        real_data["b"] = PersistentMapping({"indexed_value": 1})
-        index[1] = PersistentMapping({"b": 1})
-        index[0] = PersistentMapping({"a": 1})
-        transaction.commit()
-
-        # load some objects from one connection
-        tm = transaction.TransactionManager()
-        cn2 = self._db.open(mvcc=False, txn_mgr=tm)
-        r2 = cn2.root()
-        real_data2 = r2["real_data"]
-        index2 = r2["index"]
-
-        real_data["b"]["indexed_value"] = 0
-        del index[1]["b"]
-        index[0]["b"] = 1
-        transaction.commit()
-
-        del real_data2["a"]
-        try:
-            del index2[0]["a"]
-        except ReadConflictError:
-            # This is the crux of the test.  Ignore the error.
-            pass
-        else:
-            self.fail("No conflict occurred")
-
-        # real_data2 still ready to commit
-        self.assert_(real_data2._p_changed)
-
-        # index2 values not ready to commit
-        self.assert_(not index2._p_changed)
-        self.assert_(not index2[0]._p_changed)
-        self.assert_(not index2[1]._p_changed)
-
-        self.assertRaises(ReadConflictError, tm.get().commit)
-        self.assertRaises(TransactionFailedError, tm.get().commit)
-        tm.get().abort()
-
-    def checkIndependent(self):
-        self.obj = Independent()
-        self.readConflict(shouldFail=False)
-
-    def checkNotIndependent(self):
-        self.obj = DecoyIndependent()
-        self.readConflict()
-
-    def checkReadConflictErrorClearedDuringAbort(self):
-        # When a transaction is aborted, the "memory" of which
-        # objects were the cause of a ReadConflictError during
-        # that transaction should be cleared.
-        root = self._db.open(mvcc=False).root()
-        data = PersistentMapping({'d': 1})
-        root["data"] = data
-        transaction.commit()
-
-        # Provoke a ReadConflictError.
-        tm2 = transaction.TransactionManager()
-        cn2 = self._db.open(mvcc=False, txn_mgr=tm2)
-        r2 = cn2.root()
-        data2 = r2["data"]
-
-        data['d'] = 2
-        transaction.commit()
-
-        try:
-            data2['d'] = 3
-        except ReadConflictError:
-            pass
-        else:
-            self.fail("No conflict occurred")
-
-        # Explicitly abort cn2's transaction.
-        tm2.get().abort()
-
-        # cn2 should retain no memory of the read conflict after an abort(),
-        # but 3.2.3 had a bug wherein it did.
-        data_conflicts = data._p_jar._conflicts
-        data2_conflicts = data2._p_jar._conflicts
-        self.failIf(data_conflicts)
-        self.failIf(data2_conflicts)  # this used to fail
-
-        # And because of that, we still couldn't commit a change to data2['d']
-        # in the new transaction.
-        cn2.sync()  # process the invalidation for data2['d']
-        data2['d'] = 3
-        tm2.get().commit()  # 3.2.3 used to raise ReadConflictError
-
-        cn2.close()
-
-    def checkTxnBeginImpliesAbort(self):
-        # begin() should do an abort() first, if needed.
-        cn = self._db.open()
-        rt = cn.root()
-        rt['a'] = 1
-
-        transaction.begin()  # should abort adding 'a' to the root
-        rt = cn.root()
-        self.assertRaises(KeyError, rt.__getitem__, 'a')
-
-        # A longstanding bug:  this didn't work if changes were only in
-        # subtransactions.
-        transaction.begin()
-        rt = cn.root()
-        rt['a'] = 2
-        transaction.commit(1)
-
-        transaction.begin()
-        rt = cn.root()
-        self.assertRaises(KeyError, rt.__getitem__, 'a')
-
-        # One more time, mixing "top level" and subtransaction changes.
-        transaction.begin()
-        rt = cn.root()
-        rt['a'] = 3
-        transaction.commit(1)
-        rt['b'] = 4
-
-        transaction.begin()
-        rt = cn.root()
-        self.assertRaises(KeyError, rt.__getitem__, 'a')
-        self.assertRaises(KeyError, rt.__getitem__, 'b')
-
-        # That used methods of the default transaction *manager*.  Alas,
-        # that's not necessarily the same as using methods of the current
-        # transaction, and, in fact, when this test was written,
-        # Transaction.begin() didn't do anything (everything from here
-        # down failed).
-
-        # Oh, bleech.  Since Transaction.begin is also deprecated, we have
-        # to goof around suppressing the deprecation warning.
-        import warnings
-
-        # First verify that Transaction.begin *is* deprecated, by turning
-        # the warning into an error.
-        warnings.filterwarnings("error", category=DeprecationWarning)
-        self.assertRaises(DeprecationWarning, transaction.get().begin)
-        del warnings.filters[0]
-
-        # Now ignore DeprecationWarnings for the duration.  Use a
-        # try/finally block to ensure we reenable DeprecationWarnings
-        # no matter what.
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
-        try:
-            cn = self._db.open()
-            rt = cn.root()
-            rt['a'] = 1
-
-            transaction.get().begin()  # should abort adding 'a' to the root
-            rt = cn.root()
-            self.assertRaises(KeyError, rt.__getitem__, 'a')
-
-            # A longstanding bug:  this didn't work if changes were only in
-            # subtransactions.
-            transaction.get().begin()
-            rt = cn.root()
-            rt['a'] = 2
-            transaction.get().commit(1)
-
-            transaction.get().begin()
-            rt = cn.root()
-            self.assertRaises(KeyError, rt.__getitem__, 'a')
-
-            # One more time, mixing "top level" and subtransaction changes.
-            transaction.get().begin()
-            rt = cn.root()
-            rt['a'] = 3
-            transaction.get().commit(1)
-            rt['b'] = 4
-
-            transaction.get().begin()
-            rt = cn.root()
-            self.assertRaises(KeyError, rt.__getitem__, 'a')
-            self.assertRaises(KeyError, rt.__getitem__, 'b')
-
-            cn.close()
-
-        finally:
-            del warnings.filters[0]
-
-    def checkFailingCommitSticks(self):
-        # See also checkFailingSubtransactionCommitSticks.
-        cn = self._db.open()
-        rt = cn.root()
-        rt['a'] = 1
-
-        # Arrange for commit to fail during tpc_vote.
-        poisoned = PoisonedObject(PoisonedJar(break_tpc_vote=True))
-        transaction.get().register(poisoned)
-
-        self.assertRaises(PoisonedError, transaction.get().commit)
-        # Trying to commit again fails too.
-        self.assertRaises(TransactionFailedError, transaction.get().commit)
-        self.assertRaises(TransactionFailedError, transaction.get().commit)
-        self.assertRaises(TransactionFailedError, transaction.get().commit)
-
-        # The change to rt['a'] is lost.
-        self.assertRaises(KeyError, rt.__getitem__, 'a')
-
-        # Trying to modify an object also fails, because Transaction.join()
-        # also raises TransactionFailedError.
-        self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
-
-        # Clean up via abort(), and try again.
-        transaction.get().abort()
-        rt['a'] = 1
-        transaction.get().commit()
-        self.assertEqual(rt['a'], 1)
-
-        # Cleaning up via begin() should also work.
-        rt['a'] = 2
-        transaction.get().register(poisoned)
-        self.assertRaises(PoisonedError, transaction.get().commit)
-        self.assertRaises(TransactionFailedError, transaction.get().commit)
-        # The change to rt['a'] is lost.
-        self.assertEqual(rt['a'], 1)
-        # Trying to modify an object also fails.
-        self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
-        # Clean up via begin(), and try again.
-        transaction.begin()
-        rt['a'] = 2
-        transaction.get().commit()
-        self.assertEqual(rt['a'], 2)
-
-        cn.close()
-
-    def checkFailingSubtransactionCommitSticks(self):
-        cn = self._db.open()
-        rt = cn.root()
-        rt['a'] = 1
-        transaction.get().commit(True)
-        self.assertEqual(rt['a'], 1)
-
-        rt['b'] = 2
-        # Subtransactions don't do tpc_vote, so we poison tpc_begin.
-        poisoned = PoisonedObject(PoisonedJar(break_tpc_begin=True))
-        transaction.get().register(poisoned)
-        self.assertRaises(PoisonedError, transaction.get().commit, True)
-        # Trying to subtxn-commit again fails too.
-        self.assertRaises(TransactionFailedError, transaction.get().commit, True)
-        self.assertRaises(TransactionFailedError, transaction.get().commit, True)
-        # Top-level commit also fails.
-        self.assertRaises(TransactionFailedError, transaction.get().commit)
-
-        # The changes to rt['a'] and rt['b'] are lost.
-        self.assertRaises(KeyError, rt.__getitem__, 'a')
-        self.assertRaises(KeyError, rt.__getitem__, 'b')
-
-        # Trying to modify an object also fails, because Transaction.join()
-        # also raises TransactionFailedError.
-        self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
-
-        # Clean up via abort(), and try again.
-        transaction.get().abort()
-        rt['a'] = 1
-        transaction.get().commit()
-        self.assertEqual(rt['a'], 1)
-
-        # Cleaning up via begin() should also work.
-        rt['a'] = 2
-        transaction.get().register(poisoned)
-        self.assertRaises(PoisonedError, transaction.get().commit, True)
-        self.assertRaises(TransactionFailedError, transaction.get().commit, True)
-        # The change to rt['a'] is lost.
-        self.assertEqual(rt['a'], 1)
-        # Trying to modify an object also fails.
-        self.assertRaises(TransactionFailedError, rt.__setitem__, 'b', 2)
-        # Clean up via begin(), and try again.
-        transaction.begin()
-        rt['a'] = 2
-        transaction.get().commit(True)
-        self.assertEqual(rt['a'], 2)
-        transaction.get().commit()
-
-        cn2 = self._db.open()
-        rt = cn2.root()
-        self.assertEqual(rt['a'], 2)
-
-        cn.close()
-        cn2.close()
-
-class PoisonedError(Exception):
-    pass
-
-# PoisonedJar arranges to raise exceptions from interesting places.
-# For whatever reason, subtransaction commits don't call tpc_vote.
-class PoisonedJar:
-    def __init__(self, break_tpc_begin=False, break_tpc_vote=False):
-        self.break_tpc_begin = break_tpc_begin
-        self.break_tpc_vote = break_tpc_vote
-
-    def sortKey(self):
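-        # Storages are committed in sortKey() order during two-phase
-        # commit; any stable, unique string suffices for a test jar.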
-        return str(id(self))
-
-    # A way to poison a subtransaction commit.
-    def tpc_begin(self, *args):
-        if self.break_tpc_begin:
-            raise PoisonedError("tpc_begin fails")
-
-    # A way to poison a top-level commit.
-    def tpc_vote(self, *args):
-        if self.break_tpc_vote:
-            raise PoisonedError("tpc_vote fails")
-
-    # commit_sub is needed; otherwise this jar is ignored during
-    # subtransaction commit.
-    def commit_sub(*args):
-        pass
-
-    def abort_sub(*args):
-        pass
-
-    def commit(*args):
-        pass
-
-    def abort(*args):
-        pass
-
-
-class PoisonedObject:
-    def __init__(self, poisonedjar):
-        self._p_jar = poisonedjar
-
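-# A minimal sketch (not exercised by the test suite) of the poisoning
-# pattern used above: register a PoisonedJar that breaks tpc_vote, and
-# the first commit raises PoisonedError, after which every commit
-# attempt raises TransactionFailedError until the failed transaction
-# is cleared via abort() or begin().
-def _poisoned_commit_sketch():
-    t = transaction.get()
-    t.register(PoisonedObject(PoisonedJar(break_tpc_vote=True)))
-    try:
-        t.commit()  # PoisonedError propagates from tpc_vote
-    except PoisonedError:
-        pass
-    # Until abort(), further commits raise TransactionFailedError.
-    transaction.get().abort()
-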
-def test_suite():
-    return unittest.makeSuite(ZODBTests, 'check')
-
-if __name__ == "__main__":
-    unittest.main(defaultTest="test_suite")
diff --git a/branches/bug1734/src/ZODB/tests/test_cache.py b/branches/bug1734/src/ZODB/tests/test_cache.py
deleted file mode 100644
index 8dee47bf..00000000
--- a/branches/bug1734/src/ZODB/tests/test_cache.py
+++ /dev/null
@@ -1,209 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test behavior of Connection plus cPickleCache."""
-
-import doctest
-
-from persistent import Persistent
-import transaction
-from ZODB.config import databaseFromString
-
-class RecalcitrantObject(Persistent):
-    """A Persistent object that will not become a ghost."""
-
-    deactivations = 0
-
-    def _p_deactivate(self):
-        self.__class__.deactivations += 1
-
-    def init(cls):
-        cls.deactivations = 0
-
-    init = classmethod(init)
-
-class RegularObject(Persistent):
-
-    deactivations = 0
-    invalidations = 0
-
-    def _p_deactivate(self):
-        self.__class__.deactivations += 1
-        super(RegularObject, self)._p_deactivate()
-
-    def _p_invalidate(self):
-        self.__class__.invalidations += 1
-        super(RegularObject, self)._p_invalidate()
-
-    def init(cls):
-        cls.deactivations = 0
-        cls.invalidations = 0
-
-    init = classmethod(init)
-
-class CacheTests:
-
-    def test_cache(self):
-        r"""Test basic cache methods.
-
-        Let's start with a clean transaction
-
-        >>> transaction.abort()
-
-        >>> RegularObject.init()
-        >>> db = databaseFromString("<zodb>\n"
-        ...                         "cache-size 4\n"
-        ...                         "<mappingstorage/>\n"
-        ...                         "</zodb>")
-        >>> cn = db.open()
-        >>> r = cn.root()
-        >>> L = []
-        >>> for i in range(5):
-        ...     o = RegularObject()
-        ...     L.append(o)
-        ...     r[i] = o
-        >>> transaction.commit()
-
-        After committing a transaction and calling cacheGC(), there
-        should be cache-size (4) objects in the cache.  One of the
-        RegularObjects was deactivated.
-
-        >>> cn._cache.ringlen()
-        4
-        >>> RegularObject.deactivations
-        1
-
-        If we explicitly activate the objects again, the ringlen
-        should go back up to 5.
-
-        >>> for o in L:
-        ...     o._p_activate()
-        >>> cn._cache.ringlen()
-        5
-
-        >>> cn.cacheGC()
-        >>> cn._cache.ringlen()
-        4
-        >>> RegularObject.deactivations
-        2
-
-        >>> cn.cacheMinimize()
-        >>> cn._cache.ringlen()
-        0
-        >>> RegularObject.deactivations
-        6
-
-        If we activate all the objects again and mark one as modified,
-        then the one object should not be deactivated even by a
-        minimize.
-
-        >>> for o in L:
-        ...     o._p_activate()
-        >>> o.attr = 1
-        >>> cn._cache.ringlen()
-        5
-        >>> cn.cacheMinimize()
-        >>> cn._cache.ringlen()
-        1
-        >>> RegularObject.deactivations
-        10
-
-        Clean up
-
-        >>> transaction.abort()
-
-        """
-
-    def test_cache_gc_recalcitrant(self):
-        r"""Test that a cacheGC() call will return.
-
-        It's possible for a particular object to ignore the
-        _p_deactivate() call.  We want to check several things in this
-        case.  The cache should called the real _p_deactivate() method
-        not the one provided by Persistent.  The cacheGC() call should
-        also return when it's looked at each item, regardless of whether
-        it became a ghost.
-
-        >>> RecalcitrantObject.init()
-        >>> db = databaseFromString("<zodb>\n"
-        ...                         "cache-size 4\n"
-        ...                         "<mappingstorage/>\n"
-        ...                         "</zodb>")
-        >>> cn = db.open()
-        >>> r = cn.root()
-        >>> L = []
-        >>> for i in range(5):
-        ...     o = RecalcitrantObject()
-        ...     L.append(o)
-        ...     r[i] = o
-        >>> transaction.commit()
-        >>> [o._p_state for o in L]
-        [0, 0, 0, 0, 0]
-
-        The Connection calls cacheGC() after it commits a transaction.
-        Since the cache will now have more objects than its target size,
-        it will call _p_deactivate() on each RecalcitrantObject.
-
-        >>> RecalcitrantObject.deactivations
-        5
-        >>> [o._p_state for o in L]
-        [0, 0, 0, 0, 0]
-
-        An explicit call to cacheGC() has the same effect.
-
-        >>> cn.cacheGC()
-        >>> RecalcitrantObject.deactivations
-        10
-        >>> [o._p_state for o in L]
-        [0, 0, 0, 0, 0]
-        """
-
-    def test_cache_on_abort(self):
-        r"""Test that the cache handles transaction abort correctly.
-
-        >>> RegularObject.init()
-        >>> db = databaseFromString("<zodb>\n"
-        ...                         "cache-size 4\n"
-        ...                         "<mappingstorage/>\n"
-        ...                         "</zodb>")
-        >>> cn = db.open()
-        >>> r = cn.root()
-        >>> L = []
-        >>> for i in range(5):
-        ...     o = RegularObject()
-        ...     L.append(o)
-        ...     r[i] = o
-        >>> transaction.commit()
-        >>> RegularObject.deactivations
-        1
-
-        Modify three of the objects and verify that they are
-        deactivated when the transaction aborts.
-
-        >>> for i in range(0, 5, 2):
-        ...     L[i].attr = i
-        >>> [L[i]._p_state for i in range(0, 5, 2)]
-        [1, 1, 1]
-        >>> cn._cache.ringlen()
-        5
-
-        >>> transaction.abort()
-        >>> cn._cache.ringlen()
-        2
-        >>> RegularObject.deactivations
-        4
-        """
-
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/ZODB/tests/test_datamanageradapter.py b/branches/bug1734/src/ZODB/tests/test_datamanageradapter.py
deleted file mode 100644
index 62189f07..00000000
--- a/branches/bug1734/src/ZODB/tests/test_datamanageradapter.py
+++ /dev/null
@@ -1,808 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""
-$Id$
-"""
-import unittest
-from doctest import DocTestSuite
-from transaction._transaction import DataManagerAdapter
-from ZODB.tests.sampledm import DataManager
-
-def test_normal_commit():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes.  When the data manager joins a transaction,
-    the transaction will create an adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1)
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction will then call tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This prepares the data manager:
-
-    >>> dm.state, dm.delta
-    (1, 1)
-    >>> dm.prepared
-    True
-
-    Finally, tpc_finish is called:
-
-    >>> jar.tpc_finish(t1)
-
-    and the data manager finishes the two-phase commit:
-
-    >>> dm.state, dm.delta
-    (1, 0)
-    >>> dm.prepared
-    False
-    """
-
-def test_abort():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    When the data manager joins a transaction,
-    the transaction will create an adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object.
-
-    Now we'll abort the transaction. The transaction will get the
-    "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    Then the transaction will call abort on the jar:
-
-    >>> t1 = '1'
-    >>> jar.abort(t1)
-
-    Which aborts the changes in the data manager:
-
-    >>> dm.state, dm.delta
-    (0, 0)
-    """
-
-def test_tpc_abort_phase1():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes.  When the data manager joins a transaction,
-    the transaction will create an adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1)
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    At this point, the transaction decides to abort. It calls tpc_abort:
-
-    >>> jar.tpc_abort(t1)
-
-    Which causes the state of the data manager to be restored:
-
-    >>> dm.state, dm.delta
-    (0, 0)
-    """
-
-def test_tpc_abort_phase2():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes.  When the data manager joins a transaction,
-    the transaction will create an adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1)
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction calls tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This prepares the data manager:
-
-    >>> dm.state, dm.delta
-    (1, 1)
-    >>> dm.prepared
-    True
-
-    At this point, the transaction decides to abort. It calls tpc_abort:
-
-    >>> jar.tpc_abort(t1)
-
-    Which causes the state of the data manager to be restored:
-
-    >>> dm.state, dm.delta
-    (0, 0)
-    >>> dm.prepared
-    False
-    """
-
-def test_commit_w_subtransactions():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes in a subtransaction.  When the data
-    manager joins a transaction, the transaction will create an
-    adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction will then call tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This doesn't do anything either, because zodb4 data managers don't
-    actually do two-phase commit for subtransactions.
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Finally, we call tpc_finish. This does actually create a savepoint,
-    but we can't really tell that from outside.
-
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    We'll do more of the above:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    Note that the above works *because* the same transaction is used
-    for each subtransaction.
-
-    Finally, we'll do a little more work:
-
-    >>> dm.inc()
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    and then commit the top-level transaction.
-
-    The transaction will actually go through the steps for a subtransaction:
-
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-
-    And then call commit_sub:
-
-    >>> jar.commit_sub(t1)
-
-    As usual, this doesn't actually do anything. ;)
-
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    The transaction manager doesn't call tpc_begin, because commit_sub
-    implies the start of two-phase commit. Next, it does call commit:
-
-    >>> jar.commit(t1)
-
-    which doesn't do anything.
-
-    Finally, the transaction calls tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    which actually does something (because this is the top-level txn):
-
-    >>> dm.state, dm.delta
-    (5, 5)
-    >>> dm.prepared
-    True
-
-    Finally, tpc_finish is called:
-
-    >>> jar.tpc_finish(t1)
-
-    and the data manager finishes the two-phase commit:
-
-    >>> dm.state, dm.delta
-    (5, 0)
-    >>> dm.prepared
-    False
-    """
-
-def test_commit_w_subtransactions_featuring_subtransaction_abort():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes in a subtransaction.  When the data
-    manager joins a transaction, the transaction will create an
-    adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction will then call tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This doesn't do anything either, because zodb4 data managers don't
-    actually do two-phase commit for subtransactions.
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Finally, we call tpc_finish. This does actually create a savepoint,
-    but we can't really tell that from outside.
-
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    We'll do more of the above:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 2)
-
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    But then we'll decide to abort a subtransaction.
-
-    The transaction will just call abort as usual:
-
-    >>> jar.abort(t1)
-
-    This will cause a rollback to the last savepoint:
-    >>> dm.state, dm.delta
-    (0, 2)
-
-    Then we do more work:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    Note that the above works *because* the same transaction is used
-    for each subtransaction.
-
-    Finally, we'll do a little more work:
-
-    >>> dm.inc()
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    and then commit the top-level transaction.
-
-    The transaction will actually go through the steps for a subtransaction:
-
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-
-    And then call commit_sub:
-
-    >>> jar.commit_sub(t1)
-
-    As usual, this doesn't actually do anything. ;)
-
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    The transaction manager doesn't call tpc_begin, because commit_sub
-    implies the start of two-phase commit. Next, it does call commit:
-
-    >>> jar.commit(t1)
-
-    which doesn't do anything.
-
-    Finally, the transaction calls tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    which actually does something (because this is the top-level txn):
-
-    >>> dm.state, dm.delta
-    (5, 5)
-    >>> dm.prepared
-    True
-
-    Finally, tpc_finish is called:
-
-    >>> jar.tpc_finish(t1)
-
-    and the data manager finishes the two-phase commit:
-
-    >>> dm.state, dm.delta
-    (5, 0)
-    >>> dm.prepared
-    False
-    """
-
-def test_abort_w_subtransactions():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes in a subtransaction.  When the data
-    manager joins a transaction, the transaction will create an
-    adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction will then call tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This doesn't do anything either, because zodb4 data managers don't
-    actually do two-phase commit for subtransactions.
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Finally, we call tpc_finish. This does actually create a savepoint,
-    but we can't really tell that from outside.
-
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    We'll do more of the above:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    Note that the above works *because* the same transaction is used
-    for each subtransaction.
-
-    Finally, we'll do a little more work:
-
-    >>> dm.inc()
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    and then abort the top-level transaction.
-
-    The transaction first calls abort on the jar:
-
-    >>> jar.abort(t1)
-
-    This will have the effect of aborting the subtransaction:
-
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    Then the transaction will call abort_sub:
-
-    >>> jar.abort_sub(t1)
-
-    This will abort all of the subtransactions:
-
-    >>> dm.state, dm.delta
-    (0, 0)
-    """
-
-
-def test_tpc_abort_w_subtransactions_featuring_subtransaction_abort():
-    """
-    So, we have a data manager:
-
-    >>> dm = DataManager()
-
-    and we do some work that modifies uncommitted state:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Now we'll commit the changes in a subtransaction.  When the data
-    manager joins a transaction, the transaction will create an
-    adapter.
-
-    >>> dma = DataManagerAdapter(dm)
-
-    and register it as a modified object. At commit time, the
-    transaction will get the "jar" like this:
-
-    >>> jar = getattr(dma, '_p_jar', dma)
-
-    and, of course, the jar and the adapter will be the same:
-
-    >>> jar is dma
-    True
-
-    The transaction will call tpc_begin:
-
-    >>> t1 = '1'
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-
-    Then the transaction will call commit on the jar:
-
-    >>> jar.commit(t1)
-
-    This doesn't actually do anything. :)
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    The transaction will then call tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    This doesn't do anything either, because zodb4 data managers don't
-    actually do two-phase commit for subtransactions.
-
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    Finally, we call tpc_finish. This does actually create a savepoint,
-    but we can't really tell that from outside.
-
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 1)
-
-    We'll do more of the above:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 2)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 2)
-
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    But then we'll decide to abort a subtransaction.
-
-    The transaction will just call abort as usual:
-
-    >>> jar.abort(t1)
-
-    This will cause a rollback to the last savepoint:
-    >>> dm.state, dm.delta
-    (0, 2)
-
-    Then we do more work:
-
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 3)
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-    >>> dm.state, dm.delta
-    (0, 3)
-
-    Note that the above works *because* the same transaction is used
-    for each subtransaction.
-
-    Finally, we'll do a little more work:
-
-    >>> dm.inc()
-    >>> dm.inc()
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    and then commit the top-level transaction.
-
-    The transaction will actually go through the steps for a subtransaction:
-
-    >>> jar.tpc_begin(t1, 1) # 1 -> subtxn
-    >>> jar.commit(t1)
-    >>> jar.tpc_vote(t1)
-    >>> jar.tpc_finish(t1)
-
-    And then call commit_sub:
-
-    >>> jar.commit_sub(t1)
-
-    As usual, this doesn't actually do anything. ;)
-
-    >>> dm.state, dm.delta
-    (0, 5)
-
-    The transaction manager doesn't call tpc_begin, because commit_sub
-    implies the start of two-phase commit. Next, it does call commit:
-
-    >>> jar.commit(t1)
-
-    which doesn't do anything.
-
-    Finally, the transaction calls tpc_vote:
-
-    >>> jar.tpc_vote(t1)
-
-    which actually does something (because this is the top-level txn):
-
-    >>> dm.state, dm.delta
-    (5, 5)
-    >>> dm.prepared
-    True
-
-    Now, at the last minute, the transaction is aborted (possibly due
-    to a "no vote" from another data manager):
-
-    >>> jar.tpc_abort(t1)
-
-    And the changes are undone:
-
-    >>> dm.state, dm.delta
-    (0, 0)
-    >>> dm.prepared
-    False
-    """
-
-def test_suite():
-    return DocTestSuite()
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/ZODB/tests/test_doctest_files.py b/branches/bug1734/src/ZODB/tests/test_doctest_files.py
deleted file mode 100644
index 94a150b8..00000000
--- a/branches/bug1734/src/ZODB/tests/test_doctest_files.py
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-from zope.testing.doctestunit import DocFileSuite
-
-def test_suite():
-    return DocFileSuite("dbopen.txt",
-                        "multidb.txt",
-                        )
diff --git a/branches/bug1734/src/ZODB/tests/test_fsdump.py b/branches/bug1734/src/ZODB/tests/test_fsdump.py
deleted file mode 100644
index 0acb7130..00000000
--- a/branches/bug1734/src/ZODB/tests/test_fsdump.py
+++ /dev/null
@@ -1,78 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-r"""
-fsdump test
-===========
-
-Let's get a temp file path to work with first.
-
->>> import tempfile
->>> path = tempfile.mktemp('.fs', 'Data')
->>> print 'path:', path #doctest: +ELLIPSIS
-path: ...Data...fs
-
-More imports.
-
->>> import ZODB
->>> from ZODB.FileStorage import FileStorage
->>> import transaction as txn
->>> from BTrees.OOBTree import OOBTree
->>> from ZODB.FileStorage.fsdump import fsdump  # we're testing this
-
-Create an empty FileStorage.
-
->>> st = FileStorage(path)
-
-For an empty DB, fsdump() produces no output:
-
->>> fsdump(path)
-
-Create a root object and try again:
-
->>> db = ZODB.DB(st) # yes, that creates a root object!
->>> fsdump(path) #doctest: +ELLIPSIS
-Trans #00000 tid=... time=... offset=52
-    status=' ' user='' description='initial database creation'
-  data #00000 oid=0000000000000000 size=66 class=persistent.mapping.PersistentMapping
-
-Now we see the first transaction, with the root object.
-
-Let's add a BTree:
-
->>> root = db.open().root()
->>> root['tree'] = OOBTree()
->>> txn.get().note('added an OOBTree')
->>> txn.get().commit()
->>> fsdump(path) #doctest: +ELLIPSIS
-Trans #00000 tid=... time=... offset=52
-    status=' ' user='' description='initial database creation'
-  data #00000 oid=0000000000000000 size=66 class=persistent.mapping.PersistentMapping
-Trans #00001 tid=... time=... offset=207
-    status=' ' user='' description='added an OOBTree'
-  data #00000 oid=0000000000000000 size=114 class=persistent.mapping.PersistentMapping
-  data #00001 oid=0000000000000001 size=30 class=BTrees._OOBTree.OOBTree
-
-Now we see two transactions and two changed objects.
-
-Clean up.
-
->>> st.close()
->>> st.cleanup() # remove .fs, .index, etc
-"""
-
-from zope.testing import doctest
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/ZODB/tests/test_storage.py b/branches/bug1734/src/ZODB/tests/test_storage.py
deleted file mode 100644
index 2c815929..00000000
--- a/branches/bug1734/src/ZODB/tests/test_storage.py
+++ /dev/null
@@ -1,164 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""A storage used for unittests.
-
-The primary purpose of this module is to have a minimal multi-version
-storage to use for unit tests.  MappingStorage isn't sufficient.
-Since even a minimal storage has some complexity, we run standard
-storage tests against the test storage.
-"""
-
-import bisect
-import threading
-import unittest
-
-from ZODB.BaseStorage import BaseStorage
-from ZODB import POSException
-from ZODB.utils import z64
-
-from ZODB.tests import StorageTestBase
-from ZODB.tests \
-     import BasicStorage, MTStorage, Synchronization, PackableStorage, \
-     RevisionStorage
-
-class Transaction(object):
-    """Hold data for current transaction for MinimalMemoryStorage."""
-
-    def __init__(self, tid):
-        self.index = {}
-        self.tid = tid
-
-    def store(self, oid, data):
-        self.index[(oid, self.tid)] = data
-
-    def cur(self):
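-        # Map every oid stored by this transaction to this tid.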
-        return dict.fromkeys([oid for oid, tid in self.index.keys()], self.tid)
-
-class MinimalMemoryStorage(BaseStorage, object):
-    """Simple in-memory storage that supports revisions.
-
-    This storage is needed to test multi-version concurrency control.
-    It is similar to MappingStorage, but keeps multiple revisions.  It
-    does not support versions.  It doesn't implement operations like
-    pack(), because they aren't necessary for testing.
-    """
-
-    def __init__(self):
-        super(MinimalMemoryStorage, self).__init__("name")
-        # _index maps oid, tid pairs to data records
-        self._index = {}
-        # _cur maps oid to current tid
-        self._cur = {}
-
-    def isCurrent(self, oid, serial):
-        return serial == self._cur[oid]
-
-    def hook(self, oid, tid, version):
-        # A hook for testing
-        pass
-
-    def __len__(self):
-        return len(self._index)
-
-    def _clear_temp(self):
-        pass
-
-    def loadEx(self, oid, version):
-        self._lock_acquire()
-        try:
-            assert not version
-            tid = self._cur[oid]
-            self.hook(oid, tid, version)
-            return self._index[(oid, tid)], tid, ""
-        finally:
-            self._lock_release()
-
-    def load(self, oid, version):
-        return self.loadEx(oid, version)[:2]
-
-    def _begin(self, tid, u, d, e):
-        self._txn = Transaction(tid)
-
-    def store(self, oid, serial, data, v, txn):
-        if txn is not self._transaction:
-            raise POSException.StorageTransactionError(self, txn)
-        assert not v
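-        # A store conflicts when the caller's serial doesn't match the
-        # oid's current tid, unless the oid has no committed revision
-        # yet (None or z64) or serial is None.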
-        if self._cur.get(oid) != serial:
-            if not (serial is None or self._cur.get(oid) in [None, z64]):
-                raise POSException.ConflictError(
-                    oid=oid, serials=(self._cur.get(oid), serial), data=data)
-        self._txn.store(oid, data)
-        return self._tid
-
-    def _abort(self):
-        del self._txn
-
-    def _finish(self, tid, u, d, e):
-        self._lock_acquire()
-        try:
-            self._index.update(self._txn.index)
-            self._cur.update(self._txn.cur())
-            self._ltid = self._tid
-        finally:
-            self._lock_release()
-
-    def lastTransaction(self):
-        return self._ltid
-
-    def loadBefore(self, the_oid, the_tid):
-        # It's okay if loadBefore() is really expensive, because this
-        # storage is just used for testing.
-        self._lock_acquire()
-        try:
-            tids = [tid for oid, tid in self._index if oid == the_oid]
-            if not tids:
-                raise KeyError, the_oid
-            tids.sort()
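-            # bisect_left finds the first tid >= the_tid; the record we
-            # want is the newest revision strictly before the_tid, one
-            # slot to the left.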
-            i = bisect.bisect_left(tids, the_tid) - 1
-            if i == -1:
-                return None
-            tid = tids[i]
-            j = i + 1
-            if j == len(tids):
-                end_tid = None
-            else:
-                end_tid = tids[j]
-            return self._index[(the_oid, tid)], tid, end_tid
-        finally:
-            self._lock_release()
-
-    def loadSerial(self, oid, serial):
-        self._lock_acquire()
-        try:
-            return self._index[(oid, serial)]
-        finally:
-            self._lock_release()
-
-class MinimalTestSuite(StorageTestBase.StorageTestBase,
-                       BasicStorage.BasicStorage,
-                       MTStorage.MTStorage,
-                       Synchronization.SynchronizedStorage,
-                       RevisionStorage.RevisionStorage,
-                       ):
-
-    def setUp(self):
-        self._storage = MinimalMemoryStorage()
-
-    # we don't implement undo
-
-    def checkLoadBeforeUndo(self):
-        pass
-
-def test_suite():
-    return unittest.makeSuite(MinimalTestSuite, "check")
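-
-# A minimal sketch (assuming only this module and ZODB itself) of
-# driving MinimalMemoryStorage as a live backend; testmvcc.py uses it
-# the same way.
-def _example_usage():
-    from ZODB import DB
-    import transaction
-    db = DB(MinimalMemoryStorage())
-    cn = db.open()
-    cn.root()['x'] = 1      # becomes a new revision in _index at commit
-    transaction.commit()
-    cn.close()
-    db.close()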
diff --git a/branches/bug1734/src/ZODB/tests/testfsIndex.py b/branches/bug1734/src/ZODB/tests/testfsIndex.py
deleted file mode 100644
index 64760e4e..00000000
--- a/branches/bug1734/src/ZODB/tests/testfsIndex.py
+++ /dev/null
@@ -1,176 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import unittest
-import random
-
-from ZODB.fsIndex import fsIndex
-from ZODB.utils import p64, z64
-
-
-class Test(unittest.TestCase):
-
-    def setUp(self):
-        self.index = fsIndex()
-
-        for i in range(200):
-            self.index[p64(i * 1000)] = (i * 1000L + 1)
-
-    def testInserts(self):
-        index = self.index
-
-        for i in range(0,200):
-            self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000L+1)))
-
-        self.assertEqual(len(index), 200)
-
-        key=p64(2000)
-
-        self.assertEqual(index.get(key), 2001)
-
-        key=p64(2001)
-        self.assertEqual(index.get(key), None)
-        self.assertEqual(index.get(key, ''), '')
-
-        # self.failUnless(len(index._data) > 1)
-
-    def testUpdate(self):
-        index = self.index
-        d={}
-
-        for i in range(200):
-            d[p64(i*1000)]=(i*1000L+1)
-
-        index.update(d)
-
-        for i in range(400,600):
-            d[p64(i*1000)]=(i*1000L+1)
-
-        index.update(d)
-
-        for i in range(100, 500):
-            d[p64(i*1000)]=(i*1000L+2)
-
-        index.update(d)
-
-        self.assertEqual(index.get(p64(2000)), 2001)
-        self.assertEqual(index.get(p64(599000)), 599001)
-        self.assertEqual(index.get(p64(399000)), 399002)
-        self.assertEqual(len(index), 600)
-
-    def testKeys(self):
-        keys = list(iter(self.index))
-        keys.sort()
-
-        for i, k in enumerate(keys):
-            self.assertEqual(k, p64(i * 1000))
-
-        keys = list(self.index.iterkeys())
-        keys.sort()
-
-        for i, k in enumerate(keys):
-            self.assertEqual(k, p64(i * 1000))
-
-        keys = self.index.keys()
-        keys.sort()
-
-        for i, k in enumerate(keys):
-            self.assertEqual(k, p64(i * 1000))
-
-    def testValues(self):
-        values = list(self.index.itervalues())
-        values.sort()
-
-        for i, v in enumerate(values):
-            self.assertEqual(v, (i * 1000L + 1))
-
-        values = self.index.values()
-        values.sort()
-
-        for i, v in enumerate(values):
-            self.assertEqual(v, (i * 1000L + 1))
-
-    def testItems(self):
-        items = list(self.index.iteritems())
-        items.sort()
-
-        for i, item in enumerate(items):
-            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))
-
-        items = self.index.items()
-        items.sort()
-
-        for i, item in enumerate(items):
-            self.assertEqual(item, (p64(i * 1000), (i * 1000L + 1)))
-
-    def testMaxKey(self):
-        index = self.index
-        index.clear()
-
-        # An empty index should complain.
-        self.assertRaises(ValueError, index.maxKey)
-
-        # Now build up a tree with random values, and check maxKey at each
-        # step.
-        correct_max = ""   # smaller than anything we'll add
-        for i in range(1000):
-            key = p64(random.randrange(100000000))
-            index[key] = i
-            correct_max = max(correct_max, key)
-            index_max = index.maxKey()
-            self.assertEqual(index_max, correct_max)
-
-        index.clear()
-        a = '\000\000\000\000\000\001\000\000'
-        b = '\000\000\000\000\000\002\000\000'
-        c = '\000\000\000\000\000\003\000\000'
-        d = '\000\000\000\000\000\004\000\000'
-        index[a] = 1
-        index[c] = 2
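-        # maxKey(upper) returns the largest key <= upper, so b's floor
-        # is a, d's floor is c, and nothing is <= z64.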
-        self.assertEqual(index.maxKey(b), a)
-        self.assertEqual(index.maxKey(d), c)
-        self.assertRaises(ValueError, index.maxKey, z64)
-
-    def testMinKey(self):
-        index = self.index
-        index.clear()
-
-        # An empty index should complain.
-        self.assertRaises(ValueError, index.minKey)
-
-        # Now build up a tree with random values, and check minKey at each
-        # step.
-        correct_min = "\xff" * 8   # bigger than anything we'll add
-        for i in range(1000):
-            key = p64(random.randrange(100000000))
-            index[key] = i
-            correct_min = min(correct_min, key)
-            index_min = index.minKey()
-            self.assertEqual(index_min, correct_min)
-
-        index.clear()
-        a = '\000\000\000\000\000\001\000\000'
-        b = '\000\000\000\000\000\002\000\000'
-        c = '\000\000\000\000\000\003\000\000'
-        d = '\000\000\000\000\000\004\000\000'
-        index[a] = 1
-        index[c] = 2
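-        # minKey(lower) returns the smallest key >= lower, so b's
-        # ceiling is c, and d (above every key) raises ValueError.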
-        self.assertEqual(index.minKey(b), c)
-        self.assertRaises(ValueError, index.minKey, d)
-
-def test_suite():
-    loader=unittest.TestLoader()
-    return loader.loadTestsFromTestCase(Test)
-
-if __name__=='__main__':
-    unittest.TextTestRunner().run(test_suite())
diff --git a/branches/bug1734/src/ZODB/tests/testfsoids.py b/branches/bug1734/src/ZODB/tests/testfsoids.py
deleted file mode 100644
index 1c2e7265..00000000
--- a/branches/bug1734/src/ZODB/tests/testfsoids.py
+++ /dev/null
@@ -1,177 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-r"""
-fsoids test, of the workhorse fsoids.Tracer class
-=================================================
-
-Let's get a temp file path to work with first.
-
->>> import tempfile
->>> path = tempfile.mktemp('.fs', 'Data')
->>> print 'path:', path #doctest: +ELLIPSIS
-path: ...Data...fs
-
-More imports.
-
->>> import ZODB
->>> from ZODB.FileStorage import FileStorage
->>> import transaction as txn
->>> from BTrees.OOBTree import OOBTree
->>> from ZODB.FileStorage.fsoids import Tracer  # we're testing this
-
-Create an empty FileStorage.
-
->>> st = FileStorage(path)
-
-There's not much of interest in an empty DB!
-
->>> t = Tracer(path)
->>> t.register_oids(0x123456)
->>> t.register_oids(1)
->>> t.register_oids(0)
->>> t.run()
->>> t.report()
-oid 0x00 <unknown> 0 revisions
-    this oid was not defined (no data record for it found)
-oid 0x01 <unknown> 0 revisions
-    this oid was not defined (no data record for it found)
-oid 0x123456 <unknown> 0 revisions
-    this oid was not defined (no data record for it found)
-
-That didn't tell us much, but does show that the specified oids are sorted
-into increasing order.
-
-Create a root object and try again:
-
->>> db = ZODB.DB(st) # yes, that creates a root object!
->>> t = Tracer(path)
->>> t.register_oids(0, 1)
->>> t.run(); t.report() #doctest: +ELLIPSIS
-oid 0x00 persistent.mapping.PersistentMapping 1 revision
-    tid 0x... offset=4 ...
-        tid user=''
-        tid description='initial database creation'
-        new revision persistent.mapping.PersistentMapping at 52
-oid 0x01 <unknown> 0 revisions
-    this oid was not defined (no data record for it found)
-
-So we see oid 0 has been used in our one transaction, and that it was created
-there, and is a PersistentMapping.  4 is the file offset to the start of the
-transaction record, and 52 is the file offset to the start of the data record
-for oid 0 within this transaction.  Because tids are timestamps too, the
-"..." parts vary across runs.  The initial line for a tid actually looks like
-this:
-
-    tid 0x035748597843b877 offset=4 2004-08-20 20:41:28.187000
-
-Let's add a BTree and try again:
-
->>> root = db.open().root()
->>> root['tree'] = OOBTree()
->>> txn.get().note('added an OOBTree')
->>> txn.get().commit()
->>> t = Tracer(path)
->>> t.register_oids(0, 1)
->>> t.run(); t.report() #doctest: +ELLIPSIS
-oid 0x00 persistent.mapping.PersistentMapping 2 revisions
-    tid 0x... offset=4 ...
-        tid user=''
-        tid description='initial database creation'
-        new revision persistent.mapping.PersistentMapping at 52
-    tid 0x... offset=168 ...
-        tid user=''
-        tid description='added an OOBTree'
-        new revision persistent.mapping.PersistentMapping at 207
-        references 0x01 BTrees._OOBTree.OOBTree at 207
-oid 0x01 BTrees._OOBTree.OOBTree 1 revision
-    tid 0x... offset=168 ...
-        tid user=''
-        tid description='added an OOBTree'
-        new revision BTrees._OOBTree.OOBTree at 363
-        referenced by 0x00 persistent.mapping.PersistentMapping at 207
-
-So there are two revisions of oid 0 now, and the second references oid 1.
-
-One more, storing a reference in the BTree back to the root object:
-
->>> tree = root['tree']
->>> tree['root'] = root
->>> txn.get().note('circling back to the root')
->>> txn.get().commit()
->>> t = Tracer(path)
->>> t.register_oids(0, 1, 2)
->>> t.run(); t.report() #doctest: +ELLIPSIS
-oid 0x00 persistent.mapping.PersistentMapping 2 revisions
-    tid 0x... offset=4 ...
-        tid user=''
-        tid description='initial database creation'
-        new revision persistent.mapping.PersistentMapping at 52
-    tid 0x... offset=168 ...
-        tid user=''
-        tid description='added an OOBTree'
-        new revision persistent.mapping.PersistentMapping at 207
-        references 0x01 BTrees._OOBTree.OOBTree at 207
-    tid 0x... offset=443 ...
-        tid user=''
-        tid description='circling back to the root'
-        referenced by 0x01 BTrees._OOBTree.OOBTree at 491
-oid 0x01 BTrees._OOBTree.OOBTree 2 revisions
-    tid 0x... offset=168 ...
-        tid user=''
-        tid description='added an OOBTree'
-        new revision BTrees._OOBTree.OOBTree at 363
-        referenced by 0x00 persistent.mapping.PersistentMapping at 207
-    tid 0x... offset=443 ...
-        tid user=''
-        tid description='circling back to the root'
-        new revision BTrees._OOBTree.OOBTree at 491
-        references 0x00 persistent.mapping.PersistentMapping at 491
-oid 0x02 <unknown> 0 revisions
-    this oid was not defined (no data record for it found)
-
-Note that we didn't create any new object there (oid 2 is still unused); we
-just made oid 1 refer to oid 0.  Therefore there's a new "new revision" line
-in the output for oid 1.  Note that there's also new output for oid 0, even
-though the root object didn't change:  we got new output for oid 0 because
-it's a traced oid and the new transaction made a new reference *to* it.
-
-Since the Tracer constructor takes only one argument, the only sane thing
-you can do to make it fail is to give it a path to a file that doesn't
-exist:
-
->>> Tracer('/eiruowieuu/lsijflfjlsijflsdf/eurowiurowioeuri/908479287.fs')
-Traceback (most recent call last):
-  ...
-ValueError: must specify an existing FileStorage
-
-You get the same kind of exception if you pass it a path to an existing
-directory (the path must be to a file, not a directory):
-
->>> import os
->>> Tracer(os.path.dirname(__file__))
-Traceback (most recent call last):
-  ...
-ValueError: must specify an existing FileStorage
-
-
-Clean up.
->>> st.close()
->>> st.cleanup() # remove .fs, .index, etc
-"""
-
-from zope.testing import doctest
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/ZODB/tests/testmvcc.py b/branches/bug1734/src/ZODB/tests/testmvcc.py
deleted file mode 100644
index 4e11d003..00000000
--- a/branches/bug1734/src/ZODB/tests/testmvcc.py
+++ /dev/null
@@ -1,364 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-r"""
-Multi-version concurrency control tests
-=======================================
-
-Multi-version concurrency control (MVCC) exploits storages that store
-multiple revisions of an object to avoid read conflicts.  Normally
-when an object is read from the storage, its most recent revision is
-read.  Under MVCC, an older revision may be read so that the transaction
-sees a consistent view of the database.
-
-ZODB guarantees execution-time consistency: A single transaction will
-always see a consistent view of the database while it is executing.
-If transaction A is running, has already read an object O1, and a
-different transaction B modifies object O2, then transaction A can no
-longer read the current revision of O2.  It must either read the
-version of O2 that is consistent with O1 or raise a ReadConflictError.
-When MVCC is in use, A will do the former.
-
-This note includes doctests that explain how MVCC is implemented (and
-test that the implementation is correct).  The tests use a
-MinimalMemoryStorage that implements MVCC support, but not much else.
-
->>> from ZODB.tests.test_storage import MinimalMemoryStorage
->>> from ZODB import DB
->>> db = DB(MinimalMemoryStorage())
-
-We will use two different connections with the experimental
-setLocalTransaction() method to make sure that the connections act
-independently, even though they'll be run from a single thread.
-
->>> import transaction
->>> tm1 = transaction.TransactionManager()
->>> cn1 = db.open(txn_mgr=tm1)
-
-The test will just use some MinPO objects.  The next few lines just
-set up an initial database state.
-
->>> from ZODB.tests.MinPO import MinPO
->>> r = cn1.root()
->>> r["a"] = MinPO(1)
->>> r["b"] = MinPO(1)
->>> tm1.get().commit()
-
-Now open a second connection.
-
->>> tm2 = transaction.TransactionManager()
->>> cn2 = db.open(txn_mgr=tm2)
-
-Connection high-water mark
---------------------------
-
-The ZODB Connection tracks a transaction high-water mark, which
-bounds the latest transaction id that can be read by the current
-transaction and still present a consistent view of the database.
-Transactions with ids up to but not including the high-water mark
-are OK to read.  When a transaction commits, the database sends
-invalidations to all the other connections; the invalidation contains
-the transaction id and the oids of modified objects.  The Connection
-stores the high-water mark in _txn_time, which is set to None until
-an invalidation arrives.
-
->>> cn = db.open()
-
->>> print cn._txn_time
-None
->>> cn.invalidate(100, dict.fromkeys([1, 2]))
->>> cn._txn_time
-100
->>> cn.invalidate(200, dict.fromkeys([1, 2]))
->>> cn._txn_time
-100
-
-A connection's high-water mark is set to the transaction id taken from
-the first invalidation processed by the connection.  Transaction ids are
-monotonically increasing, so the first one seen during the current
-transaction remains the high-water mark for the duration of the
-transaction.
-
-We'd like simple abort and commit calls to make txn boundaries,
-but that doesn't work unless an object is modified.  sync() will abort
-a transaction and process invalidations.
-
->>> cn.sync()
->>> print cn._txn_time  # the high-water mark got reset to None
-None
-
-Basic functionality
--------------------
-
-The next bit of code includes a simple MVCC test.  One transaction
-will modify "a."  The other transaction will then modify "b" and commit.
-
->>> r1 = cn1.root()
->>> r1["a"].value = 2
->>> tm1.get().commit()
->>> txn = db.lastTransaction()
-
-The second connection has its high-water mark set now.
-
->>> cn2._txn_time == txn
-True
-
-It is safe to read "b," because it was not modified by the concurrent
-transaction.
-
->>> r2 = cn2.root()
->>> r2["b"]._p_serial < cn2._txn_time
-True
->>> r2["b"].value
-1
->>> r2["b"].value = 2
-
-It is not safe, however, to read the current revision of "a" because
-it was modified at the high-water mark.  If we read it, we'll get a
-non-current version.
-
->>> r2["a"].value
-1
->>> r2["a"]._p_serial < cn2._txn_time
-True
-
-We can confirm that we have a non-current revision by asking the
-storage.
-
->>> db._storage.isCurrent(r2["a"]._p_oid, r2["a"]._p_serial)
-False
-
-It's possible to modify "a", but we get a conflict error when we
-commit the transaction.
-
->>> r2["a"].value = 3
->>> tm2.get().commit()
-Traceback (most recent call last):
- ...
-ConflictError: database conflict error (oid 0x01, class ZODB.tests.MinPO.MinPO)
-
->>> tm2.get().abort()
-
-This example will demonstrate that we can commit a transaction if we only
-modify current revisions.
-
->>> print cn2._txn_time
-None
-
->>> r1 = cn1.root()
->>> r1["a"].value = 3
->>> tm1.get().commit()
->>> txn = db.lastTransaction()
->>> cn2._txn_time == txn
-True
-
->>> r2["b"].value = r2["a"].value + 1
->>> r2["b"].value
-3
->>> tm2.get().commit()
->>> print cn2._txn_time
-None
-
-Object cache
-------------
-
-A Connection keeps objects in its cache so that multiple database
-references will always point to the same Python object.  At
-transaction boundaries, objects modified by other transactions are
-ghostified so that the next transaction doesn't see stale state.  We
-need to be sure the non-current objects loaded by MVCC are always
-ghosted.  It should be trivial, because MVCC is only used when an
-invalidation has been received for an object.
-
-First get the database back in an initial state.
-
->>> cn1.sync()
->>> r1["a"].value = 0
->>> r1["b"].value = 0
->>> tm1.get().commit()
-
->>> cn2.sync()
->>> r2["a"].value
-0
->>> r2["b"].value = 1
->>> tm2.get().commit()
-
->>> r1["b"].value
-0
->>> cn1.sync()  # cn2 modified 'b', so cn1 should get a ghost for b
->>> r1["b"]._p_state  # -1 means GHOST
--1
-
-Closing the connection, committing a transaction, and aborting a transaction
-should all have the same effect on non-current objects in cache.
-
->>> def testit():
-...     cn1.sync()
-...     r1["a"].value = 0
-...     r1["b"].value = 0
-...     tm1.get().commit()
-...     cn2.sync()
-...     r2["b"].value = 1
-...     tm2.get().commit()
-
->>> testit()
->>> r1["b"]._p_state  # 0 means UPTODATE, although note it's an older revision
-0
->>> r1["b"].value
-0
->>> r1["a"].value = 1
->>> tm1.get().commit()
->>> r1["b"]._p_state
--1
-
-When a connection is closed, it is saved by the database.  It will be
-reused by the next open() call (along with its object cache).
-
->>> testit()
->>> r1["a"].value = 1
->>> tm1.get().abort()
->>> cn1.close()
->>> cn3 = db.open()
->>> cn1 is cn3
-True
->>> r1 = cn1.root()
-
-Although "b" is a ghost in cn1 at this point (because closing a connection
-has the same effect on non-current objects in the connection's cache as
-committing a transaction), not every object is a ghost.  The root was in
-the cache and was current, so our first reference to it doesn't return
-a ghost.
-
->>> r1._p_state # UPTODATE
-0
->>> r1["b"]._p_state # GHOST
--1
-
->>> cn1._transaction = None # See the Cleanup section below
-
-Late invalidation
------------------
-
-The combination of ZEO and MVCC adds more complexity.  Since
-invalidations are delivered asynchronously by ZEO, it is possible for
-an invalidation to arrive just after a request to load the invalidated
-object is sent.  The connection can't use the just-loaded data,
-because the invalidation arrived first.  The complexity for MVCC is
-that it must check for invalidated objects after it has loaded them,
-just in case.
-
-Rather than add all the complexity of ZEO to these tests, the
-MinimalMemoryStorage has a hook.  We'll write a subclass that will
-deliver an invalidation when it loads an object.  The hook allows us
-to test the Connection code.
-
->>> class TestStorage(MinimalMemoryStorage):
-...    def __init__(self):
-...        self.hooked = {}
-...        self.count = 0
-...        super(TestStorage, self).__init__()
-...    def registerDB(self, db, limit):
-...        self.db = db
-...    def hook(self, oid, tid, version):
-...        if oid in self.hooked:
-...            self.db.invalidate(tid, {oid:1})
-...            self.count += 1
-
-We can execute this test with a single connection, because we're
-synthesizing the invalidation that is normally generated by the second
-connection.  We need to create two revisions so that there is a
-non-current revision to load.
-
->>> ts = TestStorage()
->>> db = DB(ts)
->>> cn1 = db.open(txn_mgr=tm1)
->>> r1 = cn1.root()
->>> r1["a"] = MinPO(0)
->>> r1["b"] = MinPO(0)
->>> tm1.get().commit()
->>> r1["b"].value = 1
->>> tm1.get().commit()
->>> cn1.cacheMinimize()  # makes everything in cache a ghost
-
->>> oid = r1["b"]._p_oid
->>> ts.hooked[oid] = 1
-
-Once the oid is hooked, an invalidation will be delivered the next
-time it is activated.  The code below activates the object, then
-confirms that the hook worked and that the old state was retrieved.
-
->>> oid in cn1._invalidated
-False
->>> r1["b"]._p_state
--1
->>> r1["b"]._p_activate()
->>> oid in cn1._invalidated
-True
->>> ts.count
-1
->>> r1["b"].value
-0
-
-No earlier revision available
------------------------------
-
-We'll reuse the code from the example above, except that there will
-only be a single revision of "b."  As a result, the attempt to
-activate "b" will result in a ReadConflictError.
-
->>> ts = TestStorage()
->>> db = DB(ts)
->>> cn1 = db.open(txn_mgr=tm1)
->>> r1 = cn1.root()
->>> r1["a"] = MinPO(0)
->>> r1["b"] = MinPO(0)
->>> tm1.get().commit()
->>> cn1.cacheMinimize()  # makes everything in cache a ghost
-
->>> oid = r1["b"]._p_oid
->>> ts.hooked[oid] = 1
-
-Again, once the oid is hooked, an invalidation will be delivered the next
-time it is activated.  The code below activates the object, but unlike the
-section above, there is no older state to retrieve.
-
->>> oid in cn1._invalidated
-False
->>> r1["b"]._p_state
--1
->>> r1["b"]._p_activate()
-Traceback (most recent call last):
- ...
-ReadConflictError: database read conflict error (oid 0x02, class ZODB.tests.MinPO.MinPO)
->>> oid in cn1._invalidated
-True
->>> ts.count
-1
-
-Cleanup
--------
-
-The setLocalTransaction() feature creates cyclic trash involving the
-Connection and Transaction.  The Transaction has an __del__ method,
-which prevents the cycle from being collected.  There's no API for
-clearing the Connection's local transaction.
-
->>> cn1._transaction = None
->>> cn2._transaction = None
-
-"""
-
-import doctest
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/ZODB/tests/util.py b/branches/bug1734/src/ZODB/tests/util.py
deleted file mode 100644
index c4ed8c4a..00000000
--- a/branches/bug1734/src/ZODB/tests/util.py
+++ /dev/null
@@ -1,40 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Conventience function for creating test databases
-
-$Id$
-"""
-
-import time
-import persistent
-import transaction
-from ZODB.MappingStorage import MappingStorage
-from ZODB.DB import DB as _DB
-
-def DB(name='Test'):
-    return _DB(MappingStorage(name))
-
-def commit():
-    transaction.commit()
-
-def pack(db):
-    db.pack(time.time()+1)
-
-class P(persistent.Persistent):
-
-    def __init__(self, name):
-        self.name = name
-
-    def __repr__(self):
-        return 'P(%s)' % self.name
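-
-# A minimal usage sketch of the helpers above (illustration only):
-#
-#     db = DB()                      # MappingStorage-backed test DB
-#     conn = db.open()
-#     conn.root()['bob'] = P('bob')
-#     commit()                       # transaction.commit()
-#     pack(db)                       # pack away old revisions
-#     db.close()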
diff --git a/branches/bug1734/src/ZODB/tests/warnhook.py b/branches/bug1734/src/ZODB/tests/warnhook.py
deleted file mode 100644
index a09002cb..00000000
--- a/branches/bug1734/src/ZODB/tests/warnhook.py
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import warnings
-
-class WarningsHook:
-    """Hook to capture warnings generated by Python.
-
-    The function warnings.showwarning() is designed to be hooked by
-    application code, allowing the application to customize the way it
-    handles warnings.
-
-    This hook captures the unformatted warning information and stores
-    it in a list.  A test can inspect this list after the test is over.
-
-    Issues:
-
-    The warnings module has lots of delicate internal state.  If
-    a warning has been reported once, it won't be reported again.  It
-    may be necessary to extend this class with a mechanism for
-    modifying the internal state so that we can be guaranteed a
-    warning will be reported.
-
-    If Python is run with a warnings filter, e.g. python -Werror,
-    then a test that is trying to inspect a particular warning will
-    fail.  Perhaps this class can be extended to install more-specific
-    filters that allow the test to work anyway.
-    """
-
-    def __init__(self):
-        self.original = None
-        self.warnings = []
-
-    def install(self):
-        self.original = warnings.showwarning
-        warnings.showwarning = self.showwarning
-
-    def uninstall(self):
-        assert self.original is not None
-        warnings.showwarning = self.original
-        self.original = None
-
-    def showwarning(self, message, category, filename, lineno):
-        self.warnings.append((str(message), category, filename, lineno))
-
-    def clear(self):
-        self.warnings = []
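-
-# A minimal usage sketch (code_that_warns() is a hypothetical stand-in):
-#
-#     hook = WarningsHook()
-#     hook.install()
-#     try:
-#         code_that_warns()
-#     finally:
-#         hook.uninstall()
-#     for message, category, filename, lineno in hook.warnings:
-#         print message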
diff --git a/branches/bug1734/src/ZODB/transact.py b/branches/bug1734/src/ZODB/transact.py
deleted file mode 100644
index 329d717d..00000000
--- a/branches/bug1734/src/ZODB/transact.py
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Tools to simplify transactions within applications."""
-
-# transaction is used by _commit() and transact() below.
-import transaction
-from ZODB.POSException import ReadConflictError, ConflictError
-
-def _commit(note):
-    t = transaction.get()
-    if note:
-        t.note(note)
-    t.commit()
-
-def transact(f, note=None, retries=5):
-    """Returns transactional version of function argument f.
-
-    Higher-order function that converts a regular function into
-    a transactional function.  The transactional function will
-    retry up to retries times before giving up.  If note is given, it
-    will be added to the transaction metadata when it commits.
-
-    The retries occur on ConflictErrors.  If some other
-    TransactionError occurs, the transaction will not be retried.
-    """
-
-    # TODO:  deal with ZEO disconnected errors?
-
-    def g(*args, **kwargs):
-        n = retries
-        while n:
-            n -= 1
-            try:
-                r = f(*args, **kwargs)
-            except ReadConflictError, msg:
-                transaction.abort()
-                if not n:
-                    raise
-                continue
-            try:
-                _commit(note)
-            except ConflictError, msg:
-                transaction.abort()
-                if not n:
-                    raise
-                continue
-            return r
-        raise RuntimeError, "couldn't commit transaction"
-    return g
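-
-# A minimal usage sketch (hypothetical function and objects; assumes an
-# open ZODB connection `cn` and the default transaction manager):
-#
-#     def give_raise(person, pct):
-#         person.salary *= 1 + pct
-#
-#     give_raise_tx = transact(give_raise, note='annual raise', retries=3)
-#     give_raise_tx(cn.root()['bob'], 0.05)  # commits, retrying on conflict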
diff --git a/branches/bug1734/src/ZODB/utils.py b/branches/bug1734/src/ZODB/utils.py
deleted file mode 100644
index 3ee601fa..00000000
--- a/branches/bug1734/src/ZODB/utils.py
+++ /dev/null
@@ -1,300 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-import sys
-import time
-import struct
-from struct import pack, unpack
-from binascii import hexlify
-import cPickle as pickle
-from cStringIO import StringIO
-import weakref
-import warnings
-
-from persistent.TimeStamp import TimeStamp
-
-__all__ = ['z64',
-           't32',
-           'p64',
-           'u64',
-           'U64',
-           'cp',
-           'newTimeStamp',
-           'oid_repr',
-           'serial_repr',
-           'tid_repr',
-           'positive_id',
-           'get_refs',
-           'readable_tid_repr',
-           'WeakSet',
-           'DEPRECATED_ARGUMENT',
-           'deprecated36',
-           'get_pickle_metadata',
-          ]
-
-# A unique marker to give as the default value for a deprecated argument.
-# The method should then do a
-#
-#     if that_arg is not DEPRECATED_ARGUMENT:
-#         complain
-#
-# dance.
-DEPRECATED_ARGUMENT = object()
-
-# Raise DeprecationWarning, noting that the deprecated thing will go
-# away in ZODB 3.6.  Point to the caller of our caller (i.e., at the
-# code using the deprecated thing).
-def deprecated36(msg):
-    warnings.warn("This will be removed in ZODB 3.6:\n%s" % msg,
-                  DeprecationWarning, stacklevel=3)
-
-z64 = '\0'*8
-
-# TODO The purpose of t32 is unclear.  Code that uses it is usually
-# of the form:
-#
-#    if e < 0:
-#        e = t32 - e
-#
-# Doesn't make sense (since e is negative, it creates a number larger than
-# t32).  If users said "e += t32", *maybe* it would make sense.
-t32 = 1L << 32
-
-assert sys.hexversion >= 0x02030000
-
-# The distinction between ints and longs is blurred in Python 2.2,
-# so u64() and U64() are really the same.
-
-def p64(v):
-    """Pack an integer or long into a 8-byte string"""
-    return pack(">Q", v)
-
-def u64(v):
-    """Unpack an 8-byte string into a 64-bit long integer."""
-    return unpack(">Q", v)[0]
-
-U64 = u64
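-
-# Round-trip sketch: p64 packs big-endian, u64 reverses it.
-#     p64(1)       ->  '\x00\x00\x00\x00\x00\x00\x00\x01'
-#     u64(p64(1))  ->  1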
-
-def cp(f1, f2, l):
-    """Copy l bytes from file f1 to file f2, in chunks of at most 8KB."""
-    read = f1.read
-    write = f2.write
-    n = 8192
-
-    while l > 0:
-        if n > l:
-            n = l
-        d = read(n)
-        if not d:
-            break
-        write(d)
-        l = l - len(d)
-
-
-def newTimeStamp(old=None,
-                 TimeStamp=TimeStamp,
-                 time=time.time, gmtime=time.gmtime):
-    t = time()
-    ts = TimeStamp(gmtime(t)[:5]+(t%60,))
-    if old is not None:
-        return ts.laterThan(old)
-    return ts
-
-
-def oid_repr(oid):
-    if isinstance(oid, str) and len(oid) == 8:
-        # Convert to hex and strip leading zeroes.
-        as_hex = hexlify(oid).lstrip('0')
-        # Ensure two characters per input byte.
-        if len(as_hex) & 1:
-            as_hex = '0' + as_hex
-        elif as_hex == '':
-            as_hex = '00'
-        return '0x' + as_hex
-    else:
-        return repr(oid)
-
-serial_repr = oid_repr
-tid_repr = serial_repr
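-
-# A couple of illustrative results (they follow from the hex-stripping
-# above):
-#     oid_repr('\x00' * 8)   ->  '0x00'
-#     oid_repr(p64(65537))   ->  '0x010001'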
-
-# For example, produce
-#     '0x03441422948b4399 2002-04-14 20:50:34.815000'
-# for 8-byte string tid '\x03D\x14"\x94\x8bC\x99'.
-def readable_tid_repr(tid):
-    result = tid_repr(tid)
-    if isinstance(tid, str) and len(tid) == 8:
-        result = "%s %s" % (result, TimeStamp(tid))
-    return result
-
-# Addresses can "look negative" on some boxes, some of the time.  If you
-# feed a "negative address" to an %x format, Python 2.3 displays it as
-# unsigned, but produces a FutureWarning, because Python 2.4 will display
-# it as signed.  So when you want to produce an address, use positive_id() to
-# obtain it.
-# _ADDRESS_MASK is 2**(number_of_bits_in_a_native_pointer).  Adding this to
-# a negative address gives a positive int with the same hex representation as
-# the significant bits in the original.
-
-_ADDRESS_MASK = 256 ** struct.calcsize('P')
-def positive_id(obj):
-    """Return id(obj) as a non-negative integer."""
-
-    result = id(obj)
-    if result < 0:
-        result += _ADDRESS_MASK
-        assert result > 0
-    return result
-
-# So full of undocumented magic it's hard to fathom.
-# The existence of cPickle.noload() isn't documented, and what it
-# does isn't documented either.  In general it unpickles, but doesn't
-# actually build any objects of user-defined classes.  Despite that
-# persistent_load is documented to be a callable, there's an
-# undocumented gimmick where if it's actually a list, for a PERSID or
-# BINPERSID opcode cPickle just appends "the persistent id" to that list.
-# Also despite that "a persistent id" is documented to be a string,
-# ZODB persistent ids are actually (often? always?) tuples, most often
-# of the form
-#     (oid, (module_name, class_name))
-# So the effect of the following is to dig into the object pickle, and
-# return a list of the persistent ids found (which are usually nested
-# tuples), without actually loading any modules or classes.
-# Note that pickle.py doesn't support any of this, it's undocumented code
-# only in cPickle.c.
-def get_refs(a_pickle):
-    # The pickle is in two parts.  First there's the class of the object,
-    # needed to build a ghost.  See get_pickle_metadata for how complicated
-    # this can get.  The second part is the state of the object.  We want
-    # to find all the persistent references within both parts (although I
-    # expect they can only appear in the second part).
-    f = StringIO(a_pickle)
-    u = pickle.Unpickler(f)
-    u.persistent_load = refs = []
-    u.noload() # class info
-    u.noload() # instance state info
-    return refs
-
-# Given a ZODB pickle, return pair of strings (module_name, class_name).
-# Do this without importing the module or class object.
-# See ZODB/serialize.py's module docstring for the only docs that exist about
-# ZODB pickle format.  If the code here gets smarter, please update those
-# docs to be at least as smart.  The code here doesn't appear to make sense
-# for what serialize.py calls formats 5 and 6.
-
-def get_pickle_metadata(data):
-    # ZODB's data records contain two pickles.  The first is the class
-    # of the object, the second is the object.  We're only trying to
-    # pick apart the first here, to extract the module and class names.
-    if data.startswith('(c'):   # pickle MARK GLOBAL opcode sequence
-        global_prefix = 2
-    elif data.startswith('c'):  # pickle GLOBAL opcode
-        global_prefix = 1
-    else:
-        global_prefix = 0
-
-    if global_prefix:
-        # Formats 1 and 2.
-        # Don't actually unpickle a class, because it will attempt to
-        # load the class.  Just break open the pickle and get the
-        # module and class from it.  The module and class names are given by
-        # newline-terminated strings following the GLOBAL opcode.
-        modname, classname, rest = data.split('\n', 2)
-        modname = modname[global_prefix:]   # strip GLOBAL opcode
-        return modname, classname
-
-    # Else there are a bunch of other possible formats.
-    f = StringIO(data)
-    u = pickle.Unpickler(f)
-    try:
-        class_info = u.load()
-    except Exception, err:
-        print "Error", err
-        return '', ''
-    if isinstance(class_info, tuple):
-        if isinstance(class_info[0], tuple):
-            # Formats 3 and 4.
-            modname, classname = class_info[0]
-        else:
-            # Formats 5 and 6 (probably) end up here.
-            modname, classname = class_info
-    else:
-        # This isn't a known format.
-        modname = repr(class_info)
-        classname = ''
-    return modname, classname
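-
-# For instance, a record whose class pickle begins with the GLOBAL opcode
-# ('c' + module + newline + class + newline) takes the fast path above:
-#     get_pickle_metadata('cfoo.bar\nBaz\n.')  ->  ('foo.bar', 'Baz')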
-
-# A simple implementation of weak sets, supplying just enough of Python's
-# sets.Set interface for our needs.
-
-class WeakSet(object):
-    """A set of objects that doesn't keep its elements alive.
-
-    The objects in the set must be weakly referencable.
-    The objects need not be hashable, and need not support comparison.
-    Two objects are considered to be the same iff their id()s are equal.
-
-    When the only references to an object are weak references (including
-    those from WeakSets), the object can be garbage-collected, and
-    will vanish from any WeakSets it may be a member of at that time.
-    """
-
-    def __init__(self):
-        # Map id(obj) to obj.  By using ids as keys, we avoid requiring
-        # that the elements be hashable or comparable.
-        self.data = weakref.WeakValueDictionary()
-
-    def __len__(self):
-        return len(self.data)
-
-    def __contains__(self, obj):
-        return id(obj) in self.data
-
-    # Same as a Set, add obj to the collection.
-    def add(self, obj):
-        self.data[id(obj)] = obj
-
-    # Same as a Set, remove obj from the collection, and raise
-    # KeyError if obj not in the collection.
-    def remove(self, obj):
-        del self.data[id(obj)]
-
-    # f is a one-argument function.  Execute f(elt) for each elt in the
-    # set.  f's return value is ignored.
-    def map(self, f):
-        for wr in self.as_weakref_list():
-            elt = wr()
-            if elt is not None:
-                f(elt)
-
-    # Return a list of weakrefs to all the objects in the collection.
-    # Because a weak dict is used internally, iteration is dicey (the
-    # underlying dict may change size during iteration, due to gc or
-    # activity from other threads).  as_weakref_list() is safe.
-    #
-    # Something like this should really be a method of Python's weak dicts.
-    # If we invoke self.data.values() instead, we get back a list of live
-    # objects instead of weakrefs.  If gc occurs while this list is alive,
-    # all the objects move to an older generation (because they're strongly
-    # referenced by the list!).  They can't get collected then, until a
-    # less frequent collection of the older generation.  Before then, if we
-    # invoke self.data.values() again, they're still alive, and if gc occurs
-    # while that list is alive they're all moved to yet an older generation.
-    # And so on.  Stress tests showed that it was easy to get into a state
-    # where a WeakSet grows without bounds, despite that almost all its
-    # elements are actually trash.  By returning a list of weakrefs instead,
-    # we avoid that, although the decision to use weakrefs is now very
-    # visible to our clients.
-    def as_weakref_list(self):
-        # We're cheating by breaking into the internals of Python's
-        # WeakValueDictionary here (accessing its .data attribute).
-        return self.data.data.values()
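-
-# A minimal usage sketch (elements must be weakly referenceable; `obj`
-# and `callback` are hypothetical):
-#
-#     ws = WeakSet()
-#     ws.add(obj)
-#     obj in ws          # True
-#     ws.map(callback)   # calls callback(elt) for each live element
-#     del obj            # once the last strong reference goes away,
-#     len(ws)            # ...the element silently drops out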
diff --git a/branches/bug1734/src/ZODB/winlock.c b/branches/bug1734/src/ZODB/winlock.c
deleted file mode 100755
index 811f5503..00000000
--- a/branches/bug1734/src/ZODB/winlock.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-static char winlock_doc_string[] =
-"Lock files on Windows."
-"\n"
-"$Id$\n";
-
-#include "Python.h"
-
-static PyObject *Error;
-
-#ifdef MS_WIN32
-
-#include <windows.h>
-#include <io.h>
-
-/* LOCK_FUNC is the shared type of Win32 LockFile and UnlockFile. */
-typedef WINBASEAPI BOOL WINAPI LOCK_FUNC(HANDLE, DWORD, DWORD, DWORD, DWORD);
-
-static PyObject *
-common(LOCK_FUNC func, PyObject *args)
-{
-	int fileno;
-	long h, ofslo, ofshi, lenlo, lenhi;
-
-	if (! PyArg_ParseTuple(args, "illll", &fileno,
-			       &ofslo, &ofshi,
-			       &lenlo, &lenhi))
-		return NULL;
-
-	h = _get_osfhandle(fileno);
-	if (h == -1) {
-		PyErr_SetString(Error, "_get_osfhandle failed");
-		return NULL;
-	}
-	if (func((HANDLE)h, ofslo, ofshi, lenlo, lenhi)) {
-		Py_INCREF(Py_None);
-		return Py_None;
-	}
-	PyErr_SetObject(Error, PyInt_FromLong(GetLastError()));
-	return NULL;
-}
-
-static PyObject *
-winlock(PyObject *ignored, PyObject *args)
-{
-	return common(LockFile, args);
-}
-
-static PyObject *
-winunlock(PyObject *ignored, PyObject *args)
-{
-	return common(UnlockFile, args);
-}
-
-static struct PyMethodDef methods[] = {
-    {"LockFile",	(PyCFunction)winlock,	METH_VARARGS,
-     "LockFile(fileno, offsetLow, offsetHigh, lengthLow, lengthHigh) -- "
-     "Lock the file associated with fileno"},
-
-    {"UnlockFile",	(PyCFunction)winunlock,	METH_VARARGS,
-     "UnlockFile(fileno, offsetLow, offsetHigh, lengthLow, lengthHigh) -- "
-     "Unlock the file associated with fileno"},
-
-    {NULL,		NULL}		/* sentinel */
-};
-#else
-
-static struct PyMethodDef methods[] = {
-  {NULL,		NULL}		/* sentinel */
-};
-
-#endif
-
-/* Initialization function for the module (*must* be called initwinlock) */
-
-#ifndef DL_EXPORT	/* declarations for DLL import/export */
-#define DL_EXPORT(RTYPE) RTYPE
-#endif
-DL_EXPORT(void)
-initwinlock(void)
-{
-	PyObject *m, *d;
-
-	if (!(Error=PyString_FromString("winlock.error")))
-		return;
-
-	/* Create the module and add the functions */
-	m = Py_InitModule4("winlock", methods, winlock_doc_string,
-			   (PyObject*)NULL, PYTHON_API_VERSION);
-
-	d = PyModule_GetDict(m);
-	PyDict_SetItemString(d, "error", Error);
-}
diff --git a/branches/bug1734/src/ZopeUndo/Prefix.py b/branches/bug1734/src/ZopeUndo/Prefix.py
deleted file mode 100644
index e3a28226..00000000
--- a/branches/bug1734/src/ZopeUndo/Prefix.py
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""ZODB undo support for Zope2.
-
-This package is used to support the Prefix object that Zope uses for
-undo.  It is a separate package only to aid configuration management.
-This package is included in Zope and ZODB3, so that ZODB3 is suitable
-for running a ZEO server that handles Zope undo.
-"""
-
-class Prefix:
-    """A Prefix() is equal to any string it as a prefix of.
-
-    This class can be compared to a string (or arbitrary sequence).
-    The comparison will return True if the prefix value is a prefix of
-    the string being compared.
-
-    Two prefixes can not be compared.
-    """
-
-    __no_side_effects__ = 1
-
-    def __init__(self, path):
-        self.value = len(path), path
-
-    def __cmp__(self, o):
-        l, v = self.value
-        return cmp(o[:l], v)
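-
-# For example (Python 2 __cmp__ semantics make == use __cmp__):
-#     Prefix('/a/b') == '/a/b/c'   ->  True
-#     Prefix('/a/b') == '/a/c'     ->  False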
diff --git a/branches/bug1734/src/ZopeUndo/__init__.py b/branches/bug1734/src/ZopeUndo/__init__.py
deleted file mode 100644
index 43cf0e3b..00000000
--- a/branches/bug1734/src/ZopeUndo/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
diff --git a/branches/bug1734/src/ZopeUndo/tests/__init__.py b/branches/bug1734/src/ZopeUndo/tests/__init__.py
deleted file mode 100644
index 43cf0e3b..00000000
--- a/branches/bug1734/src/ZopeUndo/tests/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
diff --git a/branches/bug1734/src/ZopeUndo/tests/testPrefix.py b/branches/bug1734/src/ZopeUndo/tests/testPrefix.py
deleted file mode 100644
index 63064ced..00000000
--- a/branches/bug1734/src/ZopeUndo/tests/testPrefix.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-from ZopeUndo.Prefix import Prefix
-
-import unittest
-
-class PrefixTest(unittest.TestCase):
-
-    def test(self):
-        p1 = (Prefix("/a/b"),
-              ("/a/b", "/a/b/c", "/a/b/c/d"),
-              ("", "/a/c"))
-
-        p2 = (Prefix(""),
-              ("", "/def", "/a/b", "/a/b/c", "/a/b/c/d"),
-              ())
-
-        for prefix, equal, notequal in p1, p2:
-            for s in equal:
-                self.assertEqual(prefix, s)
-            for s in notequal:
-                self.assertNotEqual(prefix, s)
-
-def test_suite():
-    return unittest.makeSuite(PrefixTest)
diff --git a/branches/bug1734/src/persistent/DEPENDENCIES.cfg b/branches/bug1734/src/persistent/DEPENDENCIES.cfg
deleted file mode 100644
index 2ba45e95..00000000
--- a/branches/bug1734/src/persistent/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-# the following are needed by the tests
-transaction
-ZODB
diff --git a/branches/bug1734/src/persistent/README.txt b/branches/bug1734/src/persistent/README.txt
deleted file mode 100644
index 3933be82..00000000
--- a/branches/bug1734/src/persistent/README.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-===================
-Persistence support
-===================
-
-(This document is under construction. More basic documentation will
- eventually appear here.)
-
-
-Overriding __getattr__, __getattribute__, __setattr__, and __delattr__
-----------------------------------------------------------------------
-
-Subclasses can override the attribute-management methods.  For the
-__getattr__ method, the behavior is like that for regular Python
-classes and for earlier versions of ZODB 3.
-
-For __getattribute__, __setattr__, and __delattr__, it is necessary to
-call certain methods defined by persistent.Persistent.  Detailed
-examples and documentation are provided in the test module,
-persistent.tests.test_overriding_attrs.
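-
-A hedged sketch of the __setattr__ case (an illustration only; it assumes
-_p_setattr() returns a true value exactly when the persistence machinery
-handles the attribute itself, as the test module describes):
-
-    import persistent
-
-    class Sample(persistent.Persistent):
-
-        def __setattr__(self, name, value):
-            if not self._p_setattr(name, value):
-                # Ordinary attribute: store it ourselves and record
-                # the change so the object gets written at commit.
-                self.__dict__[name] = value
-                self._p_changed = True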
diff --git a/branches/bug1734/src/persistent/SETUP.cfg b/branches/bug1734/src/persistent/SETUP.cfg
deleted file mode 100644
index 06170013..00000000
--- a/branches/bug1734/src/persistent/SETUP.cfg
+++ /dev/null
@@ -1,33 +0,0 @@
-# Extension information for zpkg.
-
-# Mark an "exported" header for use from other packages.
-# This is not needed for headers only used within the package.
-#
-header  cPersistence.h
-
-
-# This is included by cPersistence.h, so all users of cPersistence.h
-# have to be able to include this indirectly.
-#
-header  ring.h
-
-
-<extension cPersistence>
-  source     cPersistence.c
-  source     ring.c
-
-  depends-on cPersistence.h
-  depends-on ring.h
-</extension>
-
-<extension cPickleCache>
-  source     cPickleCache.c
-  source     ring.c
-
-  depends-on cPersistence.h
-  depends-on ring.h
-</extension>
-
-<extension TimeStamp>
-  source     TimeStamp.c
-</extension>
diff --git a/branches/bug1734/src/persistent/TimeStamp.c b/branches/bug1734/src/persistent/TimeStamp.c
deleted file mode 100644
index a589f9f9..00000000
--- a/branches/bug1734/src/persistent/TimeStamp.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2004 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#include "Python.h"
-#include <time.h>
-
-PyObject *TimeStamp_FromDate(int, int, int, int, int, double);
-PyObject *TimeStamp_FromString(const char *);
-
-static char TimeStampModule_doc[] =
-"A 64-bit TimeStamp used as a ZODB serial number.\n"
-"\n"
-"$Id$\n";
-
-
-typedef struct {
-    PyObject_HEAD
-    unsigned char data[8];
-} TimeStamp;
-
-/* The first dimension of the arrays below is non-leapyear / leapyear */
-
-static char month_len[2][12]={
-  {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
-  {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
-};
-
-static short joff[2][12] = {
-  {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334},
-  {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335}
-};
-
-static double gmoff=0;
-
-/* TODO:  May be better (faster) to store this in a file-static variable. */
-#define SCONV ((double)60) / ((double)(1<<16)) / ((double)(1<<16))
-
-static int
-leap(int year)
-{
-    return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
-}
-
-static int
-days_in_month(int year, int month)
-{
-    return month_len[leap(year)][month];
-}
-
-static double
-TimeStamp_yad(int y)
-{
-    double d, s;
-
-    y -= 1900;
-
-    d = (y - 1) * 365;
-    if (y > 0) {
-        s = 1.0;
-	y -= 1;
-    } else {
-	s = -1.0;
-	y = -y;
-    }
-    return d + s * (y / 4 - y / 100 + (y + 300) / 400);
-}
-
-static double
-TimeStamp_abst(int y, int mo, int d, int m, int s)
-{
-    return (TimeStamp_yad(y) + joff[leap(y)][mo] + d) * 86400 + m * 60 + s;
-}
-
-static int
-TimeStamp_init_gmoff(void)
-{
-    struct tm *t;
-    time_t z=0;
-
-    t = gmtime(&z);
-    if (t == NULL) {
-	PyErr_SetString(PyExc_SystemError, "gmtime failed");
-	return -1;
-    }
-
-    gmoff = TimeStamp_abst(t->tm_year+1900, t->tm_mon, t->tm_mday - 1,
-			   t->tm_hour * 60 + t->tm_min, t->tm_sec);
-
-    return 0;
-}
-
-static void
-TimeStamp_dealloc(TimeStamp *ts)
-{
-    PyObject_Del(ts);
-}
-
-static int
-TimeStamp_compare(TimeStamp *v, TimeStamp *w)
-{
-    int cmp = memcmp(v->data, w->data, 8);
-    if (cmp < 0) return -1;
-    if (cmp > 0) return 1;
-    return 0;
-}
-
-static long
-TimeStamp_hash(TimeStamp *self)
-{
-    register unsigned char *p = (unsigned char *)self->data;
-    register int len = 8;
-    register long x = *p << 7;
-    while (--len >= 0)
-	x = (1000003*x) ^ *p++;
-    x ^= 8;
-    if (x == -1)
-	x = -2;
-    return x;
-}
-
-typedef struct {
-    /* Field meanings follow from TimeStamp_unpack() below. */
-    int y;   /* year */
-    int m;   /* month, 1-12 */
-    int d;   /* day, 1-31 */
-    int mi;  /* minutes since midnight, 0-1439 */
-} TimeStampParts;
-
-static void
-TimeStamp_unpack(TimeStamp *self, TimeStampParts *p)
-{
-    unsigned long v;
-
-    v = (self->data[0] * 16777216 + self->data[1] * 65536
-	 + self->data[2] * 256 + self->data[3]);
-    p->y = v / 535680 + 1900;
-    p->m = (v % 535680) / 44640 + 1;
-    p->d = (v % 44640) / 1440 + 1;
-    p->mi = v % 1440;
-}
-
-static double
-TimeStamp_sec(TimeStamp *self)
-{
-    unsigned int v;
-
-    v = (self->data[4] * 16777216 + self->data[5] * 65536
-	 + self->data[6] * 256 + self->data[7]);
-    return SCONV * v;
-}
-
-static PyObject *
-TimeStamp_year(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyInt_FromLong(p.y);
-}
-
-static PyObject *
-TimeStamp_month(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyInt_FromLong(p.m);
-}
-
-static PyObject *
-TimeStamp_day(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyInt_FromLong(p.d);
-}
-
-static PyObject *
-TimeStamp_hour(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyInt_FromLong(p.mi / 60);
-}
-
-static PyObject *
-TimeStamp_minute(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyInt_FromLong(p.mi % 60);
-}
-
-static PyObject *
-TimeStamp_second(TimeStamp *self)
-{
-    return PyFloat_FromDouble(TimeStamp_sec(self));
-}
-
-static PyObject *
-TimeStamp_timeTime(TimeStamp *self)
-{
-    TimeStampParts p;
-    TimeStamp_unpack(self, &p);
-    return PyFloat_FromDouble(TimeStamp_abst(p.y, p.m - 1, p.d - 1, p.mi, 0)
-			      + TimeStamp_sec(self) - gmoff);
-}
-
-static PyObject *
-TimeStamp_raw(TimeStamp *self)
-{
-    return PyString_FromStringAndSize(self->data, 8);
-}
-
-static PyObject *
-TimeStamp_str(TimeStamp *self)
-{
-    char buf[128];
-    TimeStampParts p;
-    int len;
-
-    TimeStamp_unpack(self, &p);
-    len = sprintf(buf, "%4.4d-%2.2d-%2.2d %2.2d:%2.2d:%09.6f",
-	         p.y, p.m, p.d, p.mi / 60, p.mi % 60,
-	         TimeStamp_sec(self));
-
-    return PyString_FromStringAndSize(buf, len);
-}
-
-
-static PyObject *
-TimeStamp_laterThan(TimeStamp *self, PyObject *obj)
-{
-    TimeStamp *o = NULL;
-    TimeStampParts p;
-    unsigned char new[8];
-    int i;
-
-    if (obj->ob_type != self->ob_type) {
-	PyErr_SetString(PyExc_TypeError, "expected TimeStamp object");
-	return NULL;
-    }
-    o = (TimeStamp *)obj;
-    if (memcmp(self->data, o->data, 8) > 0) {
-	Py_INCREF(self);
-	return (PyObject *)self;
-    }
-
-    memcpy(new, o->data, 8);
-    for (i = 7; i > 3; i--) {
-	if (new[i] == 255)
-	    new[i] = 0;
-	else {
-	    new[i]++;
-	    return TimeStamp_FromString(new);
-	}
-    }
-
-    /* All but the first two bytes are the same.  Need to increment
-       the year, month, and day explicitly. */
-    TimeStamp_unpack(o, &p);
-    if (p.mi >= 1439) {
-	p.mi = 0;
-	if (p.d == month_len[leap(p.y)][p.m - 1]) {
-	    p.d = 1;
-	    if (p.m == 12) {
-		p.m = 1;
-		p.y++;
-	    } else
-		p.m++;
-	} else
-	    p.d++;
-    } else
-	p.mi++;
-
-    return TimeStamp_FromDate(p.y, p.m, p.d, p.mi / 60, p.mi % 60, 0);
-}
-
-static struct PyMethodDef TimeStamp_methods[] = {
-    {"year", 	(PyCFunction)TimeStamp_year, 	METH_NOARGS},
-    {"minute", 	(PyCFunction)TimeStamp_minute, 	METH_NOARGS},
-    {"month", 	(PyCFunction)TimeStamp_month, 	METH_NOARGS},
-    {"day", 	(PyCFunction)TimeStamp_day,	METH_NOARGS},
-    {"hour", 	(PyCFunction)TimeStamp_hour, 	METH_NOARGS},
-    {"second", 	(PyCFunction)TimeStamp_second, 	METH_NOARGS},
-    {"timeTime",(PyCFunction)TimeStamp_timeTime, 	METH_NOARGS},
-    {"laterThan", (PyCFunction)TimeStamp_laterThan, 	METH_O},
-    {"raw",	(PyCFunction)TimeStamp_raw,	METH_NOARGS},
-    {NULL,	NULL},
-};
-
-static PyTypeObject TimeStamp_type = {
-    PyObject_HEAD_INIT(NULL)
-    0,
-    "persistent.TimeStamp",
-    sizeof(TimeStamp),
-    0,
-    (destructor)TimeStamp_dealloc,	/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    (cmpfunc)TimeStamp_compare,		/* tp_compare */
-    (reprfunc)TimeStamp_raw,		/* tp_repr */
-    0,					/* tp_as_number */
-    0,					/* tp_as_sequence */
-    0,					/* tp_as_mapping */
-    (hashfunc)TimeStamp_hash,		/* tp_hash */
-    0,					/* tp_call */
-    (reprfunc)TimeStamp_str,		/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
-    0,					/* tp_doc */
-    0,					/* tp_traverse */
-    0,					/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    0,					/* tp_iter */
-    0,					/* tp_iternext */
-    TimeStamp_methods,			/* tp_methods */
-    0,					/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-};
-
-PyObject *
-TimeStamp_FromString(const char *buf)
-{
-    /* buf must be exactly 8 characters */
-    TimeStamp *ts = (TimeStamp *)PyObject_New(TimeStamp, &TimeStamp_type);
-    memcpy(ts->data, buf, 8);
-    return (PyObject *)ts;
-}
-
-#define CHECK_RANGE(VAR, LO, HI) if ((VAR) < (LO) || (VAR) > (HI)) { \
-     return PyErr_Format(PyExc_ValueError, \
-			 # VAR " must be between %d and %d: %d", \
-			 (LO), (HI), (VAR)); \
-    }
-
-PyObject *
-TimeStamp_FromDate(int year, int month, int day, int hour, int min,
-		   double sec)
-{
-    TimeStamp *ts = NULL;
-    int d;
-    unsigned int v;
-
-    if (year < 1900)
-	return PyErr_Format(PyExc_ValueError,
-			    "year must be greater than 1900: %d", year);
-    CHECK_RANGE(month, 1, 12);
-    d = days_in_month(year, month - 1);
-    if (day < 1 || day > d)
-	return PyErr_Format(PyExc_ValueError,
-			    "day must be between 1 and %d: %d", d, day);
-    CHECK_RANGE(hour, 0, 23);
-    CHECK_RANGE(min, 0, 59);
-    /* Seconds are allowed to be anything, so chill.
-       If we did want to be picky, 60 would be a better choice.
-    if (sec < 0 || sec > 59)
-	return PyErr_Format(PyExc_ValueError,
-			    "second must be between 0 and 59: %f", sec);
-    */
-    ts = (TimeStamp *)PyObject_New(TimeStamp, &TimeStamp_type);
-    v = (((year - 1900) * 12 + month - 1) * 31 + day - 1);
-    v = (v * 24 + hour) * 60 + min;
-    ts->data[0] = v / 16777216;
-    ts->data[1] = (v % 16777216) / 65536;
-    ts->data[2] = (v % 65536) / 256;
-    ts->data[3] = v % 256;
-    sec /= SCONV;
-    v = (unsigned int)sec;
-    ts->data[4] = v / 16777216;
-    ts->data[5] = (v % 16777216) / 65536;
-    ts->data[6] = (v % 65536) / 256;
-    ts->data[7] = v % 256;
-
-    return (PyObject *)ts;
-}
-
-PyObject *
-TimeStamp_TimeStamp(PyObject *obj, PyObject *args)
-{
-    char *buf = NULL;
-    int len = 0, y, mo, d, h = 0, m = 0;
-    double sec = 0;
-
-    if (PyArg_ParseTuple(args, "s#:TimeStamp", &buf, &len)) {
-	if (len != 8) {
-	    PyErr_SetString(PyExc_ValueError, "8-character string expected");
-	    return NULL;
-	}
-	return TimeStamp_FromString(buf);
-    }
-    PyErr_Clear();
-
-    if (!PyArg_ParseTuple(args, "iii|iid", &y, &mo, &d, &h, &m, &sec))
-	return NULL;
-    return TimeStamp_FromDate(y, mo, d, h, m, sec);
-}
-
-static PyMethodDef TimeStampModule_functions[] = {
-    {"TimeStamp",	TimeStamp_TimeStamp,	METH_VARARGS},
-    {NULL,		NULL},
-};
-
-
-void
-initTimeStamp(void)
-{
-    PyObject *m;
-
-    if (TimeStamp_init_gmoff() < 0)
-	return;
-
-    m = Py_InitModule4("TimeStamp", TimeStampModule_functions,
-		       TimeStampModule_doc, NULL, PYTHON_API_VERSION);
-    if (m == NULL)
-	return;
-
-    TimeStamp_type.ob_type = &PyType_Type;
-    TimeStamp_type.tp_getattro = PyObject_GenericGetAttr;
-}
diff --git a/branches/bug1734/src/persistent/__init__.py b/branches/bug1734/src/persistent/__init__.py
deleted file mode 100644
index 88d7dd61..00000000
--- a/branches/bug1734/src/persistent/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Provide access to Persistent and PersistentMapping.
-
-$Id$
-"""
-
-from cPersistence import Persistent, GHOST, UPTODATE, CHANGED, STICKY
-from cPickleCache import PickleCache
-
-from cPersistence import simple_new
-import copy_reg
-copy_reg.constructor(simple_new)
-
-# Make an interface declaration for Persistent,
-# if zope.interface is available.
-try:
-    from zope.interface import classImplements
-except ImportError:
-    pass
-else:
-    from persistent.interfaces import IPersistent
-    classImplements(Persistent, IPersistent)
diff --git a/branches/bug1734/src/persistent/cPersistence.c b/branches/bug1734/src/persistent/cPersistence.c
deleted file mode 100644
index 0aa442e1..00000000
--- a/branches/bug1734/src/persistent/cPersistence.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-static char cPersistence_doc_string[] =
-"Defines Persistent mixin class for persistent objects.\n"
-"\n"
-"$Id$\n";
-
-#include "cPersistence.h"
-#include "structmember.h"
-
-struct ccobject_head_struct {
-    CACHE_HEAD
-};
-
-/* These two objects are initialized when the module is loaded */
-static PyObject *TimeStamp, *py_simple_new;
-
-/* Strings initialized by init_strings() below. */
-static PyObject *py_keys, *py_setstate, *py___dict__, *py_timeTime;
-static PyObject *py__p_changed, *py__p_deactivate;
-static PyObject *py___getattr__, *py___setattr__, *py___delattr__;
-static PyObject *py___slotnames__, *copy_reg_slotnames, *__newobj__;
-static PyObject *py___getnewargs__, *py___getstate__;
-
-
-static int
-init_strings(void)
-{
-#define INIT_STRING(S) \
-    if (!(py_ ## S = PyString_InternFromString(#S))) \
-	return -1;
-    INIT_STRING(keys);
-    INIT_STRING(setstate);
-    INIT_STRING(timeTime);
-    INIT_STRING(__dict__);
-    INIT_STRING(_p_changed);
-    INIT_STRING(_p_deactivate);
-    INIT_STRING(__getattr__);
-    INIT_STRING(__setattr__);
-    INIT_STRING(__delattr__);
-    INIT_STRING(__slotnames__);
-    INIT_STRING(__getnewargs__);
-    INIT_STRING(__getstate__);
-#undef INIT_STRING
-    return 0;
-}
-
-#ifdef Py_DEBUG
-static void
-fatal_1350(cPersistentObject *self, const char *caller, const char *detail)
-{
-	char buf[1000];
-
-	PyOS_snprintf(buf, sizeof(buf),
-	    "cPersistence.c %s(): object at %p with type %.200s\n"
-	    "%s.\n"
-	    "The only known cause is multiple threads trying to ghost and\n"
-	    "unghost the object simultaneously.\n"
-	    "That's not legal, but ZODB can't stop it.\n"
-	    "See Collector #1350.\n",
-	    caller, self, self->ob_type->tp_name, detail);
-	Py_FatalError(buf);
-}
-#endif
-
-static void ghostify(cPersistentObject*);
-
-/* Load the state of the object, unghostifying it.  Upon success, return 1.
- * If an error occurred, re-ghostify the object and return -1.
- */
-static int
-unghostify(cPersistentObject *self)
-{
-    if (self->state < 0 && self->jar) {
-        PyObject *r;
-
-        /* Is it ever possible to not have a cache? */
-        if (self->cache) {
-            /* Create a node in the ring for this unghostified object. */
-            self->cache->non_ghost_count++;
-	    ring_add(&self->cache->ring_home, &self->ring);
-	    Py_INCREF(self);
-        }
-	/* set state to CHANGED while setstate() call is in progress
-	   to prevent a recursive call to _PyPersist_Load().
-	*/
-        self->state = cPersistent_CHANGED_STATE;
-        /* Call the object's __setstate__() */
-	r = PyObject_CallMethod(self->jar, "setstate", "O", (PyObject *)self);
-        if (r == NULL) {
-            ghostify(self);
-            return -1;
-        }
-        self->state = cPersistent_UPTODATE_STATE;
-        Py_DECREF(r);
-        if (self->cache && self->ring.r_next == NULL) {
-#ifdef Py_DEBUG
-        	fatal_1350(self, "unghostify",
-		    		 "is not in the cache despite that we just "
-		      		 "unghostified it");
-#else
-		PyErr_Format(PyExc_SystemError, "object at %p with type "
-			     "%.200s not in the cache despite that we just "
-			     "unghostified it", self, self->ob_type->tp_name);
-		return -1;
-#endif
-	}
-    }
-    return 1;
-}
-
-/****************************************************************************/
-
-static PyTypeObject Pertype;
-
-static void
-accessed(cPersistentObject *self)
-{
-    /* Do nothing unless the object is in a cache and not a ghost. */
-    if (self->cache && self->state >= 0 && self->ring.r_next)
-	ring_move_to_head(&self->cache->ring_home, &self->ring);
-}
-
-static void
-unlink_from_ring(cPersistentObject *self)
-{
-    /* If the cache has been cleared, then a non-ghost object
-       isn't in the ring any longer.
-    */
-    if (self->ring.r_next == NULL)
-	return;
-
-    /* if we're ghostifying an object, we better have some non-ghosts */
-    assert(self->cache->non_ghost_count > 0);
-    self->cache->non_ghost_count--;
-    ring_del(&self->ring);
-}
-
-static void
-ghostify(cPersistentObject *self)
-{
-    PyObject **dictptr;
-
-    /* are we already a ghost? */
-    if (self->state == cPersistent_GHOST_STATE)
-        return;
-
-    /* Is it ever possible to not have a cache? */
-    if (self->cache == NULL) {
-        self->state = cPersistent_GHOST_STATE;
-        return;
-    }
-
-    if (self->ring.r_next == NULL) {
-	/* There's no way to raise an error in this routine. */
-#ifdef Py_DEBUG
-	fatal_1350(self, "ghostify", "claims to be in a cache but isn't");
-#else
-	return;
-#endif
-    }
-
-    /* If we're ghostifying an object, we better have some non-ghosts. */
-    assert(self->cache->non_ghost_count > 0);
-    self->cache->non_ghost_count--;
-    ring_del(&self->ring);
-    self->state = cPersistent_GHOST_STATE;
-    dictptr = _PyObject_GetDictPtr((PyObject *)self);
-    if (dictptr && *dictptr) {
-	Py_DECREF(*dictptr);
-	*dictptr = NULL;
-    }
-
-    /* We remove the reference to the just ghosted object that the ring
-     * holds.  Note that the dictionary of oids->objects has an uncounted
-     * reference, so if the ring's reference was the only one, this frees
-     * the ghost object.  Note further that the object's dealloc knows to
-     * inform the dictionary that it is going away.
-     */
-    Py_DECREF(self);
-}
-
-static int
-changed(cPersistentObject *self)
-{
-  if ((self->state == cPersistent_UPTODATE_STATE ||
-       self->state == cPersistent_STICKY_STATE)
-       && self->jar)
-    {
-	PyObject *meth, *arg, *result;
-	static PyObject *s_register;
-
-	if (s_register == NULL) {
-	    s_register = PyString_InternFromString("register");
-	    if (s_register == NULL)
-		return -1;
-	}
-	meth = PyObject_GetAttr((PyObject *)self->jar, s_register);
-	if (meth == NULL)
-	    return -1;
-	arg = PyTuple_New(1);
-	if (arg == NULL) {
-	    Py_DECREF(meth);
-	    return -1;
-	}
-	Py_INCREF(self);
-	PyTuple_SET_ITEM(arg, 0, (PyObject *)self);
-	result = PyEval_CallObject(meth, arg);
-	Py_DECREF(arg);
-	Py_DECREF(meth);
-	if (result == NULL)
-	    return -1;
-	Py_DECREF(result);
-
-	self->state = cPersistent_CHANGED_STATE;
-    }
-
-  return 0;
-}
-
-static PyObject *
-Per__p_deactivate(cPersistentObject *self)
-{
-    if (self->state == cPersistent_UPTODATE_STATE && self->jar) {
-	PyObject **dictptr = _PyObject_GetDictPtr((PyObject *)self);
-	if (dictptr && *dictptr) {
-	    Py_DECREF(*dictptr);
-	    *dictptr = NULL;
-	}
-	/* Note that we need to set to ghost state unless we are
-	   called directly. Methods that override this need to
-	   do the same! */
-	ghostify(self);
-    }
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static PyObject *
-Per__p_activate(cPersistentObject *self)
-{
-    if (unghostify(self) < 0)
-        return NULL;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static int Per_set_changed(cPersistentObject *self, PyObject *v);
-
-static PyObject *
-Per__p_invalidate(cPersistentObject *self)
-{
-    signed char old_state = self->state;
-
-    if (old_state != cPersistent_GHOST_STATE) {
-        if (Per_set_changed(self, NULL) < 0)
-            return NULL;
-        ghostify(self);
-    }
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-
-static PyObject *
-pickle_slotnames(PyTypeObject *cls)
-{
-    PyObject *slotnames;
-
-    slotnames = PyDict_GetItem(cls->tp_dict, py___slotnames__);
-    if (slotnames) {
-	Py_INCREF(slotnames);
-	return slotnames;
-    }
-
-    slotnames = PyObject_CallFunctionObjArgs(copy_reg_slotnames,
-					     (PyObject*)cls, NULL);
-    if (slotnames && !(slotnames == Py_None || PyList_Check(slotnames))) {
-	PyErr_SetString(PyExc_TypeError,
-			"copy_reg._slotnames didn't return a list or None");
-	Py_DECREF(slotnames);
-	return NULL;
-    }
-
-    return slotnames;
-}
-
-static PyObject *
-pickle_copy_dict(PyObject *state)
-{
-    PyObject *copy, *key, *value;
-    char *ckey;
-    int pos = 0;
-
-    copy = PyDict_New();
-    if (!copy)
-	return NULL;
-
-    if (!state)
-	return copy;
-
-    while (PyDict_Next(state, &pos, &key, &value)) {
-	if (key && PyString_Check(key)) {
-	    ckey = PyString_AS_STRING(key);
-	    if (*ckey == '_' &&
-		(ckey[1] == 'v' || ckey[1] == 'p') &&
-		ckey[2] == '_')
-		/* skip volatile and persistent */
-		continue;
-        }
-
-	if (PyObject_SetItem(copy, key, value) < 0)
-	    goto err;
-    }
-
-    return copy;
- err:
-    Py_DECREF(copy);
-    return NULL;
-}
-
-
-static char pickle___getstate__doc[] =
-"Get the object serialization state\n"
-"\n"
-"If the object has no assigned slots and has no instance dictionary, then \n"
-"None is returned.\n"
-"\n"
-"If the object has no assigned slots and has an instance dictionary, then \n"
-"the a copy of the instance dictionary is returned. The copy has any items \n"
-"with names starting with '_v_' or '_p_' ommitted.\n"
-"\n"
-"If the object has assigned slots, then a two-element tuple is returned.  \n"
-"The first element is either None or a copy of the instance dictionary, \n"
-"as described above. The second element is a dictionary with items \n"
-"for each of the assigned slots.\n"
-;
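-
-/* Worked example (added for exposition): for an instance with no slots
-   whose __dict__ is {'x': 1, '_v_cache': 2, '_p_foo': 3}, __getstate__()
-   returns {'x': 1} -- the '_v_*' and '_p_*' items are omitted.  If the
-   type also had a populated slot 'counter', the result would instead be
-   the two-tuple ({'x': 1}, {'counter': <value>}). */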
-
-static PyObject *
-pickle___getstate__(PyObject *self)
-{
-    PyObject *slotnames=NULL, *slots=NULL, *state=NULL;
-    PyObject **dictp;
-    int n=0;
-
-    slotnames = pickle_slotnames(self->ob_type);
-    if (!slotnames)
-	return NULL;
-
-    dictp = _PyObject_GetDictPtr(self);
-    if (dictp)
-	state = pickle_copy_dict(*dictp);
-    else {
-	state = Py_None;
-	Py_INCREF(state);
-    }
-
-    if (slotnames != Py_None) {
-	int i;
-
-	slots = PyDict_New();
-	if (!slots)
-	    goto end;
-
-	for (i = 0; i < PyList_GET_SIZE(slotnames); i++) {
-	    PyObject *name, *value;
-	    char *cname;
-
-	    name = PyList_GET_ITEM(slotnames, i);
-	    if (PyString_Check(name)) {
-		cname = PyString_AS_STRING(name);
-		if (*cname == '_' &&
-		    (cname[1] == 'v' || cname[1] == 'p') &&
-		    cname[2] == '_')
-		    /* skip volatile and persistent */
-		    continue;
-            }
-
-	    /* Unclear:  Will this go through our getattr hook? */
-	    value = PyObject_GetAttr(self, name);
-	    if (value == NULL)
-		PyErr_Clear();
-	    else {
-		int err = PyDict_SetItem(slots, name, value);
-		Py_DECREF(value);
-		if (err < 0)
-		    goto end;
-		n++;
-            }
-        }
-    }
-
-    if (n)
-	state = Py_BuildValue("(NO)", state, slots);
-
- end:
-    Py_XDECREF(slotnames);
-    Py_XDECREF(slots);
-
-    return state;
-}
-
-static int
-pickle_setattrs_from_dict(PyObject *self, PyObject *dict)
-{
-    PyObject *key, *value;
-    int pos = 0;
-
-    if (!PyDict_Check(dict)) {
-	PyErr_SetString(PyExc_TypeError, "Expected dictionary");
-	return -1;
-    }
-
-    while (PyDict_Next(dict, &pos, &key, &value)) {
-	if (PyObject_SetAttr(self, key, value) < 0)
-	    return -1;
-    }
-    return 0;
-}
-
-static char pickle___setstate__doc[] =
-"Set the object serialization state\n\n"
-"The state should be in one of 3 forms:\n\n"
-"- None\n\n"
-"  Ignored\n\n"
-"- A dictionary\n\n"
-"  In this case, the object's instance dictionary will be cleared and \n"
-"  updated with the new state.\n\n"
-"- A two-tuple with a string as the first element. \n\n"
-"  In this case, the method named by the string in the first element will be\n"
-"  called with the second element.\n\n"
-"  This form supports migration of data formats.\n\n"
-"- A two-tuple with None or a Dictionary as the first element and\n"
-"  with a dictionary as the second element.\n\n"
-"  If the first element is not None, then the object's instance dictionary \n"
-"  will be cleared and updated with the value.\n\n"
-"  The items in the second element will be assigned as attributes.\n"
-;
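-
-/* Worked example (added for exposition): __setstate__({'x': 1}) clears the
-   instance dictionary and refills it from {'x': 1}, while
-   __setstate__(({'x': 1}, {'counter': 2})) does the same and then also
-   sets the 'counter' attribute (typically a slot) via setattr. */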
-
-static PyObject *
-pickle___setstate__(PyObject *self, PyObject *state)
-{
-    PyObject *slots=NULL;
-
-    if (PyTuple_Check(state)) {
-	if (!PyArg_ParseTuple(state, "OO:__setstate__", &state, &slots))
-	    return NULL;
-    }
-
-    if (state != Py_None) {
-	PyObject **dict;
-
-	dict = _PyObject_GetDictPtr(self);
-	if (dict) {
-	    if (!*dict) {
-		*dict = PyDict_New();
-		if (!*dict)
-		    return NULL;
-            }
-        }
-
-	if (dict && *dict) {
-	    PyDict_Clear(*dict);
-	    if (PyDict_Update(*dict, state) < 0)
-		return NULL;
-        }
-	else if (pickle_setattrs_from_dict(self, state) < 0)
-	    return NULL;
-    }
-
-    if (slots && pickle_setattrs_from_dict(self, slots) < 0)
-	return NULL;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static char pickle___reduce__doc[] =
-"Reduce an object to contituent parts for serialization\n"
-;
-
-static PyObject *
-pickle___reduce__(PyObject *self)
-{
-    PyObject *args=NULL, *bargs=NULL, *state=NULL, *getnewargs=NULL;
-    int l, i;
-
-    getnewargs = PyObject_GetAttr(self, py___getnewargs__);
-    if (getnewargs) {
-	bargs = PyObject_CallFunctionObjArgs(getnewargs, NULL);
-	Py_DECREF(getnewargs);
-	if (!bargs)
-	    return NULL;
-	l = PyTuple_Size(bargs);
-	if (l < 0)
-	    goto end;
-    }
-    else {
-	PyErr_Clear();
-	l = 0;
-    }
-
-    args = PyTuple_New(l+1);
-    if (args == NULL)
-	goto end;
-
-    Py_INCREF(self->ob_type);
-    PyTuple_SET_ITEM(args, 0, (PyObject*)(self->ob_type));
-    for (i = 0; i < l; i++) {
-	Py_INCREF(PyTuple_GET_ITEM(bargs, i));
-	PyTuple_SET_ITEM(args, i+1, PyTuple_GET_ITEM(bargs, i));
-    }
-
-    state = PyObject_CallMethodObjArgs(self, py___getstate__, NULL);
-    if (!state)
-	goto end;
-
-    state = Py_BuildValue("(OON)", __newobj__, args, state);
-
- end:
-    Py_XDECREF(bargs);
-    Py_XDECREF(args);
-
-    return state;
-}
-
-
-/* Return the object's state, a dict or None.
-
-   If the object has no dict, its state is None.
-   Otherwise, return a dict containing all the attributes that
-   don't start with "_v_".
-
-   The caller should not modify this dict, as it may be a reference to
-   the object's __dict__.
-*/
-
-static PyObject *
-Per__getstate__(cPersistentObject *self)
-{
-    /* TODO:  Should it be an error to call __getstate__() on a ghost? */
-    if (unghostify(self) < 0)
-        return NULL;
-
-    /* TODO:  should we increment stickiness?  Tim doesn't understand that
-       question. */
-    return pickle___getstate__((PyObject*)self);
-}
-
-/* The Persistent base type provides a traverse function, but not a
-   clear function.  An instance of a Persistent subclass will have
-   its dict cleared through subtype_clear().
-
-   There is always a cycle between a persistent object and its cache.
-   When the cycle becomes unreachable, the clear function for the
-   cache will break the cycle.  Thus, the persistent object need not
-   have a clear function.  It would be complex to write a clear function
-   for the objects, if we needed one, because of the reference count
-   tricks done by the cache.
-*/
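-
-/* Concretely (note added for exposition): Pertype below installs
-   Per_traverse as tp_traverse but leaves tp_clear 0; it is the cache's
-   clear function (cc_clear in cPickleCache.c) that breaks the
-   object<->cache cycle. */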
-
-static void
-Per_dealloc(cPersistentObject *self)
-{
-    if (self->state >= 0)
-	unlink_from_ring(self);
-    if (self->cache)
-	cPersistenceCAPI->percachedel(self->cache, self->oid);
-    Py_XDECREF(self->cache);
-    Py_XDECREF(self->jar);
-    Py_XDECREF(self->oid);
-    self->ob_type->tp_free(self);
-}
-
-static int
-Per_traverse(cPersistentObject *self, visitproc visit, void *arg)
-{
-    int err;
-
-#define VISIT(SLOT) \
-    if (SLOT) { \
-	err = visit((PyObject *)(SLOT), arg); \
-	if (err) \
-		     return err; \
-    }
-
-    VISIT(self->jar);
-    VISIT(self->oid);
-    VISIT(self->cache);
-
-#undef VISIT
-    return 0;
-}
-
-/* convert_name() returns a new reference to a string name
-   or sets an exception and returns NULL.
-*/
-
-static PyObject *
-convert_name(PyObject *name)
-{
-#ifdef Py_USING_UNICODE
-    /* The Unicode to string conversion is done here because the
-       existing tp_setattro slots expect a string object as name
-       and we wouldn't want to break those. */
-    if (PyUnicode_Check(name)) {
-	name = PyUnicode_AsEncodedString(name, NULL, NULL);
-    }
-    else
-#endif
-    if (!PyString_Check(name)) {
-	PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
-	return NULL;
-    } else
-	Py_INCREF(name);
-    return name;
-}
-
-/* Returns true if the object requires unghostification.
-
-   There are several special attributes that we allow access to without
-   requiring that the object be unghostified:
-   __class__
-   __del__
-   __dict__
-   __of__
-   __setstate__
-*/
-
-static int
-unghost_getattr(const char *s)
-{
-    if (*s++ != '_')
-	return 1;
-    if (*s == 'p') {
-	s++;
-	if (*s == '_')
-	    return 0; /* _p_ */
-	else
-	    return 1;
-    }
-    else if (*s == '_') {
-	s++;
-	switch (*s) {
-	case 'c':
-	    return strcmp(s, "class__");
-	case 'd':
-	    s++;
-	    if (!strcmp(s, "el__"))
-		return 0; /* __del__ */
-	    if (!strcmp(s, "ict__"))
-		return 0; /* __dict__ */
-	    return 1;
-	case 'o':
-	    return strcmp(s, "of__");
-	case 's':
-	    return strcmp(s, "setstate__");
-	default:
-	    return 1;
-	}
-    }
-    return 1;
-}
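-
-/* Worked examples (added for exposition):
-       unghost_getattr("_p_oid")    == 0   (any "_p_" name: no load needed)
-       unghost_getattr("__dict__")  == 0   (in the special list above)
-       unghost_getattr("__class__") == 0   (strcmp() is 0 on a match)
-       unghost_getattr("title")     == 1   (ordinary attribute: load state)
-*/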
-
-static PyObject*
-Per_getattro(cPersistentObject *self, PyObject *name)
-{
-    PyObject *result = NULL;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (unghost_getattr(s)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-    }
-    result = PyObject_GenericGetAttr((PyObject *)self, name);
-
-  Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-/* Exposed as _p_getattr method.  Test whether base getattr should be used */
-static PyObject *
-Per__p_getattr(cPersistentObject *self, PyObject *name)
-{
-    PyObject *result = NULL;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (*s != '_' || unghost_getattr(s)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-        result = Py_False;
-    }
-    else
-	result = Py_True;
-
-    Py_INCREF(result);
-
-  Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-/*
-   TODO:  we should probably not allow assignment of __class__ and __dict__.
-*/
-
-static int
-Per_setattro(cPersistentObject *self, PyObject *name, PyObject *v)
-{
-    int result = -1;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (strncmp(s, "_p_", 3) != 0) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-	if (strncmp(s, "_v_", 3) != 0
-	    && self->state != cPersistent_CHANGED_STATE) {
-	    if (changed(self) < 0)
-		goto Done;
-	}
-    }
-    result = PyObject_GenericSetAttr((PyObject *)self, name, v);
-
- Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-
-static int
-Per_p_set_or_delattro(cPersistentObject *self, PyObject *name, PyObject *v)
-{
-    int result = -1;	/* guilty until proved innocent */
-    char *s;
-
-    name = convert_name(name);
-    if (!name)
-	goto Done;
-    s = PyString_AS_STRING(name);
-
-    if (strncmp(s, "_p_", 3)) {
-	if (unghostify(self) < 0)
-	    goto Done;
-	accessed(self);
-
-        result = 0;
-    }
-    else {
-        if (PyObject_GenericSetAttr((PyObject *)self, name, v) < 0)
-	    goto Done;
-        result = 1;
-    }
-
- Done:
-    Py_XDECREF(name);
-    return result;
-}
-
-static PyObject *
-Per__p_setattr(cPersistentObject *self, PyObject *args)
-{
-    PyObject *name, *v, *result;
-    int r;
-
-    if (!PyArg_ParseTuple(args, "OO:_p_setattr", &name, &v))
-	return NULL;
-
-    r = Per_p_set_or_delattro(self, name, v);
-    if (r < 0)
-	return NULL;
-
-    result = r ? Py_True : Py_False;
-    Py_INCREF(result);
-    return result;
-}
-
-static PyObject *
-Per__p_delattr(cPersistentObject *self, PyObject *name)
-{
-    int r;
-    PyObject *result;
-
-    r = Per_p_set_or_delattro(self, name, NULL);
-    if (r < 0)
-	return NULL;
-
-    result = r ? Py_True : Py_False;
-    Py_INCREF(result);
-    return result;
-}
-
-
-static PyObject *
-Per_get_changed(cPersistentObject *self)
-{
-    if (self->state < 0) {
-	Py_INCREF(Py_None);
-	return Py_None;
-    }
-    return PyBool_FromLong(self->state == cPersistent_CHANGED_STATE);
-}
-
-static int
-Per_set_changed(cPersistentObject *self, PyObject *v)
-{
-    int deactivate = 0, true;
-    if (!v) {
-	/* delattr is used to invalidate an object even if it has changed. */
-	if (self->state != cPersistent_GHOST_STATE)
-	    self->state = cPersistent_UPTODATE_STATE;
-	deactivate = 1;
-    }
-    else if (v == Py_None)
-	deactivate = 1;
-
-    if (deactivate) {
-	PyObject *res, *meth;
-	meth = PyObject_GetAttr((PyObject *)self, py__p_deactivate);
-	if (meth == NULL)
-	    return -1;
-	res = PyObject_CallObject(meth, NULL);
-	if (res)
-	    Py_DECREF(res);
-	else {
-	    /* an error occured in _p_deactivate().
-
-	    It's not clear what we should do here.  The code is
-	    obviously ignoring the exception, but it shouldn't return
-	    0 for a getattr and set an exception.  The simplest change
-	    is to clear the exception, but that simply masks the
-	    error.
-
-	    This prints an error to stderr just like exceptions in
-	    __del__().  It would probably be better to log it but that
-	    would be painful from C.
-	    */
-	    PyErr_WriteUnraisable(meth);
-	}
-	Py_DECREF(meth);
-	return 0;
-    }
-    true = PyObject_IsTrue(v);
-    if (true == -1)
-	return -1;
-    else if (true)
-	return changed(self);
-
-    if (self->state >= 0)
-	self->state = cPersistent_UPTODATE_STATE;
-    return 0;
-}
-
-static PyObject *
-Per_get_oid(cPersistentObject *self)
-{
-    PyObject *oid = self->oid ? self->oid : Py_None;
-    Py_INCREF(oid);
-    return oid;
-}
-
-static int
-Per_set_oid(cPersistentObject *self, PyObject *v)
-{
-    if (self->cache) {
-	int result;
-
-	if (v == NULL) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can't delete _p_oid of cached object");
-	    return -1;
-	}
-	if (PyObject_Cmp(self->oid, v, &result) < 0)
-	    return -1;
-	if (result) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can not change _p_oid of cached object");
-	    return -1;
-	}
-    }
-    Py_XDECREF(self->oid);
-    Py_XINCREF(v);
-    self->oid = v;
-    return 0;
-}
-
-static PyObject *
-Per_get_jar(cPersistentObject *self)
-{
-    PyObject *jar = self->jar ? self->jar : Py_None;
-    Py_INCREF(jar);
-    return jar;
-}
-
-static int
-Per_set_jar(cPersistentObject *self, PyObject *v)
-{
-    if (self->cache) {
-	int result;
-
-	if (v == NULL) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can't delete _p_jar of cached object");
-	    return -1;
-	}
-	if (PyObject_Cmp(self->jar, v, &result) < 0)
-	    return -1;
-	if (result) {
-	    PyErr_SetString(PyExc_ValueError,
-			    "can not change _p_jar of cached object");
-	    return -1;
-	}
-    }
-    Py_XDECREF(self->jar);
-    Py_XINCREF(v);
-    self->jar = v;
-    return 0;
-}
-
-static PyObject *
-Per_get_serial(cPersistentObject *self)
-{
-    return PyString_FromStringAndSize(self->serial, 8);
-}
-
-static int
-Per_set_serial(cPersistentObject *self, PyObject *v)
-{
-    if (v) {
-	if (PyString_Check(v) && PyString_GET_SIZE(v) == 8)
-	    memcpy(self->serial, PyString_AS_STRING(v), 8);
-	else {
-	    PyErr_SetString(PyExc_ValueError,
-			    "_p_serial must be an 8-character string");
-	    return -1;
-	}
-    } else
-	memset(self->serial, 0, 8);
-    return 0;
-}
-
-static PyObject *
-Per_get_mtime(cPersistentObject *self)
-{
-    PyObject *t, *v;
-
-    if (unghostify(self) < 0)
-	return NULL;
-
-    accessed(self);
-
-    if (memcmp(self->serial, "\0\0\0\0\0\0\0\0", 8) == 0) {
-	Py_INCREF(Py_None);
-	return Py_None;
-    }
-
-    t = PyObject_CallFunction(TimeStamp, "s#", self->serial, 8);
-    if (!t)
-	return NULL;
-    v = PyObject_CallMethod(t, "timeTime", "");
-    Py_DECREF(t);
-    return v;
-}
-
-static PyObject *
-Per_get_state(cPersistentObject *self)
-{
-    return PyInt_FromLong(self->state);
-}
-
-static PyGetSetDef Per_getsets[] = {
-    {"_p_changed", (getter)Per_get_changed, (setter)Per_set_changed},
-    {"_p_jar", (getter)Per_get_jar, (setter)Per_set_jar},
-    {"_p_mtime", (getter)Per_get_mtime},
-    {"_p_oid", (getter)Per_get_oid, (setter)Per_set_oid},
-    {"_p_serial", (getter)Per_get_serial, (setter)Per_set_serial},
-    {"_p_state", (getter)Per_get_state},
-    {NULL}
-};
-
-static struct PyMethodDef Per_methods[] = {
-  {"_p_deactivate", (PyCFunction)Per__p_deactivate, METH_NOARGS,
-   "_p_deactivate() -- Deactivate the object"},
-  {"_p_activate", (PyCFunction)Per__p_activate, METH_NOARGS,
-   "_p_activate() -- Activate the object"},
-  {"_p_invalidate", (PyCFunction)Per__p_invalidate, METH_NOARGS,
-   "_p_invalidate() -- Invalidate the object"},
-  {"_p_getattr", (PyCFunction)Per__p_getattr, METH_O,
-   "_p_getattr(name) -- Test whether the base class must handle the name\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-   "\n"
-   "This method should be called by subclass __getattribute__\n"
-   "implementations before doing anything else. If the method\n"
-   "returns True, then __getattribute__ implementations must delegate\n"
-   "to the base class, Persistent.\n"
-  },
-  {"_p_setattr", (PyCFunction)Per__p_setattr, METH_VARARGS,
-   "_p_setattr(name, value) -- Save persistent meta data\n"
-   "\n"
-   "This method should be called by subclass __setattr__ implementations\n"
-   "before doing anything else.  If it returns true, then the attribute\n"
-   "was handled by the base class.\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-  },
-  {"_p_delattr", (PyCFunction)Per__p_delattr, METH_O,
-   "_p_delattr(name) -- Delete persistent meta data\n"
-   "\n"
-   "This method should be called by subclass __delattr__ implementations\n"
-   "before doing anything else.  If it returns true, then the attribute\n"
-   "was handled by the base class.\n"
-   "\n"
-   "The method unghostifies the object, if necessary.\n"
-   "The method records the object access, if necessary.\n"
-  },
-  {"__getstate__", (PyCFunction)Per__getstate__, METH_NOARGS,
-   pickle___getstate__doc },
-  {"__setstate__", (PyCFunction)pickle___setstate__, METH_O,
-   pickle___setstate__doc},
-  {"__reduce__", (PyCFunction)pickle___reduce__, METH_NOARGS,
-   pickle___reduce__doc},
-
-  {NULL,		NULL}		/* sentinel */
-};
-
-/* This module is compiled as a shared library.  Some compilers don't
-   allow addresses of Python objects defined in other libraries to be
-   used in static initializers here.  The DEFERRED_ADDRESS macro is
-   used to tag the slots where such addresses appear; the module init
-   function must fill in the tagged slots at runtime.  The argument is
-   for documentation -- the macro ignores it.
-*/
-#define DEFERRED_ADDRESS(ADDR) 0
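-
-/* For example (note added for exposition), Pertype below is declared with
-   PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyPersist_MetaType)), and
-   initcPersistence() patches in the real address at runtime via
-   "Pertype.ob_type = &PyType_Type;". */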
-
-static PyTypeObject Pertype = {
-    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyPersist_MetaType))
-    0,					/* ob_size */
-    "persistent.Persistent",		/* tp_name */
-    sizeof(cPersistentObject),		/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)Per_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    0,					/* tp_as_number */
-    0,					/* tp_as_sequence */
-    0,					/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    (getattrofunc)Per_getattro,		/* tp_getattro */
-    (setattrofunc)Per_setattro,		/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
-    					/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)Per_traverse,		/* tp_traverse */
-    0,					/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    0,					/* tp_iter */
-    0,					/* tp_iternext */
-    Per_methods,			/* tp_methods */
-    0,					/* tp_members */
-    Per_getsets,			/* tp_getset */
-};
-
-/* End of code for Persistent objects */
-/* -------------------------------------------------------- */
-
-typedef int (*intfunctionwithpythonarg)(PyObject*);
-
-/* Load the object's state if necessary and become sticky */
-static int
-Per_setstate(cPersistentObject *self)
-{
-    if (unghostify(self) < 0)
-        return -1;
-    self->state = cPersistent_STICKY_STATE;
-    return 0;
-}
-
-static PyObject *
-simple_new(PyObject *self, PyObject *type_object)
-{
-    return PyType_GenericNew((PyTypeObject *)type_object, NULL, NULL);
-}
-
-static PyMethodDef cPersistence_methods[] = {
-    {"simple_new", simple_new, METH_O,
-     "Create an object by simply calling a class's __new__ method without "
-     "arguments."},
-    {NULL, NULL}
-};
-
-
-static cPersistenceCAPIstruct
-truecPersistenceCAPI = {
-    &Pertype,
-    (getattrofunc)Per_getattro,	/*tp_getattr with object key*/
-    (setattrofunc)Per_setattro,	/*tp_setattr with object key*/
-    changed,
-    accessed,
-    ghostify,
-    (intfunctionwithpythonarg)Per_setstate,
-    NULL /* The percachedel slot is initialized in cPickleCache.c when
-	    the module is loaded.  It uses a function in a different
-	    shared library. */
-};
-
-void
-initcPersistence(void)
-{
-    PyObject *m, *s;
-    PyObject *copy_reg;
-
-    if (init_strings() < 0)
-      return;
-
-    m = Py_InitModule3("cPersistence", cPersistence_methods,
-		       cPersistence_doc_string);
-
-    Pertype.ob_type = &PyType_Type;
-    Pertype.tp_new = PyType_GenericNew;
-    if (PyType_Ready(&Pertype) < 0)
-	return;
-    if (PyModule_AddObject(m, "Persistent", (PyObject *)&Pertype) < 0)
-	return;
-
-    cPersistenceCAPI = &truecPersistenceCAPI;
-    s = PyCObject_FromVoidPtr(cPersistenceCAPI, NULL);
-    if (!s)
-	return;
-    if (PyModule_AddObject(m, "CAPI", s) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "GHOST", cPersistent_GHOST_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "UPTODATE", cPersistent_UPTODATE_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "CHANGED", cPersistent_CHANGED_STATE) < 0)
-	return;
-
-    if (PyModule_AddIntConstant(m, "STICKY", cPersistent_STICKY_STATE) < 0)
-	return;
-
-    py_simple_new = PyObject_GetAttrString(m, "simple_new");
-    if (!py_simple_new)
-        return;
-
-    copy_reg = PyImport_ImportModule("copy_reg");
-    if (!copy_reg)
-	return;
-
-    copy_reg_slotnames = PyObject_GetAttrString(copy_reg, "_slotnames");
-    if (!copy_reg_slotnames) {
-	Py_DECREF(copy_reg);
-	return;
-    }
-
-    __newobj__ = PyObject_GetAttrString(copy_reg, "__newobj__");
-    if (!__newobj__) {
-	Py_DECREF(copy_reg);
-	return;
-    }
-
-    if (!TimeStamp) {
-        m = PyImport_ImportModule("persistent.TimeStamp");
-        if (!m)
-	    return;
-        TimeStamp = PyObject_GetAttrString(m, "TimeStamp");
-        Py_DECREF(m);
-        /* fall through to immediate return on error */
-    }
-}
diff --git a/branches/bug1734/src/persistent/cPersistence.h b/branches/bug1734/src/persistent/cPersistence.h
deleted file mode 100644
index 25a8462d..00000000
--- a/branches/bug1734/src/persistent/cPersistence.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#ifndef CPERSISTENCE_H
-#define CPERSISTENCE_H
-
-#include "Python.h"
-#include "ring.h"
-
-#define CACHE_HEAD \
-    PyObject_HEAD \
-    CPersistentRing ring_home; \
-    int non_ghost_count;
-
-struct ccobject_head_struct;
-
-typedef struct ccobject_head_struct PerCache;
-
-/* How big is a persistent object?
-
-   12  PyGC_Head is two pointers and an int
-    8  PyObject_HEAD is an int and a pointer
- 
-   12  jar, oid, cache pointers
-    8  ring struct
-    8  serialno
-    4  state + extra
-
-  (52) so far
-
-    4  dict ptr
-    4  weaklist ptr
-  -------------------------
-   64  only need 60, but obmalloc rounds up to multiple of eight
-
-  Even a ghost requires 64 bytes.  It's possible to make a persistent
-  instance with slots and no dict, which changes the storage needed.
-
-*/
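-/* Note added for exposition: on a 32-bit build the fields declared in
-   cPersistent_HEAD below account for 40 of those bytes (8 + 12 + 8 + 8 + 4);
-   the GC header is prepended by Python's allocator, and the dict and
-   weaklist pointers appear only on subclass instances. */
-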
-
-#define cPersistent_HEAD \
-    PyObject_HEAD \
-    PyObject *jar; \
-    PyObject *oid; \
-    PerCache *cache; \
-    CPersistentRing ring; \
-    char serial[8]; \
-    signed char state; \
-    unsigned char reserved[3];
-
-#define cPersistent_GHOST_STATE -1
-#define cPersistent_UPTODATE_STATE 0
-#define cPersistent_CHANGED_STATE 1
-#define cPersistent_STICKY_STATE 2
-
-typedef struct {
-    cPersistent_HEAD
-} cPersistentObject;
-
-typedef void (*percachedelfunc)(PerCache *, PyObject *);
-
-typedef struct {
-    PyTypeObject *pertype;
-    getattrofunc getattro;
-    setattrofunc setattro;
-    int (*changed)(cPersistentObject*);
-    void (*accessed)(cPersistentObject*);
-    void (*ghostify)(cPersistentObject*);
-    int (*setstate)(PyObject*);
-    percachedelfunc percachedel;
-} cPersistenceCAPIstruct;
-
-#define cPersistenceType cPersistenceCAPI->pertype
-
-#ifndef DONT_USE_CPERSISTENCECAPI
-static cPersistenceCAPIstruct *cPersistenceCAPI;
-#endif
-
-#define cPersistanceModuleName "cPersistence"
-
-#define PER_TypeCheck(O) PyObject_TypeCheck((O), cPersistenceCAPI->pertype)
-
-#define PER_USE_OR_RETURN(O,R) {if((O)->state==cPersistent_GHOST_STATE && cPersistenceCAPI->setstate((PyObject*)(O)) < 0) return (R); else if ((O)->state==cPersistent_UPTODATE_STATE) (O)->state=cPersistent_STICKY_STATE;}
-
-#define PER_CHANGED(O) (cPersistenceCAPI->changed((cPersistentObject*)(O)))
-
-#define PER_GHOSTIFY(O) (cPersistenceCAPI->ghostify((cPersistentObject*)(O)))
-
-/* If the object is sticky, make it non-sticky, so that it can be ghostified.
-   The value of the expression is not meaningful.
- */
-#define PER_ALLOW_DEACTIVATION(O) ((O)->state==cPersistent_STICKY_STATE && ((O)->state=cPersistent_UPTODATE_STATE))
-
-#define PER_PREVENT_DEACTIVATION(O)  ((O)->state==cPersistent_UPTODATE_STATE && ((O)->state=cPersistent_STICKY_STATE))
-
-/* 
-   Make a persistent object usable from C by:
-
-   - Making sure it is not a ghost
-
-   - Making it sticky.
-
-   IMPORTANT: If you call this and don't call PER_ALLOW_DEACTIVATION, 
-              your object will not be ghostified.
-
-   PER_USE returns 1 on success and 0 on failure, where failure means
-   error.
- */
-#define PER_USE(O) \
-(((O)->state != cPersistent_GHOST_STATE \
-  || (cPersistenceCAPI->setstate((PyObject*)(O)) >= 0)) \
- ? (((O)->state==cPersistent_UPTODATE_STATE) \
-    ? ((O)->state=cPersistent_STICKY_STATE) : 1) : 0)
-
-#define PER_ACCESSED(O)  (cPersistenceCAPI->accessed((cPersistentObject*)(O)))
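-
-/* Illustrative sketch (added for exposition; the function name and the
-   returned value are hypothetical): the usual C-side pattern for touching
-   a persistent object's state with the macros above. */
-#if 0
-static PyObject *
-example_method(cPersistentObject *self)
-{
-    PyObject *result;
-
-    if (!PER_USE(self))			/* load state; on failure an
-					   exception is already set */
-	return NULL;
-    result = PyInt_FromLong((long)self->state);	/* ... use the state ... */
-    PER_ALLOW_DEACTIVATION(self);	/* make it ghostifiable again */
-    PER_ACCESSED(self);			/* move toward the MRU end of the ring */
-    return result;
-}
-#endif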
-
-#endif
diff --git a/branches/bug1734/src/persistent/cPickleCache.c b/branches/bug1734/src/persistent/cPickleCache.c
deleted file mode 100644
index 8982aa7c..00000000
--- a/branches/bug1734/src/persistent/cPickleCache.c
+++ /dev/null
@@ -1,1127 +0,0 @@
- /*****************************************************************************
-
-  Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-/*
-
-Objects are stored under three different regimes:
-
-Regime 1: Persistent Classes
-
-Persistent Classes are part of ZClasses. They are stored in the
-self->data dictionary, and are never garbage collected.
-
-The klass_items() method returns a sequence of (oid,object) tuples for
-every Persistent Class, which should make it possible to implement
-garbage collection in Python if necessary.
-
-Regime 2: Ghost Objects
-
-There is no benefit to keeping a ghost object which has no external
-references, therefore a weak reference scheme is used to ensure that
-ghost objects are removed from memory as soon as possible, when the
-last external reference is lost.
-
-Ghost objects are stored in the self->data dictionary. Normally a
-dictionary keeps a strong reference on its values, however this
-reference count is 'stolen'.
-
-This weak reference scheme leaves a dangling reference, in the
-dictionary, when the last external reference is lost. To clean up this
-dangling reference the persistent object dealloc function calls
-self->cache->_oid_unreferenced(self->oid). The cache looks up the oid
-in the dictionary, ensures it points to an object whose reference
-count is zero, then removes it from the dictionary. Before removing
-the object from the dictionary it must temporarily resurrect the
-object in much the same way that class instances are resurrected
-before their __del__ is called.
-
-Since ghost objects are stored under a different regime to non-ghost
-objects, an extra ghostify function in cPersistenceAPI replaces
-self->state=GHOST_STATE assignments that were common in other
-persistent classes (such as BTrees).
-
-Regime 3: Non-Ghost Objects
-
-Non-ghost objects are stored in two data structures: the dictionary
-mapping oids to objects and a doubly-linked list that encodes the
-order in which the objects were accessed.  The dictionary reference is
-borrowed, as it is for ghosts.  The list reference is a new reference;
-the list stores recently used objects, even if they are otherwise
-unreferenced, to avoid loading the object from the database again.
-
-The doubly-linked-list nodes contain next and previous pointers linking
-together the cache and all non-ghost persistent objects.
-
-The node embedded in the cache is the home position. On every
-attribute access a non-ghost object will relink itself just behind the
-home position in the ring. Objects accessed least recently will
-eventually find themselves positioned after the home position.
-
-Occasionally other nodes are temporarily inserted in the ring as
-position markers. The cache contains a ring_lock flag which must be
-set and unset before and after doing so. Only if the flag is unset can
-the cache assume that all nodes are either its own home node, or nodes
-from persistent objects. This assumption is useful during the garbage
-collection process.
-
-The number of non-ghost objects is counted in self->non_ghost_count.
-The garbage collection process consists of traversing the ring, and
-deactivating (that is, turning into a ghost) every object until
-self->non_ghost_count is down to the target size, or until it
-reaches the home position again.
-
-Note that objects in the sticky or changed states are still kept in
-the ring, however they can not be deactivated. The garbage collection
-process must skip such objects, rather than deactivating them.
-
-*/
-
-static char cPickleCache_doc_string[] =
-"Defines the PickleCache used by ZODB Connection objects.\n"
-"\n"
-"$Id$\n";
-
-#define DONT_USE_CPERSISTENCECAPI
-#include "cPersistence.h"
-#include "structmember.h"
-#include <time.h>
-#include <stddef.h>
-#undef Py_FindMethod
-
-/* Python string objects to speed lookups; set by module init. */
-static PyObject *py__p_changed;
-static PyObject *py__p_deactivate;
-static PyObject *py__p_jar;
-static PyObject *py__p_oid;
-
-static cPersistenceCAPIstruct *capi;
-
-/* This object is the pickle cache.  The CACHE_HEAD macro guarantees
-   that layout of this struct is the same as the start of
-   ccobject_head in cPersistence.c */
-typedef struct {
-    CACHE_HEAD
-    int klass_count;                     /* count of persistent classes */
-    PyObject *data;                      /* oid -> object dict */
-    PyObject *jar;                       /* Connection object */
-    PyObject *setklassstate;             /* the jar's setklassstate method */
-    int cache_size;                      /* target number of items in cache */
-
-    /* Most of the time the ring contains only:
-       * many nodes corresponding to persistent objects
-       * one 'home' node from the cache.
-    In some cases it is handy to temporarily add other types
-    of node into the ring as placeholders. 'ring_lock' is a boolean
-    indicating that someone has already done this. Currently this
-    is only used by the garbage collection code. */
-
-    int ring_lock;
-
-    /* 'cache_drain_resistance' controls how quickly the cache size will drop
-    when it is smaller than the configured size. A value of zero means it will
-    not drop below the configured size (suitable for most caches). Otherwise,
-    it will remove cache_non_ghost_count/cache_drain_resistance items from
-    the cache every time (suitable for rarely used caches, such as those
-    associated with Zope versions). */
-
-    int cache_drain_resistance;
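-    /* Worked example (added for exposition): with non_ghost_count == 1000
-       and cache_drain_resistance == 10, cc_incrgc() computes a secondary
-       target of 1000 - 1 - 1000/10 == 899; when that is below the
-       configured cache_size, an oversized cache drains by roughly ten
-       percent per pass. */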
-
-} ccobject;
-
-static int cc_ass_sub(ccobject *self, PyObject *key, PyObject *v);
-
-/* ---------------------------------------------------------------- */
-
-#define OBJECT_FROM_RING(SELF, HERE) \
-    ((cPersistentObject *)(((char *)(HERE)) - offsetof(cPersistentObject, ring)))
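-
-/* The macro recovers the enclosing object from its embedded ring node
-   (note added for exposition): if HERE points at some object's "ring"
-   member, then (char *)HERE minus offsetof(cPersistentObject, ring)
-   points back at the cPersistentObject itself. */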
-
-/* Insert self into the ring, following after. */
-static void
-insert_after(CPersistentRing *self, CPersistentRing *after)
-{
-    assert(self != NULL);
-    assert(after != NULL);
-    self->r_prev = after;
-    self->r_next = after->r_next;
-    after->r_next->r_prev = self;
-    after->r_next = self;
-}
-
-/* Remove self from the ring. */
-static void
-unlink_from_ring(CPersistentRing *self)
-{
-    assert(self != NULL);
-    self->r_prev->r_next = self->r_next;
-    self->r_next->r_prev = self->r_prev;
-}
-
-static int
-scan_gc_items(ccobject *self, int target)
-{
-    /* This function must only be called with the ring lock held,
-       because it places non-object placeholders in the ring.
-    */
-    cPersistentObject *object;
-    CPersistentRing *here;
-    CPersistentRing before_original_home;
-    int result = -1;   /* guilty until proved innocent */
-
-    /* Scan the ring, from least to most recently used, deactivating
-     * up-to-date objects, until we either find the ring_home again or
-     * we've ghosted enough objects to reach the target size.
-     * Tricky:  __getattr__ and __del__ methods can do anything, and in
-     * particular if we ghostify an object with a __del__ method, that method
-     * can load the object again, putting it back into the MRU part of the
-     * ring.  Waiting to find ring_home again can thus cause an infinite
-     * loop (Collector #1208).  So before_original_home records the MRU
-     * position we start with, and we stop the scan when we reach that.
-     */
-    insert_after(&before_original_home, self->ring_home.r_prev);
-    here = self->ring_home.r_next;   /* least recently used object */
-    while (here != &before_original_home && self->non_ghost_count > target) {
-	assert(self->ring_lock);
-	assert(here != &self->ring_home);
-
-        /* At this point we know that the ring only contains nodes
-	   from persistent objects, plus our own home node.  We know
-	   this because the ring lock is held.  We can safely assume
-	   the current ring node is a persistent object now we know it
-	   is not the home */
-        object = OBJECT_FROM_RING(self, here);
-
-        if (object->state == cPersistent_UPTODATE_STATE) {
-            CPersistentRing placeholder;
-            PyObject *method;
-            PyObject *temp;
-            int error_occurred = 0;
-            /* deactivate it. This is the main memory saver. */
-
-            /* Add a placeholder, a dummy node in the ring.  We need
-	       to do this to mark our position in the ring.  It is
-	       possible that the PyObject_GetAttr() call below will
-	       invoke a __getattr__() hook in Python.  Also possible
-	       that deactivation will lead to a __del__ method call.
-	       So another thread might run, and mutate the ring as a side
-	       effect of object accesses.  There's no predicting then where
-	       in the ring here->next will point after that.  The
-	       placeholder won't move as a side effect of calling Python
-	       code.
-	    */
-            insert_after(&placeholder, here);
-	    method = PyObject_GetAttr((PyObject *)object, py__p_deactivate);
-	    if (method == NULL)
-	        error_occurred = 1;
-	    else {
- 		temp = PyObject_CallObject(method, NULL);
-                Py_DECREF(method);
-	        if (temp == NULL)
-	            error_occurred = 1;
-	    }
-
-            here = placeholder.r_next;
-            unlink_from_ring(&placeholder);
-            if (error_occurred)
-                goto Done;
-        }
-        else
-            here = here->r_next;
-    }
-    result = 0;
- Done:
-    unlink_from_ring(&before_original_home);
-    return result;
-}
-
-static PyObject *
-lockgc(ccobject *self, int target_size)
-{
-    /* This is thread-safe because of the GIL, and there's nothing
-     * in between checking the ring_lock and acquiring it that calls back
-     * into Python.
-     */
-    if (self->ring_lock) {
-        Py_INCREF(Py_None);
-        return Py_None;
-    }
-
-    self->ring_lock = 1;
-    if (scan_gc_items(self, target_size) < 0) {
-        self->ring_lock = 0;
-        return NULL;
-    }
-    self->ring_lock = 0;
-
-    Py_INCREF(Py_None);
-    return Py_None;
-}
-
-static PyObject *
-cc_incrgc(ccobject *self, PyObject *args)
-{
-    int obsolete_arg = -999;
-    int starting_size = self->non_ghost_count;
-    int target_size = self->cache_size;
-
-    if (self->cache_drain_resistance >= 1) {
-        /* This cache will gradually drain down to a small size. Check
-           a (small) number of objects proportional to the current size */
-
-        int target_size_2 = (starting_size - 1
-			     - starting_size / self->cache_drain_resistance);
-        if (target_size_2 < target_size)
-            target_size = target_size_2;
-    }
-
-
-    if (!PyArg_ParseTuple(args, "|i:incrgc", &obsolete_arg))
-	return NULL;
-
-    if (obsolete_arg != -999
-        &&
-        (PyErr_Warn(PyExc_DeprecationWarning,
-                    "No argument expected")
-         < 0))
-        return NULL;
-
-    return lockgc(self, target_size);
-}
-
-static PyObject *
-cc_full_sweep(ccobject *self, PyObject *args)
-{
-    int dt = -999;
-
-    /* TODO:  This should be deprecated. */
-
-    if (!PyArg_ParseTuple(args, "|i:full_sweep", &dt))
-	return NULL;
-    if (dt == -999)
-	return lockgc(self, 0);
-    else
-	return cc_incrgc(self, args);
-}
-
-static PyObject *
-cc_minimize(ccobject *self, PyObject *args)
-{
-    int ignored = -999;
-
-    if (!PyArg_ParseTuple(args, "|i:minimize", &ignored))
-	return NULL;
-
-    if (ignored != -999
-        &&
-        (PyErr_Warn(PyExc_DeprecationWarning,
-                    "No argument expected")
-         < 0))
-        return NULL;
-
-    return lockgc(self, 0);
-}
-
-static void
-_invalidate(ccobject *self, PyObject *key)
-{
-    static PyObject *_p_invalidate;
-    PyObject *v = PyDict_GetItem(self->data, key);
-
-    if (!_p_invalidate) {
-	_p_invalidate = PyString_InternFromString("_p_invalidate");
-	if (!_p_invalidate) {
-	    /* It doesn't make any sense to ignore this error, but
-	       the caller ignores all errors.
-	    */
-	    PyErr_Clear();
-	    return;
-	}
-    }
-
-    if (!v)
-	return;
-    if (PyType_Check(v)) {
-        /* This looks wrong, but it isn't. We use strong references to types
-           because they don't have the ring members.
-
-           The result is that we *never* remove classes unless
-           they are modified.
-
-         */
-	if (v->ob_refcnt <= 1) {
-	    self->klass_count--;
-	    if (PyDict_DelItem(self->data, key) < 0)
-		PyErr_Clear();
-	}
-	else {
-	    v = PyObject_CallFunction(self->setklassstate, "O", v);
-	    if (v)
-		Py_DECREF(v);
-	    else
-		PyErr_Clear();
-	}
-    } else {
-	PyObject *meth, *err;
-
-	meth = PyObject_GetAttr(v, _p_invalidate);
-	if (!meth) {
-	    PyErr_Clear();
-	    return;
-	}
-	err = PyObject_CallObject(meth, NULL);
-	Py_DECREF(meth);
-	if (!err)
-	    PyErr_Clear();
-    }
-}
-
-static PyObject *
-cc_invalidate(ccobject *self, PyObject *inv)
-{
-  PyObject *key, *v;
-  int i = 0;
-
-  if (PyDict_Check(inv)) {
-      while (PyDict_Next(inv, &i, &key, &v))
-	  _invalidate(self, key);
-      PyDict_Clear(inv);
-  }
-  else {
-      if (PyString_Check(inv))
-	  _invalidate(self, inv);
-      else {
-	  int l;
-
-	  l = PyObject_Length(inv);
-	  if (l < 0)
-	      return NULL;
-	  for (i=l; --i >= 0; ) {
-	      key = PySequence_GetItem(inv, i);
-	      if (!key)
-		  return NULL;
-	      _invalidate(self, key);
-	      Py_DECREF(key);
-	  }
-	  /* Dubious:  modifying the input may be an unexpected side effect. */
-	  PySequence_DelSlice(inv, 0, l);
-      }
-  }
-
-  Py_INCREF(Py_None);
-  return Py_None;
-}
-
-static PyObject *
-cc_get(ccobject *self, PyObject *args)
-{
-    PyObject *r, *key, *d = NULL;
-
-    if (!PyArg_ParseTuple(args, "O|O:get", &key, &d))
-	return NULL;
-
-    r = PyDict_GetItem(self->data, key);
-    if (!r) {
-	if (d)
-	    r = d;
-	else
-	    r = Py_None;
-    }
-    Py_INCREF(r);
-    return r;
-}
-
-static PyObject *
-cc_items(ccobject *self)
-{
-    return PyObject_CallMethod(self->data, "items", "");
-}
-
-static PyObject *
-cc_klass_items(ccobject *self)
-{
-    PyObject *l,*k,*v;
-    int p = 0;
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    while (PyDict_Next(self->data, &p, &k, &v)) {
-        if(PyType_Check(v)) {
-	    v = Py_BuildValue("OO", k, v);
-	    if (v == NULL) {
-		Py_DECREF(l);
-		return NULL;
-	    }
-	    if (PyList_Append(l, v) < 0) {
-		Py_DECREF(v);
-		Py_DECREF(l);
-		return NULL;
-	    }
-	    Py_DECREF(v);
-        }
-    }
-
-    return l;
-}
-
-static PyObject *
-cc_debug_info(ccobject *self)
-{
-    PyObject *l,*k,*v;
-    int p = 0;
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    while (PyDict_Next(self->data, &p, &k, &v))
-      {
-        if (v->ob_refcnt <= 0)
-          v = Py_BuildValue("Oi", k, v->ob_refcnt);
-
-        else if (! PyType_Check(v) &&
-                 (v->ob_type->tp_basicsize >= sizeof(cPersistentObject))
-                 )
-          v = Py_BuildValue("Oisi",
-                            k, v->ob_refcnt, v->ob_type->tp_name,
-                            ((cPersistentObject*)v)->state);
-        else
-          v = Py_BuildValue("Ois", k, v->ob_refcnt, v->ob_type->tp_name);
-
-        if (v == NULL)
-          goto err;
-
-        if (PyList_Append(l, v) < 0)
-          goto err;
-      }
-
-    return l;
-
- err:
-    Py_DECREF(l);
-    return NULL;
-
-}
-
-static PyObject *
-cc_lru_items(ccobject *self)
-{
-    PyObject *l;
-    CPersistentRing *here;
-
-    if (self->ring_lock) {
-	/* When the ring lock is held, we have no way of knowing which
-	   ring nodes belong to persistent objects, and which are
-	   placeholders. */
-        PyErr_SetString(PyExc_ValueError,
-		".lru_items() is unavailable during garbage collection");
-        return NULL;
-    }
-
-    l = PyList_New(0);
-    if (l == NULL)
-	return NULL;
-
-    here = self->ring_home.r_next;
-    while (here != &self->ring_home) {
-        PyObject *v;
-        cPersistentObject *object = OBJECT_FROM_RING(self, here);
-
-        if (object == NULL) {
-            Py_DECREF(l);
-            return NULL;
-        }
-	v = Py_BuildValue("OO", object->oid, object);
-	if (v == NULL) {
-            Py_DECREF(l);
-            return NULL;
-	}
-	if (PyList_Append(l, v) < 0) {
-	    Py_DECREF(v);
-            Py_DECREF(l);
-            return NULL;
-	}
-        Py_DECREF(v);
-        here = here->r_next;
-    }
-
-    return l;
-}
-
-static void
-cc_oid_unreferenced(ccobject *self, PyObject *oid)
-{
-    /* This is called by the persistent object deallocation function
-       when the reference count on a persistent object reaches
-       zero. We need to fix up our dictionary; its reference is now
-       dangling because we stole its reference count. Be careful to
-       not release the global interpreter lock until this is
-       complete. */
-
-    PyObject *v;
-
-    /* If the cache has been cleared by GC, data will be NULL. */
-    if (!self->data)
-	return;
-
-    v = PyDict_GetItem(self->data, oid);
-    assert(v);
-    assert(v->ob_refcnt == 0);
-    /* Need to be very hairy here because a dictionary is about
-       to decref an already deleted object.
-    */
-
-#ifdef Py_TRACE_REFS
-    /* This is called from the deallocation function after the
-       interpreter has untracked the reference.  Track it again.
-     */
-    _Py_NewReference(v);
-    /* Don't increment total refcount as a result of the
-       shenanigans played in this function.  The _Py_NewReference()
-       call above creates artificial references to v.
-    */
-    _Py_RefTotal--;
-    assert(v->ob_type);
-#else
-    Py_INCREF(v);
-#endif
-    assert(v->ob_refcnt == 1);
-    /* Increment the refcount again, because delitem is going to
-       DECREF it.  If its refcount reached zero again, we'd call back to
-       the dealloc function that called us.
-    */
-    Py_INCREF(v);
-
-    /* TODO:  Should we call _Py_ForgetReference() on error exit? */
-    if (PyDict_DelItem(self->data, oid) < 0)
-	return;
-    Py_DECREF((ccobject *)((cPersistentObject *)v)->cache);
-    ((cPersistentObject *)v)->cache = NULL;
-
-    assert(v->ob_refcnt == 1);
-
-    /* Undo the temporary resurrection.
-       Don't DECREF the object, because this function is called from
-       the object's dealloc function. If the refcnt reaches zero, it
-       will all be invoked recursively.
-     */
-    _Py_ForgetReference(v);
-}
-
-static PyObject *
-cc_ringlen(ccobject *self)
-{
-    CPersistentRing *here;
-    int c = 0;
-
-    for (here = self->ring_home.r_next; here != &self->ring_home;
-	 here = here->r_next)
-	c++;
-    return PyInt_FromLong(c);
-}
-
-static struct PyMethodDef cc_methods[] = {
-    {"items", (PyCFunction)cc_items, METH_NOARGS,
-     "Return list of oid, object pairs for all items in cache."},
-    {"lru_items", (PyCFunction)cc_lru_items, METH_NOARGS,
-     "List (oid, object) pairs from the lru list, as 2-tuples."},
-    {"klass_items", (PyCFunction)cc_klass_items, METH_NOARGS,
-     "List (oid, object) pairs of cached persistent classes."},
-    {"full_sweep", (PyCFunction)cc_full_sweep, METH_VARARGS,
-     "full_sweep() -- Perform a full sweep of the cache."},
-    {"minimize",	(PyCFunction)cc_minimize, METH_VARARGS,
-     "minimize([ignored]) -- Remove as many objects as possible\n\n"
-     "Ghostify all objects that are not modified.  Takes an optional\n"
-     "argument, but ignores it."},
-    {"incrgc", (PyCFunction)cc_incrgc, METH_VARARGS,
-     "incrgc() -- Perform incremental garbage collection\n\n"
-     "This method had been depricated!"
-     "Some other implementations support an optional parameter 'n' which\n"
-     "indicates a repetition count; this value is ignored."},
-    {"invalidate", (PyCFunction)cc_invalidate, METH_O,
-     "invalidate(oids) -- invalidate one, many, or all ids"},
-    {"get", (PyCFunction)cc_get, METH_VARARGS,
-     "get(key [, default]) -- get an item, or a default"},
-    {"ringlen", (PyCFunction)cc_ringlen, METH_NOARGS,
-     "ringlen() -- Returns number of non-ghost items in cache."},
-    {"debug_info", (PyCFunction)cc_debug_info, METH_NOARGS,
-     "debug_info() -- Returns debugging data about objects in the cache."},
-    {NULL, NULL}		/* sentinel */
-};
-
-static int
-cc_init(ccobject *self, PyObject *args, PyObject *kwds)
-{
-    int cache_size = 100;
-    PyObject *jar;
-
-    if (!PyArg_ParseTuple(args, "O|i", &jar, &cache_size))
-	return -1;
-
-    self->setklassstate = self->jar = NULL;
-    self->data = PyDict_New();
-    if (self->data == NULL) {
-	Py_DECREF(self);
-	return -1;
-    }
-    /* Untrack the dict mapping oids to objects.
-
-    The dict contains uncounted references to ghost objects, so it
-    isn't safe for GC to visit it.  If GC finds an object with more
-    referents than refcounts, it will die with an assertion failure.
-
-    When the cache participates in GC, it will need to traverse the
-    objects in the doubly-linked list, which will account for all the
-    non-ghost objects.
-    */
-    PyObject_GC_UnTrack((void *)self->data);
-    self->setklassstate = PyObject_GetAttrString(jar, "setklassstate");
-    if (self->setklassstate == NULL) {
-	Py_DECREF(self);
-	return -1;
-    }
-    self->jar = jar;
-    Py_INCREF(jar);
-    self->cache_size = cache_size;
-    self->non_ghost_count = 0;
-    self->klass_count = 0;
-    self->cache_drain_resistance = 0;
-    self->ring_lock = 0;
-    self->ring_home.r_next = &self->ring_home;
-    self->ring_home.r_prev = &self->ring_home;
-    return 0;
-}
-
-static void
-cc_dealloc(ccobject *self)
-{
-    Py_XDECREF(self->data);
-    Py_XDECREF(self->jar);
-    Py_XDECREF(self->setklassstate);
-    PyObject_GC_Del(self);
-}
-
-static int
-cc_clear(ccobject *self)
-{
-    int pos = 0;
-    PyObject *k, *v;
-    /* Clearing the cache is delicate.
-
-    A non-ghost object will show up in the ring and in the dict.  If
-    we deallocate the dict before clearing the ring, the GC will
-    decref each object in the dict.  Since the dict references are
-    uncounted, this will lead to objects having negative refcounts.
-
-    Freeing the non-ghost objects should eliminate many objects from
-    the cache, but there may still be ghost objects left.  It's
-    not safe to decref the dict until it's empty, so we need to manually
-    clear those out of the dict, too.  We accomplish that by replacing
-    all the ghost objects with None.
-    */
-
-    /* We don't need to lock the ring, because the cache is unreachable.
-    It should be impossible for anyone to be modifying the cache.
-    */
-    assert(! self->ring_lock);
-
-    while (self->ring_home.r_next != &self->ring_home) {
-	CPersistentRing *here = self->ring_home.r_next;
-	cPersistentObject *o = OBJECT_FROM_RING(self, here);
-
-	if (o->cache) {
-	    Py_INCREF(o); /* account for uncounted reference */
-	    if (PyDict_DelItem(self->data, o->oid) < 0)
-		return -1;
-	}
-	o->cache = NULL;
-	Py_DECREF(self);
-	self->ring_home.r_next = here->r_next;
-	o->ring.r_prev = NULL;
-	o->ring.r_next = NULL;
-	Py_DECREF(o);
-	here = here->r_next;
-    }
-
-    Py_XDECREF(self->jar);
-    Py_XDECREF(self->setklassstate);
-
-    while (PyDict_Next(self->data, &pos, &k, &v)) {
-	Py_INCREF(v);
-	if (PyDict_SetItem(self->data, k, Py_None) < 0)
-	    return -1;
-    }
-    Py_XDECREF(self->data);
-    self->data = NULL;
-    self->jar = NULL;
-    self->setklassstate = NULL;
-    return 0;
-}
-
-static int
-cc_traverse(ccobject *self, visitproc visit, void *arg)
-{
-    int err;
-    CPersistentRing *here;
-
-    /* If we're in the midst of cleaning up old objects, the ring contains
-     * assorted junk we must not pass on to the visit() callback.  This
-     * should be rare (our cleanup code would need to have called back
-     * into Python, which in turn triggered Python's gc).  When it happens,
-     * simply don't chase any pointers.  The cache will appear to be a
-     * source of external references then, and at worst we miss cleaning
-     * up a dead cycle until the next time Python's gc runs.
-     */
-    if (self->ring_lock)
-    	return 0;
-
-#define VISIT(SLOT) \
-    if (SLOT) { \
-	err = visit((PyObject *)(SLOT), arg); \
-	if (err) \
-		     return err; \
-    }
-
-    VISIT(self->jar);
-    VISIT(self->setklassstate);
-
-    here = self->ring_home.r_next;
-
-    /* It is possible that an object is traversed after it is cleared.
-       In that case, there is no ring.
-    */
-    if (!here)
-	return 0;
-
-    while (here != &self->ring_home) {
-	cPersistentObject *o = OBJECT_FROM_RING(self, here);
-	VISIT(o);
-	here = here->r_next;
-    }
-#undef VISIT
-
-    return 0;
-}
-
-static int
-cc_length(ccobject *self)
-{
-    return PyObject_Length(self->data);
-}
-
-static PyObject *
-cc_subscript(ccobject *self, PyObject *key)
-{
-    PyObject *r;
-
-    r = PyDict_GetItem(self->data, key);
-    if (r == NULL) {
-	PyErr_SetObject(PyExc_KeyError, key);
-	return NULL;
-    }
-    Py_INCREF(r);
-
-    return r;
-}
-
-static int
-cc_add_item(ccobject *self, PyObject *key, PyObject *v)
-{
-    int result;
-    PyObject *oid, *object_again, *jar;
-    cPersistentObject *p;
-
-    /* Sanity check the value given to make sure it is allowed in the cache */
-    if (PyType_Check(v)) {
-        /* It's a persistent class, such as a ZClass. That's ok. */
-    }
-    else if (v->ob_type->tp_basicsize < sizeof(cPersistentObject)) {
-        /* If it's not an instance of a persistent class (i.e. Python
-	   classes that derive from persistent.Persistent, BTrees,
-	   etc.), report an error.
-
-	   TODO:  checking sizeof() seems a poor test.
-	*/
-	PyErr_SetString(PyExc_TypeError,
-			"Cache values must be persistent objects.");
-	return -1;
-    }
-
-    /* Can't access v->oid directly because the object might be a
-     *  persistent class.
-     */
-    oid = PyObject_GetAttr(v, py__p_oid);
-    if (oid == NULL)
-	return -1;
-    if (! PyString_Check(oid)) {
-        PyErr_Format(PyExc_TypeError,
-                     "Cached object oid must be a string, not a %s",
-		     oid->ob_type->tp_name);
-	Py_DECREF(oid);		/* don't leak the oid reference on error */
-	return -1;
-    }
-
-    /*  we know they are both strings.
-     *  now check if they are the same string.
-     */
-    result = PyObject_Compare(key, oid);
-    if (PyErr_Occurred()) {
-	Py_DECREF(oid);
-	return -1;
-    }
-    Py_DECREF(oid);
-    if (result) {
-	PyErr_SetString(PyExc_ValueError, "Cache key does not match oid");
-	return -1;
-    }
-
-    /* useful sanity check, but not strictly an invariant of this class */
-    jar = PyObject_GetAttr(v, py__p_jar);
-    if (jar == NULL)
-        return -1;
-    if (jar==Py_None) {
-        Py_DECREF(jar);
-        PyErr_SetString(PyExc_ValueError,
-                        "Cached object jar missing");
-	return -1;
-    }
-    Py_DECREF(jar);
-
-    object_again = PyDict_GetItem(self->data, key);
-    if (object_again) {
-	if (object_again != v) {
-	    PyErr_SetString(PyExc_ValueError,
-		    "A different object already has the same oid");
-	    return -1;
-	} else {
-	    /* re-register under the same oid - no work needed */
-	    return 0;
-	}
-    }
-
-    if (PyType_Check(v)) {
-	if (PyDict_SetItem(self->data, key, v) < 0)
-	    return -1;
-	self->klass_count++;
-	return 0;
-    } else {
-	PerCache *cache = ((cPersistentObject *)v)->cache;
-	if (cache) {
-	    if (cache != (PerCache *)self) {
-		/* This object is already in a different cache. */
-		PyErr_SetString(PyExc_ValueError,
-				"Cache values may only be in one cache.");
-	    } else {
-		/* The object is already one of ours, yet wasn't found in
-		   the data dict under this key.  That should be
-		   impossible, since the key was checked against the
-		   object's oid above.  Set an error anyway:  returning
-		   -1 without an exception set would raise SystemError. */
-		PyErr_SetString(PyExc_ValueError,
-				"Object already in cache under another key.");
-	    }
-	    return -1;
-	}
-    }
-
-    if (PyDict_SetItem(self->data, key, v) < 0)
-	return -1;
-    /* the dict should have a borrowed reference */
-    Py_DECREF(v);
-
-    p = (cPersistentObject *)v;
-    Py_INCREF(self);
-    p->cache = (PerCache *)self;
-    if (p->state >= 0) {
-	/* insert this non-ghost object into the ring just
-	   behind the home position. */
-	self->non_ghost_count++;
-	ring_add(&self->ring_home, &p->ring);
-	/* this list should have a new reference to the object */
-	Py_INCREF(v);
-    }
-    return 0;
-}
-
-static int
-cc_del_item(ccobject *self, PyObject *key)
-{
-    PyObject *v;
-    cPersistentObject *p;
-
-    /* unlink this item from the ring */
-    v = PyDict_GetItem(self->data, key);
-    if (v == NULL) {
-	PyErr_SetObject(PyExc_KeyError, key);
-	return -1;
-    }
-
-    if (PyType_Check(v)) {
-	self->klass_count--;
-    } else {
-	p = (cPersistentObject *)v;
-	if (p->state >= 0) {
-	    self->non_ghost_count--;
-	    ring_del(&p->ring);
-	    /* The DelItem below will account for the reference
-	       held by the list. */
-	} else {
-	    /* This is a ghost object, so we haven't kept a reference
-	       count on it.  For it to have stayed alive this long,
-	       someone else must be holding a reference to it.
-	       Therefore we need to temporarily give it back a
-	       reference count before calling DelItem below. */
-	    Py_INCREF(v);
-	}
-
-	Py_DECREF((PyObject *)p->cache);
-	p->cache = NULL;
-    }
-
-    if (PyDict_DelItem(self->data, key) < 0) {
-	PyErr_SetString(PyExc_RuntimeError,
-			"unexpectedly couldn't remove key in cc_ass_sub");
-	return -1;
-    }
-
-    return 0;
-}
-
-static int
-cc_ass_sub(ccobject *self, PyObject *key, PyObject *v)
-{
-    if (!PyString_Check(key)) {
-	PyErr_Format(PyExc_TypeError,
-                     "cPickleCache key must be a string, not a %s",
-		     key->ob_type->tp_name);
-	return -1;
-    }
-    if (v)
-	return cc_add_item(self, key, v);
-    else
-	return cc_del_item(self, key);
-}
-
-static PyMappingMethods cc_as_mapping = {
-  (inquiry)cc_length,		/*mp_length*/
-  (binaryfunc)cc_subscript,	/*mp_subscript*/
-  (objobjargproc)cc_ass_sub,	/*mp_ass_subscript*/
-};
-
-static PyObject *
-cc_cache_data(ccobject *self, void *context)
-{
-    return PyDict_Copy(self->data);
-}
-
-static PyGetSetDef cc_getsets[] = {
-    {"cache_data", (getter)cc_cache_data},
-    {NULL}
-};
-
-
-static PyMemberDef cc_members[] = {
-    {"cache_size", T_INT, offsetof(ccobject, cache_size)},
-    {"cache_drain_resistance", T_INT,
-     offsetof(ccobject, cache_drain_resistance)},
-    {"cache_non_ghost_count", T_INT, offsetof(ccobject, non_ghost_count), RO},
-    {"cache_klass_count", T_INT, offsetof(ccobject, klass_count), RO},
-    {NULL}
-};
-
-/* This module is compiled as a shared library.  Some compilers don't
-   allow addresses of Python objects defined in other libraries to be
-   used in static initializers here.  The DEFERRED_ADDRESS macro is
-   used to tag the slots where such addresses appear; the module init
-   function must fill in the tagged slots at runtime.  The argument is
-   for documentation -- the macro ignores it.
-*/
-#define DEFERRED_ADDRESS(ADDR) 0
-
-static PyTypeObject Cctype = {
-    PyObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type))
-    0,					/* ob_size */
-    "persistent.PickleCache",		/* tp_name */
-    sizeof(ccobject),			/* tp_basicsize */
-    0,					/* tp_itemsize */
-    (destructor)cc_dealloc,		/* tp_dealloc */
-    0,					/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    0,					/* tp_compare */
-    0,					/* tp_repr */
-    0,					/* tp_as_number */
-    0,					/* tp_as_sequence */
-    &cc_as_mapping,			/* tp_as_mapping */
-    0,					/* tp_hash */
-    0,					/* tp_call */
-    0,					/* tp_str */
-    0,					/* tp_getattro */
-    0,					/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
-    					/* tp_flags */
-    0,					/* tp_doc */
-    (traverseproc)cc_traverse,		/* tp_traverse */
-    (inquiry)cc_clear,			/* tp_clear */
-    0,					/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    0,					/* tp_iter */
-    0,					/* tp_iternext */
-    cc_methods,				/* tp_methods */
-    cc_members,				/* tp_members */
-    cc_getsets,				/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    (initproc)cc_init,			/* tp_init */
-};
-
-void
-initcPickleCache(void)
-{
-    PyObject *m;
-
-    Cctype.ob_type = &PyType_Type;
-    Cctype.tp_new = &PyType_GenericNew;
-    if (PyType_Ready(&Cctype) < 0) {
-	return;
-    }
-
-    m = Py_InitModule3("cPickleCache", NULL, cPickleCache_doc_string);
-
-    capi = (cPersistenceCAPIstruct *)PyCObject_Import(
-	"persistent.cPersistence", "CAPI");
-    if (!capi)
-	return;
-    capi->percachedel = (percachedelfunc)cc_oid_unreferenced;
-
-    py__p_changed = PyString_InternFromString("_p_changed");
-    if (!py__p_changed)
-        return;
-    py__p_deactivate = PyString_InternFromString("_p_deactivate");
-    if (!py__p_deactivate)
-        return;
-    py__p_jar = PyString_InternFromString("_p_jar");
-    if (!py__p_jar)
-        return;
-    py__p_oid = PyString_InternFromString("_p_oid");
-    if (!py__p_oid)
-        return;
-
-    if (PyModule_AddStringConstant(m, "cache_variant", "stiff/c") < 0)
-	return;
-
-    /* This leaks a reference to Cctype, but it doesn't matter. */
-    if (PyModule_AddObject(m, "PickleCache", (PyObject *)&Cctype) < 0)
-	return;
-}
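-
-/* Illustrative only:  a hypothetical pure-Python rendering of the key
-   invariants cc_ass_sub/cc_add_item enforce above, with the reference
-   juggling and ring bookkeeping left out.  SimplePickleCache is a
-   made-up name, not part of this module.
-
-       class SimplePickleCache:
-           def __init__(self, jar):
-               self.data = {}
-               self.jar = jar
-
-           def __setitem__(self, key, value):
-               # Keys are string oids and must match the object's oid.
-               if not isinstance(key, str):
-                   raise TypeError("cPickleCache key must be a string")
-               if key != value._p_oid:
-                   raise ValueError("Cache key does not match oid")
-               if value._p_jar is None:
-                   raise ValueError("Cached object jar missing")
-               old = self.data.get(key)
-               if old is not None and old is not value:
-                   raise ValueError(
-                       "A different object already has the same oid")
-               self.data[key] = value
-*/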
diff --git a/branches/bug1734/src/persistent/dict.py b/branches/bug1734/src/persistent/dict.py
deleted file mode 100644
index ec8d68ed..00000000
--- a/branches/bug1734/src/persistent/dict.py
+++ /dev/null
@@ -1,77 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Python implementation of persistent container type
-
-$Id$
-"""
-
-import persistent
-from UserDict import IterableUserDict
-
-__metaclass__ = type
-
-class PersistentDict(persistent.Persistent, IterableUserDict):
-    """A persistent wrapper for mapping objects.
-
-    This class allows wrapping of mapping objects so that object
-    changes are registered.  As a side effect, mapping objects may be
-    subclassed.
-    """
-
-    # IterableUserDict provides all of the mapping behavior.  The
-    # PersistentDict class is responsible for marking the persistent
-    # state as changed when a method actually changes the state.  As
-    # the mapping API evolves, we may need to add more methods here.
-
-    __super_delitem = IterableUserDict.__delitem__
-    __super_setitem = IterableUserDict.__setitem__
-    __super_clear = IterableUserDict.clear
-    __super_update = IterableUserDict.update
-    __super_setdefault = IterableUserDict.setdefault
-    __super_popitem = IterableUserDict.popitem
-
-    __super_p_init = persistent.Persistent.__init__
-    __super_init = IterableUserDict.__init__
-
-    def __init__(self, dict=None):
-        self.__super_init(dict)
-        self.__super_p_init()
-
-    def __delitem__(self, key):
-        self.__super_delitem(key)
-        self._p_changed = True
-
-    def __setitem__(self, key, v):
-        self.__super_setitem(key, v)
-        self._p_changed = True
-
-    def clear(self):
-        self.__super_clear()
-        self._p_changed = True
-
-    def update(self, b):
-        self.__super_update(b)
-        self._p_changed = True
-
-    def setdefault(self, key, failobj=None):
-        # We could inline all of UserDict's implementation into the
-        # method here, but I'd rather not depend at all on the
-        # implementation in UserDict (simple as it is).
-        if not self.has_key(key):
-            self._p_changed = True
-        return self.__super_setdefault(key, failobj)
-
-    def popitem(self):
-        self._p_changed = True
-        return self.__super_popitem()
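-
-# Illustrative only:  with a data manager attached, any mutating method
-# above marks the wrapper as changed and registers it with the jar.
-# DM here is a hypothetical stub, like the ones used in the persistent
-# tests.
-#
-#   >>> from persistent.dict import PersistentDict
-#   >>> class DM:
-#   ...     def __init__(self): self.called = 0
-#   ...     def register(self, ob): self.called += 1
-#   ...     def setstate(self, ob): pass
-#   >>> d = PersistentDict()
-#   >>> d._p_oid = '00000001'
-#   >>> d._p_jar = DM()
-#   >>> d['color'] = 'red'        # routed through __setitem__ above
-#   >>> bool(d._p_changed)
-#   True
-#   >>> d._p_jar.called
-#   1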
diff --git a/branches/bug1734/src/persistent/interfaces.py b/branches/bug1734/src/persistent/interfaces.py
deleted file mode 100644
index 3504eefb..00000000
--- a/branches/bug1734/src/persistent/interfaces.py
+++ /dev/null
@@ -1,297 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Persistence Interfaces
-
-$Id$
-"""
-
-from zope.interface import Interface
-from zope.interface import Attribute
-
-class IPersistent(Interface):
-    """Python persistent interface
-
-    A persistent object can be in one of several states:
-
-    - Unsaved
-
-      The object has been created but not saved in a data manager.
-
-      In this state, the _p_changed attribute is non-None and false
-      and the _p_jar attribute is None.
-
-    - Saved
-
-      The object has been saved and has not been changed since it was saved.
-
-      In this state, the _p_changed attribute is non-None and false
-      and the _p_jar attribute is set to a data manager.
-
-    - Sticky
-
-      This state is identical to the saved state except that the
-      object cannot transition to the ghost state.  This is a special
-      state used by C methods of persistent objects to make sure that
-      state is not unloaded in the middle of computation.
-
-      In this state, the _p_changed attribute is non-None and false
-      and the _p_jar attribute is set to a data manager.
-
-      There is no Python API for detecting whether an object is in the
-      sticky state.
-
-    - Changed
-
-      The object has been changed.
-
-      In this state, the _p_changed attribute is true
-      and the _p_jar attribute is set to a data manager.
-
-    - Ghost
-
-      the object is in memory but its state has not been loaded from
-      the database (or its state has been unloaded).  In this state,
-      the object doesn't contain any application data.
-
-      In this state, the _p_changed attribute is None, and the _p_jar
-      attribute is set to the data manager from which the object was
-      obtained.
-
-    In all the above, _p_oid (the persistent object id) is set when
-    _p_jar first gets set.
-
-    The following state transitions are possible:
-
-    - Unsaved -> Saved
-
-      This transition occurs when an object is saved in the
-      database.  This usually happens when an unsaved object is added
-      to (e.g. as an attribute or item of) a saved (or changed) object
-      and the transaction is committed.
-
-    - Saved  -> Changed
-      Sticky -> Changed
-
-      This transition occurs when someone sets an attribute or sets
-      _p_changed to a true value on a saved or sticky object.  When the
-      transition occurs, the persistent object is required to call the
-      register() method on its data manager, passing itself as the
-      only argument.
-
-    - Saved -> Sticky
-
-      This transition occurs when C code marks the object as sticky to
-      prevent its deactivation.
-
-    - Saved -> Ghost
-
-      This transition occurs when a saved object is deactivated or
-      invalidated.  See discussion below.
-
-    - Sticky -> Saved
-
-      This transition occurs when C code unmarks the object as sticky to
-      allow its deactivation.
-
-    - Changed -> Saved
-
-      This transition occurs when a transaction is committed.  After
-      saving the state of a changed object during transaction commit,
-      the data manager sets the object's _p_changed to a non-None false
-      value.
-
-    - Changed -> Ghost
-
-      This transition occurs when a transaction is aborted.  All changed
-      objects are invalidated by the data manager when it aborts.
-
-    - Ghost -> Saved
-
-      This transition occurs when an attribute or operation of a ghost
-      is accessed and the object's state is loaded from the database.
-
-    Note that there is a separate C API that is not included here.
-    The C API requires a specific data layout and defines the sticky
-    state.
-
-
-    About Invalidation, Deactivation and the Sticky & Ghost States
-
-    The sticky state is intended to be a short-lived state, to prevent
-    an object's state from being discarded while we're in C routines.  It
-    is an error to invalidate an object in the sticky state.
-
-    Deactivation is a request that an object discard its state (become
-    a ghost).  Deactivation is an optimization, and a request to
-    deactivate may be ignored.  There are two equivalent ways to
-    request deactivation:
-
-          - call _p_deactivate()
-          - set _p_changed to None
-
-    There are two ways to invalidate an object:  call the
-    _p_invalidate() method (preferred) or delete its _p_changed
-    attribute.  This cannot be ignored, and is used when semantics
-    require invalidation.  Normally, an invalidated object transitions
-    to the ghost state.  However, some objects cannot be ghosts.  When
-    these objects are invalidated, they immediately reload their state
-    from their data manager, and are then in the saved state.
-
-    """
-
-    _p_jar = Attribute(
-        """The data manager for the object.
-
-        The data manager implements the IPersistentDataManager interface.
-        If there is no data manager, then this is None.
-        """)
-
-    _p_oid = Attribute(
-        """The object id.
-
-        It is up to the data manager to assign this.
-        The special value None is reserved to indicate that an object
-        id has not been assigned.  Non-None object ids must be hashable
-        and totally ordered.
-        """)
-
-    _p_changed = Attribute(
-        """The persistent state of the object
-
-        This is one of:
-
-        None -- The object is a ghost.
-
-        false but not None -- The object is saved (or has never been saved).
-
-        true -- The object has been modified since it was last saved.
-
-        The object state may be changed by assigning or deleting this
-        attribute; however, assigning None is ignored if the object is
-        not in the saved state, and may be ignored even if the object is
-        in the saved state.
-
-        Note that an object can transition to the changed state only if
-        it has a data manager.  When such a state change occurs, the
-        'register' method of the data manager must be called, passing the
-        persistent object.
-
-        Deleting this attribute forces invalidation independent of
-        existing state, although it is an error if the sticky state is
-        current.
-        """)
-
-    _p_serial = Attribute(
-        """The object serial number.
-
-        This member is used by the data manager to distinguish distinct
-        revisions of a given persistent object.
-
-        This is an 8-byte string (not Unicode).
-        """)
-
-    def __getstate__():
-        """Get the object data.
-
-        The state should not include persistent attributes ("_p_name").
-        The result must be picklable.
-        """
-
-    def __setstate__(state):
-        """Set the object data.
-        """
-
-    def _p_activate():
-        """Activate the object.
-
-        Change the object to the saved state if it is a ghost.
-        """
-
-    def _p_deactivate():
-        """Deactivate the object.
-
-        Possibly change an object in the saved state to the
-        ghost state.  It may not be possible to make some persistent
-        objects ghosts, and, for optimization reasons, the implementation
-        may choose to keep an object in the saved state.
-        """
-
-    def _p_invalidate():
-        """Invalidate the object.
-
-        Invalidate the object.  This causes any data to be thrown
-        away, even if the object is in the changed state.  The object
-        is moved to the ghost state; further accesses will cause
-        object data to be reloaded.
-        """
-
-class IPersistentNoReadConflicts(IPersistent):
-    def _p_independent():
-        """Hook for subclasses to prevent read conflict errors.
-
-        A specific persistent object type can define this method and
-        have it return true if the data manager should ignore read
-        conflicts for this object.
-        """
-
-# TODO:  document conflict resolution.
-
-class IPersistentDataManager(Interface):
-    """Provide services for managing persistent state.
-
-    This interface is used by a persistent object to interact with its
-    data manager in the context of a transaction.
-    """
-
-    def setstate(object):
-        """Load the state for the given object.
-
-        The object should be in the ghost state. The object's state will be
-        set and the object will end up in the saved state.
-
-        The object must provide the IPersistent interface.
-        """
-
-    def oldstate(obj, tid):
-        """Return copy of 'obj' that was written by transaction 'tid'.
-
-        The returned object does not have the typical metadata (_p_jar, _p_oid,
-        _p_serial) set.  I'm not sure how references to other persistent objects
-        are handled.
-
-        Parameters
-        obj: a persistent object from this Connection.
-        tid: id of a transaction that wrote an earlier revision.
-
-        Raises KeyError if tid does not exist or if tid deleted a revision
-        of obj.
-        """
-
-    def register(object):
-        """Register an IPersistent with the current transaction.
-
-        This method must be called when the object transitions to
-        the changed state.
-
-        A subclass could override this method to customize the default
-        policy of one transaction manager for each thread.
-        """
-
-    def mtime(object):
-        """Return the modification time of the object.
-
-        The modification time may not be known, in which case None
-        is returned.  If non-None, the return value is the kind of
-        timestamp supplied by Python's time.time().
-        """
diff --git a/branches/bug1734/src/persistent/list.py b/branches/bug1734/src/persistent/list.py
deleted file mode 100644
index cd8f390c..00000000
--- a/branches/bug1734/src/persistent/list.py
+++ /dev/null
@@ -1,96 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Python implementation of persistent list.
-
-$Id$"""
-
-import persistent
-from UserList import UserList
-
-class PersistentList(UserList, persistent.Persistent):
-    __super_setitem = UserList.__setitem__
-    __super_delitem = UserList.__delitem__
-    __super_setslice = UserList.__setslice__
-    __super_delslice = UserList.__delslice__
-    __super_iadd = UserList.__iadd__
-    __super_imul = UserList.__imul__
-    __super_append = UserList.append
-    __super_insert = UserList.insert
-    __super_pop = UserList.pop
-    __super_remove = UserList.remove
-    __super_reverse = UserList.reverse
-    __super_sort = UserList.sort
-    __super_extend = UserList.extend
-
-    def __setitem__(self, i, item):
-        self.__super_setitem(i, item)
-        self._p_changed = 1
-
-    def __delitem__(self, i):
-        self.__super_delitem(i)
-        self._p_changed = 1
-
-    def __setslice__(self, i, j, other):
-        self.__super_setslice(i, j, other)
-        self._p_changed = 1
-
-    def __delslice__(self, i, j):
-        self.__super_delslice(i, j)
-        self._p_changed = 1
-
-    def __iadd__(self, other):
-        L = self.__super_iadd(other)
-        self._p_changed = 1
-        return L
-
-    def __imul__(self, n):
-        L = self.__super_imul(n)
-        self._p_changed = 1
-        return L
-
-    def append(self, item):
-        self.__super_append(item)
-        self._p_changed = 1
-
-    def insert(self, i, item):
-        self.__super_insert(i, item)
-        self._p_changed = 1
-
-    def pop(self, i=-1):
-        rtn = self.__super_pop(i)
-        self._p_changed = 1
-        return rtn
-
-    def remove(self, item):
-        self.__super_remove(item)
-        self._p_changed = 1
-
-    def reverse(self):
-        self.__super_reverse()
-        self._p_changed = 1
-
-    def sort(self, *args):
-        self.__super_sort(*args)
-        self._p_changed = 1
-
-    def extend(self, other):
-        self.__super_extend(other)
-        self._p_changed = 1
-
-    # This works around a bug in Python 2.1.x (up to 2.1.2 at least) where the
-    # __cmp__ bogusly raises a RuntimeError, and because this is an extension
-    # class, none of the rich comparison stuff works anyway.
-    def __cmp__(self, other):
-        return cmp(self.data, self._UserList__cast(other))
diff --git a/branches/bug1734/src/persistent/mapping.py b/branches/bug1734/src/persistent/mapping.py
deleted file mode 100644
index f8145b78..00000000
--- a/branches/bug1734/src/persistent/mapping.py
+++ /dev/null
@@ -1,104 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Python implementation of persistent base types
-
-$Id$"""
-
-import persistent
-from UserDict import UserDict
-
-class PersistentMapping(UserDict, persistent.Persistent):
-    """A persistent wrapper for mapping objects.
-
-    This class allows wrapping of mapping objects so that object
-    changes are registered.  As a side effect, mapping objects may be
-    subclassed.
-
-    A subclass of PersistentMapping or any code that adds new
-    attributes should not create an attribute named _container.  This
-    is reserved for backwards compatibility reasons.
-    """
-
-    # UserDict provides all of the mapping behavior.  The
-    # PersistentMapping class is responsible for marking the persistent
-    # state as changed when a method actually changes the state.  As
-    # the mapping API evolves, we may need to add more methods here.
-
-    __super_delitem = UserDict.__delitem__
-    __super_setitem = UserDict.__setitem__
-    __super_clear = UserDict.clear
-    __super_update = UserDict.update
-    __super_setdefault = UserDict.setdefault
-
-    def __delitem__(self, key):
-        self.__super_delitem(key)
-        self._p_changed = 1
-
-    def __setitem__(self, key, v):
-        self.__super_setitem(key, v)
-        self._p_changed = 1
-
-    def clear(self):
-        self.__super_clear()
-        self._p_changed = 1
-
-    def update(self, b):
-        self.__super_update(b)
-        self._p_changed = 1
-
-    def setdefault(self, key, failobj=None):
-        # We could inline all of UserDict's implementation into the
-        # method here, but I'd rather not depend at all on the
-        # implementation in UserDict (simple as it is).
-        if not self.has_key(key):
-            self._p_changed = 1
-        return self.__super_setdefault(key, failobj)
-
-    try:
-        __super_popitem = UserDict.popitem
-    except AttributeError:
-        pass
-    else:
-        def popitem(self):
-            self._p_changed = 1
-            return self.__super_popitem()
-
-    # If the internal representation of PersistentMapping changes,
-    # it causes compatibility problems for pickles generated by
-    # different versions of the code.  Compatibility works in both
-    # directions, because an application may want to share a database
-    # between applications using different versions of the code.
-
-    # Effectively, the original rep is part of the "API."  To provide
-    # full compatibility, the getstate and setstate must read and
-    # write objects using the old rep.
-
-    # As a result, the PersistentMapping must save and restore the
-    # actual internal dictionary using the name _container.
-
-    def __getstate__(self):
-        state = {}
-        state.update(self.__dict__)
-        state['_container'] = state['data']
-        del state['data']
-        return state
-
-    def __setstate__(self, state):
-        if state.has_key('_container'):
-            self.data = state['_container']
-            del state['_container']
-        elif not state.has_key('data'):
-            self.data = {}
-        self.__dict__.update(state)
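-
-# Illustrative only:  the compatibility rep in action.  __getstate__
-# always writes the original '_container' name, and __setstate__
-# accepts either layout.
-#
-#   >>> m = PersistentMapping({'a': 1})
-#   >>> m.__getstate__()
-#   {'_container': {'a': 1}}
-#   >>> m2 = PersistentMapping.__new__(PersistentMapping)
-#   >>> m2.__setstate__({'_container': {'a': 1}})
-#   >>> m2['a']
-#   1
-#   >>> m3 = PersistentMapping.__new__(PersistentMapping)
-#   >>> m3.__setstate__({'data': {'b': 2}})   # internal rep also accepted
-#   >>> m3['b']
-#   2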
diff --git a/branches/bug1734/src/persistent/ring.c b/branches/bug1734/src/persistent/ring.c
deleted file mode 100644
index ebc8a67b..00000000
--- a/branches/bug1734/src/persistent/ring.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2003 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-#define RING_C "$Id$\n"
-
-/* Support routines for the doubly-linked list of cached objects.
-
-The cache maintains a doubly-linked list of persistent objects, with
-space for the pointers allocated in the objects themselves.  The cache
-stores the distinguished head of the list, which is not a valid
-persistent object.
-
-The next pointers traverse the ring in order starting with the least
-recently used object.  The prev pointers traverse the ring in order
-starting with the most recently used object.
-
-*/
-
-#include "Python.h"
-#include "ring.h"
-
-void
-ring_add(CPersistentRing *ring, CPersistentRing *elt)
-{
-    assert(!elt->r_next);
-    elt->r_next = ring;
-    elt->r_prev = ring->r_prev;
-    ring->r_prev->r_next = elt;
-    ring->r_prev = elt;
-}
-
-void
-ring_del(CPersistentRing *elt)
-{
-    elt->r_next->r_prev = elt->r_prev;
-    elt->r_prev->r_next = elt->r_next;
-    elt->r_next = NULL;
-    elt->r_prev = NULL;
-}
-
-void
-ring_move_to_head(CPersistentRing *ring, CPersistentRing *elt)
-{
-    elt->r_prev->r_next = elt->r_next;
-    elt->r_next->r_prev = elt->r_prev;
-    elt->r_next = ring;
-    elt->r_prev = ring->r_prev;
-    ring->r_prev->r_next = elt;
-    ring->r_prev = elt;
-}
diff --git a/branches/bug1734/src/persistent/ring.h b/branches/bug1734/src/persistent/ring.h
deleted file mode 100644
index 18f56e77..00000000
--- a/branches/bug1734/src/persistent/ring.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*****************************************************************************
-
-  Copyright (c) 2003 Zope Corporation and Contributors.
-  All Rights Reserved.
-
-  This software is subject to the provisions of the Zope Public License,
-  Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-  THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-  WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-  WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-  FOR A PARTICULAR PURPOSE
-
- ****************************************************************************/
-
-/* Support routines for the doubly-linked list of cached objects.
-
-The cache stores a headed, doubly-linked, circular list of persistent
-objects, with space for the pointers allocated in the objects themselves.
-The cache stores the distinguished head of the list, which is not a valid
-persistent object.  The other list members are non-ghost persistent
-objects, linked in LRU (least-recently used) order.
-
-The r_next pointers traverse the ring starting with the least recently used
-object.  The r_prev pointers traverse the ring starting with the most
-recently used object.
-
-Obscure:  While each object is pointed at twice by list pointers (once by
-its predecessor's r_next, again by its successor's r_prev), the refcount
-on the object is bumped only by 1.  This leads to some possibly surprising
-sequences of incref and decref code.  Note that since the refcount is
-bumped at least once, the list does hold a strong reference to each
-object in it.
-*/
-
-typedef struct CPersistentRing_struct
-{
-    struct CPersistentRing_struct *r_prev;
-    struct CPersistentRing_struct *r_next;
-} CPersistentRing;
-
-/* The list operations here take constant time independent of the
- * number of objects in the list:
- */
-
-/* Add elt as the most recently used object.  elt must not already be
- * in the list, although this isn't checked.
- */
-void ring_add(CPersistentRing *ring, CPersistentRing *elt);
-
-/* Remove elt from the list.  elt must already be in the list, although
- * this isn't checked.
- */
-void ring_del(CPersistentRing *elt);
-
-/* elt must already be in the list, although this isn't checked.  It's
- * unlinked from its current position, and relinked into the list as the
- * most recently used object (which is arguably the tail of the list
- * instead of the head -- but the name of this function could be argued
- * either way).  This is equivalent to
- *
- *     ring_del(elt);
- *     ring_add(ring, elt);
- *
- * but may be a little quicker.
- */
-void ring_move_to_head(CPersistentRing *ring, CPersistentRing *elt);
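-
-/* Illustrative only:  a pure-Python rendering of the same operations
- * over a sentinel-headed circular list.  Node is a made-up class; the
- * C code stores r_prev/r_next inside the persistent objects instead.
- *
- *     class Node:
- *         def __init__(self):
- *             self.r_prev = self.r_next = None
- *
- *     def ring_add(ring, elt):       # insert elt as most recently used
- *         elt.r_next = ring
- *         elt.r_prev = ring.r_prev
- *         ring.r_prev.r_next = elt
- *         ring.r_prev = elt
- *
- *     def ring_del(elt):             # unlink elt in constant time
- *         elt.r_next.r_prev = elt.r_prev
- *         elt.r_prev.r_next = elt.r_next
- *         elt.r_next = elt.r_prev = None
- *
- * The head is initialized with r_prev = r_next = itself, so both
- * operations work without special-casing an empty ring, and
- * ring_move_to_head is just ring_del followed by ring_add.
- */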
diff --git a/branches/bug1734/src/persistent/tests/__init__.py b/branches/bug1734/src/persistent/tests/__init__.py
deleted file mode 100644
index 5bb534f7..00000000
--- a/branches/bug1734/src/persistent/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# package
diff --git a/branches/bug1734/src/persistent/tests/persistent.txt b/branches/bug1734/src/persistent/tests/persistent.txt
deleted file mode 100644
index 09a19b52..00000000
--- a/branches/bug1734/src/persistent/tests/persistent.txt
+++ /dev/null
@@ -1,448 +0,0 @@
-Tests for persistent.Persistent
-===============================
-
-This document is an extended doc test that covers the basics of the
-Persistent base class.  The test expects a class named 'P' to be
-provided in its globals.  The P class implements the Persistent
-interface.
-
-Test framework
---------------
-
-The class P needs to behave like ExampleP.  (Note that the code below
-is *not* part of the tests.)
-
-class ExampleP(Persistent):
-    def __init__(self):
-        self.x = 0
-    def inc(self):
-        self.x += 1
-
-The tests use stub data managers.  A data manager is responsible for
-loading and storing the state of a persistent object.  It's stored in
-the _p_jar attribute of a persistent object.
-
->>> class DM:
-...     def __init__(self):
-...         self.called = 0
-...     def register(self, ob):
-...         self.called += 1
-...     def setstate(self, ob):
-...         ob.__setstate__({'x': 42})
-
->>> class BrokenDM(DM):
-...     def register(self,ob):
-...         self.called += 1
-...         raise NotImplementedError
-...     def setstate(self,ob):
-...         raise NotImplementedError
-
->>> from persistent import Persistent
-
-Test Persistent without Data Manager
-------------------------------------
-
-First do some simple tests of a Persistent instance that does not have
-a data manager (_p_jar).
-
->>> p = P()
->>> p.x
-0
->>> p._p_changed
-False
->>> p._p_state
-0
->>> p._p_jar
->>> p._p_oid
-
-Verify that modifications have no effect on _p_state or _p_changed.
-
->>> p.inc()
->>> p.inc()
->>> p.x
-2
->>> p._p_changed
-False
->>> p._p_state
-0
-
-Try all sorts of different ways to change the object's state.
-
->>> p._p_deactivate()
->>> p._p_state
-0
->>> p._p_changed = True
->>> p._p_state
-0
->>> del p._p_changed
->>> p._p_changed
-False
->>> p._p_state
-0
->>> p.x
-2
-
-Test Persistent with Data Manager
----------------------------------
-
-Next try some tests of an object with a data manager.  The DM class is
-a simple testing stub.
-
->>> p = P()
->>> dm = DM()
->>> p._p_oid = "00000012"
->>> p._p_jar = dm
->>> p._p_changed
-0
->>> dm.called
-0
-
-Modifying the object marks it as changed and registers it with the
-data manager.  Subsequent modifications don't have additional
-side-effects.
-
->>> p.inc()
->>> p._p_changed
-1
->>> dm.called
-1
->>> p.inc()
->>> p._p_changed
-1
->>> dm.called
-1
-
-It's not possible to deactivate a modified object.
-
->>> p._p_deactivate()
->>> p._p_changed
-1
-
-It is possible to invalidate it.  That's the key difference
-between deactivation and invalidation.
-
->>> p._p_invalidate()
->>> p._p_state
--1
-
-Now that the object is a ghost, any attempt to modify it will
-require that it be unghosted first.  The test data manager
-has the odd property that it sets the object's 'x' attribute
-to 42 when it is unghosted.
-
->>> p.inc()
->>> p.x
-43
->>> dm.called
-2
-
-You can manually reset the changed field to False, although
-it's not clear why you would want to do that.  The object
-changes to the UPTODATE state but retains its modifications.
-
->>> p._p_changed = False
->>> p._p_state
-0
->>> p._p_changed
-False
->>> p.x
-43
-
->>> p.inc()
->>> p._p_changed
-True
->>> dm.called
-3
-
-__getstate__() and __setstate__()
----------------------------------
-
-The next several tests cover the __getstate__() and __setstate__()
-implementations.
-
->>> p = P()
->>> state = p.__getstate__()
->>> isinstance(state, dict)
-True
->>> state['x']
-0
->>> p._p_state
-0
-
-Calling setstate always leaves the object in the uptodate state?
-(I'm not entirely clear on this one.)
-
->>> p.__setstate__({'x': 5})
->>> p._p_state
-0
-
-Assigning to a volatile attribute has no effect on the object state.
-
->>> p._v_foo = 2
->>> p.__getstate__()
-{'x': 5}
->>> p._p_state
-0
-
-The _p_serial attribute is not affected by calling setstate.
-
->>> p._p_serial = "00000012"
->>> p.__setstate__(p.__getstate__())
->>> p._p_serial
-'00000012'
-
-Change Ghost test
------------------
-
-If an object is a ghost and its _p_changed is set to True, the
-assignment should have no effect.
-
->>> p = P()
->>> p._p_jar = DM()
->>> p._p_oid = 1
->>> p._p_deactivate()
->>> p._p_changed
->>> p._p_state
--1
->>> p._p_changed = True
->>> p._p_changed
->>> p._p_state
--1
-
-Activate, deactivate, and invalidate
-------------------------------------
-
-Some of these tests are redundant, but are included to make sure there
-are explicit and simple tests of _p_activate(), _p_deactivate(), and
-_p_invalidate().
-
->>> p = P()
->>> p._p_oid = 1
->>> p._p_jar = DM()
->>> p._p_deactivate()
->>> p._p_state
--1
->>> p._p_activate()
->>> p._p_state
-0
->>> p.x
-42
->>> p.inc()
->>> p.x
-43
->>> p._p_state
-1
->>> p._p_invalidate()
->>> p._p_state
--1
->>> p.x
-42
-
-Test failures
--------------
-
-The following tests cover various errors cases.
-
-When an object is modified, it registers with its data manager.  If
-that registration fails, the exception is propagated and the object
-stays in the up-to-date state.  It shouldn't change to the modified
-state, because it won't be saved when the transaction commits.
-
->>> p = P()
->>> p._p_oid = 1
->>> p._p_jar = BrokenDM()
->>> p._p_state
-0
->>> p._p_jar.called
-0
->>> p._p_changed = 1
-Traceback (most recent call last):
-  ...
-NotImplementedError
->>> p._p_jar.called
-1
->>> p._p_state
-0
-
-Make sure that exceptions that occur inside the data manager's
-setstate() method propagate out to the caller.
-
->>> p = P()
->>> p._p_oid = 1
->>> p._p_jar = BrokenDM()
->>> p._p_deactivate()
->>> p._p_state
--1
->>> p._p_activate()
-Traceback (most recent call last):
-  ...
-NotImplementedError
->>> p._p_state
--1
-
-
-Special test to cover layout of __dict__
-----------------------------------------
-
-We once had a bug in the Persistent class that calculated an incorrect
-offset for the __dict__ attribute.  It assigned __dict__ and _p_jar to
-the same location in memory.  This is a simple test to make sure they
-have different locations.
-
->>> p = P()
->>> p.inc()
->>> p.inc()
->>> 'x' in p.__dict__
-True
->>> p._p_jar
-
-
-Inheritance and metaclasses
----------------------------
-
-Simple tests to make sure it's possible to inherit from the Persistent
-base class multiple times.  There used to be metaclasses involved in
-Persistent that probably made this a more interesting test.
-
->>> class A(Persistent):
-...     pass
->>> class B(Persistent):
-...     pass
->>> class C(A, B):
-...     pass
->>> class D(object):
-...     pass
->>> class E(D, B):
-...     pass
->>> a = A()
->>> b = B()
->>> c = C()
->>> d = D()
->>> e = E()
-
-Also make sure that it's possible to define Persistent classes that
-have a custom metaclass.
-
->>> class alternateMeta(type):
-...     pass
->>> class alternate(object):
-...     __metaclass__ = alternateMeta
->>> class mixedMeta(alternateMeta, type):
-...     pass
->>> class mixed(alternate, Persistent):
-...     pass
->>> class mixed(Persistent, alternate):
-...     pass
-
-
-Basic type structure
---------------------
-
->>> Persistent.__dictoffset__ 
-0
->>> Persistent.__weakrefoffset__
-0
->>> Persistent.__basicsize__ > object.__basicsize__
-True
->>> P.__dictoffset__ > 0
-True
->>> P.__weakrefoffset__ > 0
-True
->>> P.__dictoffset__ < P.__weakrefoffset__
-True
->>> P.__basicsize__ > Persistent.__basicsize__
-True
-
-
-Slots
------
-
-These are some simple tests of classes that have an __slots__
-attribute.  Some of the classes should have slots, others shouldn't.
-
->>> class noDict(object):
-...     __slots__ = ['foo']
->>> class p_noDict(Persistent):
-...     __slots__ = ['foo']
->>> class p_shouldHaveDict(p_noDict):
-...     pass
-
->>> p_noDict.__dictoffset__
-0
->>> x = p_noDict()
->>> x.foo = 1
->>> x.foo
-1
->>> x.bar = 1
-Traceback (most recent call last):
-  ...
-AttributeError: 'p_noDict' object has no attribute 'bar'
->>> x._v_bar = 1
-Traceback (most recent call last):
-  ...
-AttributeError: 'p_noDict' object has no attribute '_v_bar'
->>> x.__dict__
-Traceback (most recent call last):
-  ...
-AttributeError: 'p_noDict' object has no attribute '__dict__'
-
-The various _p_ attributes are unaffected by slots.
->>> p._p_oid
->>> p._p_jar
->>> p._p_state
-0
-
-If the most-derived class does not specify __slots__, its instances get
-a __dict__ as usual:
-
->>> p_shouldHaveDict.__dictoffset__ > 0
-True
->>> x = p_shouldHaveDict()
->>> isinstance(x.__dict__, dict)
-True
-
-
-Pickling
---------
-
-There's actually a substantial effort involved in making subclasses of
-Persistent work with plain-old pickle.  The ZODB serialization layer
-never calls pickle on an object; it pickles the object's class
-description and its state as two separate pickles.
-
->>> import pickle
->>> p = P()
->>> p.inc()
->>> p2 = pickle.loads(pickle.dumps(p))
->>> p2.__class__ is P
-True
->>> p2.x == p.x
-True
-
-We should also test that pickle works with custom getstate and
-setstate.  Perhaps even reduce.  The problem is that pickling depends
-on finding the class in a particular module, and classes defined here
-won't appear in any module.  We could require each user of the tests
-to define a base class, but that might be tedious.
-
-Interfaces
-----------
-
-Some versions of Zope and ZODB have the zope.interfaces package
-available.  If it is available, then persistent will be associated
-with several interfaces.  It's hard to write a doctest test that runs
-the tests only if zope.interface is available, so this test looks a
-little unusual.  One problem is that the assert statements won't do
-anything if you run with -O.
-
->>> try:
-...     import zope.interface
-... except ImportError:
-...     pass
-... else:
-...     from persistent.interfaces import IPersistent
-...     assert IPersistent.implementedBy(Persistent)
-...     p = Persistent()
-...     assert IPersistent.providedBy(p)
-...     assert IPersistent.implementedBy(P)
-...     p = P()
-...     assert IPersistent.providedBy(p)
diff --git a/branches/bug1734/src/persistent/tests/persistenttestbase.py b/branches/bug1734/src/persistent/tests/persistenttestbase.py
deleted file mode 100644
index 0cde2ea2..00000000
--- a/branches/bug1734/src/persistent/tests/persistenttestbase.py
+++ /dev/null
@@ -1,372 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import unittest
-
-from persistent import Persistent
-from persistent.interfaces import IPersistent
-
-try:
-    import zope.interface
-except ImportError:
-    interfaces = False
-else:
-    interfaces = True
-
-class Test(unittest.TestCase):
-
-    klass = None # override in subclass
-
-    def testSaved(self):
-        p = self.klass()
-        p._p_oid = '\0\0\0\0\0\0hi'
-        dm = DM()
-        p._p_jar = dm
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(dm.called, 0)
-        p.inc()
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 1)
-        p.inc()
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 1)
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 1)
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 1)
-        del p._p_changed
-        # deal with current cPersistence implementation
-        if p._p_changed != 3:
-            self.assertEqual(p._p_changed, None)
-        self.assertEqual(dm.called, 1)
-        p.inc()
-        self.assertEqual(p.x, 43)
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 2)
-        p._p_changed = 0
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(dm.called, 2)
-        self.assertEqual(p.x, 43)
-        p.inc()
-        self.assertEqual(p._p_changed, 1)
-        self.assertEqual(dm.called, 3)
-
-    def testUnsaved(self):
-        p = self.klass()
-
-        self.assertEqual(p.x, 0)
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(p._p_jar, None)
-        self.assertEqual(p._p_oid, None)
-        p.inc()
-        p.inc()
-        self.assertEqual(p.x, 2)
-        self.assertEqual(p._p_changed, 0)
-
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, 0)
-        p._p_changed = 1
-        self.assertEqual(p._p_changed, 0)
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, 0)
-        del p._p_changed
-        self.assertEqual(p._p_changed, 0)
-        if self.has_dict:
-            self.failUnless(p.__dict__)
-        self.assertEqual(p.x, 2)
-
-    def testState(self):
-        p = self.klass()
-        self.assertEqual(p.__getstate__(), {'x': 0})
-        self.assertEqual(p._p_changed, 0)
-        p.__setstate__({'x':5})
-        self.assertEqual(p._p_changed, 0)
-        if self.has_dict:
-            p._v_foo = 2
-        self.assertEqual(p.__getstate__(), {'x': 5})
-        self.assertEqual(p._p_changed, 0)
-
-    def testSetStateSerial(self):
-        p = self.klass()
-        p._p_serial = '00000012'
-        p.__setstate__(p.__getstate__())
-        self.assertEqual(p._p_serial, '00000012')
-
-    def testDirectChanged(self):
-        p = self.klass()
-        p._p_oid = 1
-        dm = DM()
-        p._p_jar = dm
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(dm.called, 0)
-        p._p_changed = 1
-        self.assertEqual(dm.called, 1)
-
-    def testGhostChanged(self):
-        # An object is a ghost, and its _p_changed is set to True.
-        # This assignment should have no effect.
-        p = self.klass()
-        p._p_oid = 1
-        dm = DM()
-        p._p_jar = dm
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, None)
-        p._p_changed = True
-        self.assertEqual(p._p_changed, None)
-
-    def testRegistrationFailure(self):
-        p = self.klass()
-        p._p_oid = 1
-        dm = BrokenDM()
-        p._p_jar = dm
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(dm.called, 0)
-        try:
-            p._p_changed = 1
-        except NotImplementedError:
-            pass
-        else:
-            raise AssertionError("Exception not propagated")
-        self.assertEqual(dm.called, 1)
-        self.assertEqual(p._p_changed, 0)
-
-    def testLoadFailure(self):
-        p = self.klass()
-        p._p_oid = 1
-        dm = BrokenDM()
-        p._p_jar = dm
-        p._p_deactivate()  # make it a ghost
-
-        try:
-            p._p_activate()
-        except NotImplementedError:
-            pass
-        else:
-            raise AssertionError("Exception not propagated")
-        self.assertEqual(p._p_changed, None)
-
-    def testActivate(self):
-        p = self.klass()
-        dm = DM()
-        p._p_oid = 1
-        p._p_jar = dm
-        p._p_changed = 0
-        p._p_deactivate()
-        # Unsure: does this really test the activate method?
-        p._p_activate()
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(p.x, 42)
-
-    def testDeactivate(self):
-        p = self.klass()
-        dm = DM()
-        p._p_oid = 1
-        p._p_deactivate() # this deactivate has no effect
-        self.assertEqual(p._p_changed, 0)
-        p._p_jar = dm
-        p._p_changed = 0
-        p._p_deactivate()
-        self.assertEqual(p._p_changed, None)
-        p._p_activate()
-        self.assertEqual(p._p_changed, 0)
-        self.assertEqual(p.x, 42)
-
-    if interfaces:
-        def testInterface(self):
-            self.assert_(IPersistent.implementedBy(Persistent),
-                         "%s does not implement IPersistent" % Persistent)
-            p = Persistent()
-            self.assert_(IPersistent.providedBy(p),
-                         "%s does not implement IPersistent" % p)
-
-            self.assert_(IPersistent.implementedBy(P),
-                         "%s does not implement IPersistent" % P)
-            p = self.klass()
-            self.assert_(IPersistent.providedBy(p),
-                         "%s does not implement IPersistent" % p)
-
-    def testDataManagerAndAttributes(self):
-        # Test to cover an odd bug where the instance __dict__ was
-        # set at the same location as the data manager in the C type.
-        p = P()
-        p.inc()
-        p.inc()
-        self.assert_('x' in p.__dict__)
-        self.assert_(p._p_jar is None)
-
-    def testMultipleInheritance(self):
-        # make sure it is possible to inherit from two different
-        # subclasses of persistent.
-        class A(Persistent):
-            pass
-        class B(Persistent):
-            pass
-        class C(A, B):
-            pass
-        class D(object):
-            pass
-        class E(D, B):
-            pass
-
-    def testMultipleMeta(self):
-        # make sure it's possible to define persistent classes
-        # with a base whose metaclass is different
-        class alternateMeta(type):
-            pass
-        class alternate(object):
-            __metaclass__ = alternateMeta
-        class mixedMeta(alternateMeta, type):
-            pass
-        class mixed(alternate,Persistent):
-            __metaclass__ = mixedMeta
-
-    def testSlots(self):
-        # Verify that Persistent classes behave the same way
-        # as pure Python objects where '__slots__' and '__dict__'
-        # are concerned.
-
-        class noDict(object):
-            __slots__ = ['foo']
-
-        class shouldHaveDict(noDict):
-            pass
-
-        class p_noDict(Persistent):
-            __slots__ = ['foo']
-
-        class p_shouldHaveDict(p_noDict):
-            pass
-
-        self.assertEqual(noDict.__dictoffset__, 0)
-        self.assertEqual(p_noDict.__dictoffset__, 0)
-
-        self.assert_(shouldHaveDict.__dictoffset__ != 0)
-        self.assert_(p_shouldHaveDict.__dictoffset__ != 0)
-
-    def testBasicTypeStructure(self):
-        # test that a persistent class has a sane C type structure
-        # use P (defined below) as simplest example
-        self.assertEqual(Persistent.__dictoffset__, 0)
-        self.assertEqual(Persistent.__weakrefoffset__, 0)
-        self.assert_(Persistent.__basicsize__ > object.__basicsize__)
-        self.assert_(P.__dictoffset__)
-        self.assert_(P.__weakrefoffset__)
-        self.assert_(P.__dictoffset__ < P.__weakrefoffset__)
-        self.assert_(P.__basicsize__ > Persistent.__basicsize__)
-
-# Unsure:  Can anyone defend/explain the test below? The tests classes defined
-# here don't define __call__, so this weird test will always pass, but to what
-# end?  If a klass is given that happens to define __call__, the test *may*
-# mysteriously fail. Who cares?
-
-##     def testDeactivateErrors(self):
-##         p = self.klass()
-##         p._p_oid = '\0\0\0\0\0\0hi'
-##         dm = DM()
-##         p._p_jar = dm
-
-##         def typeerr(*args, **kwargs):
-##             self.assertRaises(TypeError, p, *args, **kwargs)
-
-##         typeerr(1)
-##         typeerr(1, 2)
-##         typeerr(spam=1)
-##         typeerr(spam=1, force=1)
-
-##         p._p_changed = True
-##         class Err(object):
-##             def __nonzero__(self):
-##                 raise RuntimeError
-
-##         typeerr(force=Err())
-
-class P(Persistent):
-    def __init__(self):
-        self.x = 0
-    def inc(self):
-        self.x += 1
-
-class P2(P):
-    def __getstate__(self):
-        return 42
-    def __setstate__(self, v):
-        self.v = v
-
-class B(Persistent):
-
-    __slots__ = ["x", "_p_serial"]
-
-    def __init__(self):
-        self.x = 0
-
-    def inc(self):
-        self.x += 1
-
-    def __getstate__(self):
-        return {'x': self.x}
-
-    def __setstate__(self, state):
-        self.x = state['x']
-
-class DM:
-    def __init__(self):
-        self.called = 0
-    def register(self, ob):
-        self.called += 1
-    def setstate(self, ob):
-        ob.__setstate__({'x': 42})
-
-class BrokenDM(DM):
-
-    def register(self,ob):
-        self.called += 1
-        raise NotImplementedError
-
-    def setstate(self,ob):
-        raise NotImplementedError
-
-class PersistentTest(Test):
-    klass = P
-    has_dict = 1
-
-    def testPicklable(self):
-        import pickle
-
-        p = self.klass()
-        p.inc()
-        p2 = pickle.loads(pickle.dumps(p))
-        self.assertEqual(p2.__class__, self.klass)
-
-        # verify that the inc is reflected:
-        self.assertEqual(p2.x, p.x)
-
-        # This assertion would be invalid.  Interfaces
-        # are compared by identity and copying doesn't
-        # preserve identity. We would get false negatives due
-        # to the differing identities of the original and copied
-        # PersistentInterface:
-        # self.assertEqual(p2.__dict__, p.__dict__)
-
-    def testPicklableWCustomState(self):
-        import pickle
-
-        p = P2()
-        p2 = pickle.loads(pickle.dumps(p))
-        self.assertEqual(p2.__class__, P2)
-        self.assertEqual(p2.__dict__, {'v': 42})
-
-class BasePersistentTest(Test):
-    klass = B
-    has_dict = 0
diff --git a/branches/bug1734/src/persistent/tests/testPersistent.py b/branches/bug1734/src/persistent/tests/testPersistent.py
deleted file mode 100644
index 89269444..00000000
--- a/branches/bug1734/src/persistent/tests/testPersistent.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-import pickle
-import time
-import unittest
-
-from persistent import Persistent, GHOST, UPTODATE, CHANGED, STICKY
-from persistent.cPickleCache import PickleCache
-from persistent.TimeStamp import TimeStamp
-from ZODB.utils import p64
-
-class Jar(object):
-    """Testing stub for _p_jar attribute."""
-
-    def __init__(self):
-        self.cache = PickleCache(self)
-        self.oid = 1
-        self.registered = {}
-
-    def add(self, obj):
-        obj._p_oid = p64(self.oid)
-        self.oid += 1
-        obj._p_jar = self
-        self.cache[obj._p_oid] = obj
-
-    def close(self):
-        pass
-
-    # the following methods must be implemented to be a jar
-
-    def setklassstate(self):
-        # I don't know what this method does, but the pickle cache
-        # constructor calls it.
-        pass
-
-    def register(self, obj):
-        self.registered[obj] = 1
-
-    def setstate(self, obj):
-        # Trivial setstate() implementation that just re-initializes
-        # the object.  This isn't what setstate() is supposed to do,
-        # but it suffices for the tests.
-        obj.__class__.__init__(obj)
-
-class P(Persistent):
-    pass
-
-class H1(Persistent):
-
-    def __init__(self):
-        self.n = 0
-
-    def __getattr__(self, attr):
-        self.n += 1
-        return self.n
-
-class H2(Persistent):
-
-    def __init__(self):
-        self.n = 0
-
-    def __getattribute__(self, attr):
-        supergetattr = super(H2, self).__getattribute__
-        try:
-            return supergetattr(attr)
-        except AttributeError:
-            n = supergetattr("n")
-            self.n = n + 1
-            return n + 1
-
-class PersistenceTest(unittest.TestCase):
-
-    def setUp(self):
-        self.jar = Jar()
-
-    def tearDown(self):
-        self.jar.close()
-
-    def testOidAndJarAttrs(self):
-        obj = P()
-        self.assertEqual(obj._p_oid, None)
-        obj._p_oid = 12
-        self.assertEqual(obj._p_oid, 12)
-        del obj._p_oid
-
-        self.jar.add(obj)
-
-        # Can't change oid of cache object.
-        def deloid():
-            del obj._p_oid
-        self.assertRaises(ValueError, deloid)
-        def setoid():
-            obj._p_oid = 12
-        self.assertRaises(ValueError, setoid)
-
-        def deloid():
-            del obj._p_jar
-        self.assertRaises(ValueError, deloid)
-        def setoid():
-            obj._p_jar = 12
-        self.assertRaises(ValueError, setoid)
-
-    def testChangedAndState(self):
-        obj = P()
-        self.jar.add(obj)
-
-        # The value returned for _p_changed can be one of:
-        # 0 -- it is not changed
-        # 1 -- it is changed
-        # None -- it is a ghost
-
-        obj.x = 1
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj._p_state, CHANGED)
-        self.assert_(obj in self.jar.registered)
-
-        obj._p_changed = 0
-        self.assertEqual(obj._p_changed, 0)
-        self.assertEqual(obj._p_state, UPTODATE)
-        self.jar.registered.clear()
-
-        obj._p_changed = 1
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj._p_state, CHANGED)
-        self.assert_(obj in self.jar.registered)
-
-        # setting obj._p_changed to None ghostifies if the
-        # object is in the up-to-date state, but not otherwise.
-        obj._p_changed = None
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj._p_state, CHANGED)
-        obj._p_changed = 0
-        # Now it's a ghost.
-        obj._p_changed = None
-        self.assertEqual(obj._p_changed, None)
-        self.assertEqual(obj._p_state, GHOST)
-
-        obj = P()
-        self.jar.add(obj)
-        obj._p_changed = 1
-        # You can transition directly from modified to ghost if
-        # you delete the _p_changed attribute.
-        del obj._p_changed
-        self.assertEqual(obj._p_changed, None)
-        self.assertEqual(obj._p_state, GHOST)
-
-    def testStateReadonly(self):
-        # make sure we can't write to _p_state; we don't want yet
-        # another way to change state!
-        obj = P()
-        def setstate(value):
-            obj._p_state = value
-        self.assertRaises(TypeError, setstate, GHOST)
-        self.assertRaises(TypeError, setstate, UPTODATE)
-        self.assertRaises(TypeError, setstate, CHANGED)
-        self.assertRaises(TypeError, setstate, STICKY)
-
-    def testInvalidate(self):
-        obj = P()
-        self.jar.add(obj)
-
-        self.assertEqual(obj._p_changed, 0)
-        self.assertEqual(obj._p_state, UPTODATE)
-        obj._p_invalidate()
-        self.assertEqual(obj._p_changed, None)
-        self.assertEqual(obj._p_state, GHOST)
-
-        obj._p_activate()
-        obj.x = 1
-        obj._p_invalidate()
-        self.assertEqual(obj._p_changed, None)
-        self.assertEqual(obj._p_state, GHOST)
-
-    def testSerial(self):
-        noserial = "\000" * 8
-        obj = P()
-        self.assertEqual(obj._p_serial, noserial)
-
-        def set(val):
-            obj._p_serial = val
-        self.assertRaises(ValueError, set, 1)
-        self.assertRaises(ValueError, set, "0123")
-        self.assertRaises(ValueError, set, "012345678")
-        self.assertRaises(ValueError, set, u"01234567")
-
-        obj._p_serial = "01234567"
-        del obj._p_serial
-        self.assertEqual(obj._p_serial, noserial)
-
-    def testMTime(self):
-        obj = P()
-        self.assertEqual(obj._p_mtime, None)
-
-        t = int(time.time())
-        ts = TimeStamp(*time.gmtime(t)[:6])
-        obj._p_serial = repr(ts)
-        self.assertEqual(obj._p_mtime, t)
-        self.assert_(isinstance(obj._p_mtime, float))
-
-    def testPicklable(self):
-        obj = P()
-        obj.attr = "test"
-        s = pickle.dumps(obj)
-        obj2 = pickle.loads(s)
-        self.assertEqual(obj.attr, obj2.attr)
-
-    def testGetattr(self):
-        obj = H1()
-        self.assertEqual(obj.larry, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-        self.jar.add(obj)
-        obj._p_deactivate()
-
-        # The simple Jar used for testing re-initializes the object.
-        self.assertEqual(obj.larry, 1)
-        # The getattr hook modified the object, so it should now be
-        # in the changed state.
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj._p_state, CHANGED)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-    def testGetattribute(self):
-        obj = H2()
-        self.assertEqual(obj.larry, 1)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-        self.jar.add(obj)
-        obj._p_deactivate()
-
-        # The simple Jar used for testing re-initializes the object.
-        self.assertEqual(obj.larry, 1)
-        # The getattr hook modified the object, so it should now be
-        # in the changed state.
-        self.assertEqual(obj._p_changed, 1)
-        self.assertEqual(obj._p_state, CHANGED)
-        self.assertEqual(obj.curly, 2)
-        self.assertEqual(obj.moe, 3)
-
-    # TODO:  Need to decide how __setattr__ and __delattr__ should work,
-    # then write tests.
-
-
-def test_suite():
-    return unittest.makeSuite(PersistenceTest)
diff --git a/branches/bug1734/src/persistent/tests/test_PickleCache.py b/branches/bug1734/src/persistent/tests/test_PickleCache.py
deleted file mode 100644
index 20b64a36..00000000
--- a/branches/bug1734/src/persistent/tests/test_PickleCache.py
+++ /dev/null
@@ -1,52 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Unit tests for PickleCache
-
-$Id$
-"""
-
-class DummyConnection:
-
-    def setklassstate(self, obj):
-        """Method used by PickleCache."""
-
-
-def test_delitem():
-    """
-    >>> from persistent import PickleCache
-    >>> conn = DummyConnection()
-    >>> cache = PickleCache(conn)
-    >>> del cache['']
-    Traceback (most recent call last):
-    ...
-    KeyError: ''
-    >>> from persistent import Persistent
-    >>> p = Persistent()
-    >>> p._p_oid = 'foo'
-    >>> p._p_jar = conn
-    >>> cache['foo'] = p
-    >>> del cache['foo']
-
-    """
-
-from doctest import DocTestSuite
-import unittest
-
-def test_suite():
-    return unittest.TestSuite((
-        DocTestSuite(),
-        ))
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/persistent/tests/test_list.py b/branches/bug1734/src/persistent/tests/test_list.py
deleted file mode 100644
index 30f595ae..00000000
--- a/branches/bug1734/src/persistent/tests/test_list.py
+++ /dev/null
@@ -1,229 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the list interface to PersistentList
-"""
-
-import unittest
-from persistent.list import PersistentList
-
-l0 = []
-l1 = [0]
-l2 = [0, 1]
-
-class TestPList(unittest.TestCase):
-
-    def testTheWorld(self):
-        # Test constructors
-        u = PersistentList()
-        u0 = PersistentList(l0)
-        u1 = PersistentList(l1)
-        u2 = PersistentList(l2)
-
-        uu = PersistentList(u)
-        uu0 = PersistentList(u0)
-        uu1 = PersistentList(u1)
-        uu2 = PersistentList(u2)
-
-        v = PersistentList(tuple(u))
-        class OtherList:
-            def __init__(self, initlist):
-                self.__data = initlist
-            def __len__(self):
-                return len(self.__data)
-            def __getitem__(self, i):
-                return self.__data[i]
-        v0 = PersistentList(OtherList(u0))
-        vv = PersistentList("this is also a sequence")
-
-        # Test __repr__
-        eq = self.assertEqual
-
-        eq(str(u0), str(l0), "str(u0) == str(l0)")
-        eq(repr(u1), repr(l1), "repr(u1) == repr(l1)")
-        eq(`u2`, `l2`, "`u2` == `l2`")
-
-        # Test __cmp__ and __len__
-
-        def mycmp(a, b):
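-            # cmp() may return any negative or positive integer, so
-            # clamp the result to -1, 0, or 1 for exact comparison.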
-            r = cmp(a, b)
-            if r < 0: return -1
-            if r > 0: return 1
-            return r
-
-        all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
-        for a in all:
-            for b in all:
-                eq(mycmp(a, b), mycmp(len(a), len(b)),
-                      "mycmp(a, b) == mycmp(len(a), len(b))")
-
-        # Test __getitem__
-
-        for i in range(len(u2)):
-            eq(u2[i], i, "u2[i] == i")
-
-        # Test __setitem__
-
-        uu2[0] = 0
-        uu2[1] = 100
-        try:
-            uu2[2] = 200
-        except IndexError:
-            pass
-        else:
-            raise TestFailed("uu2[2] shouldn't be assignable")
-
-        # Test __delitem__
-
-        del uu2[1]
-        del uu2[0]
-        try:
-            del uu2[0]
-        except IndexError:
-            pass
-        else:
-            raise TestFailed("uu2[0] shouldn't be deletable")
-
-        # Test __getslice__
-
-        for i in range(-3, 4):
-            eq(u2[:i], l2[:i], "u2[:i] == l2[:i]")
-            eq(u2[i:], l2[i:], "u2[i:] == l2[i:]")
-            for j in range(-3, 4):
-                eq(u2[i:j], l2[i:j], "u2[i:j] == l2[i:j]")
-
-        # Test __setslice__
-
-        for i in range(-3, 4):
-            u2[:i] = l2[:i]
-            eq(u2, l2, "u2 == l2")
-            u2[i:] = l2[i:]
-            eq(u2, l2, "u2 == l2")
-            for j in range(-3, 4):
-                u2[i:j] = l2[i:j]
-                eq(u2, l2, "u2 == l2")
-
-        uu2 = u2[:]
-        uu2[:0] = [-2, -1]
-        eq(uu2, [-2, -1, 0, 1], "uu2 == [-2, -1, 0, 1]")
-        uu2[0:] = []
-        eq(uu2, [], "uu2 == []")
-
-        # Test __contains__
-        for i in u2:
-            self.failUnless(i in u2, "i in u2")
-        for i in min(u2)-1, max(u2)+1:
-            self.failUnless(i not in u2, "i not in u2")
-
-        # Test __delslice__
-
-        uu2 = u2[:]
-        del uu2[1:2]
-        del uu2[0:1]
-        eq(uu2, [], "uu2 == []")
-
-        uu2 = u2[:]
-        del uu2[1:]
-        del uu2[:1]
-        eq(uu2, [], "uu2 == []")
-
-        # Test __add__, __radd__, __mul__ and __rmul__
-
-        #self.failUnless(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
-        self.failUnless(u1 + [1] == u2, "u1 + [1] == u2")
-        #self.failUnless([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
-        self.failUnless(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
-        self.failUnless(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
-        self.failUnless(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
-
-        # Test append
-
-        u = u1[:]
-        u.append(1)
-        eq(u, u2, "u == u2")
-
-        # Test insert
-
-        u = u2[:]
-        u.insert(0, -1)
-        eq(u, [-1, 0, 1], "u == [-1, 0, 1]")
-
-        # Test pop
-
-        u = PersistentList([0, -1, 1])
-        u.pop()
-        eq(u, [0, -1], "u == [0, -1]")
-        u.pop(0)
-        eq(u, [-1], "u == [-1]")
-
-        # Test remove
-
-        u = u2[:]
-        u.remove(1)
-        eq(u, u1, "u == u1")
-
-        # Test count
-        u = u2*3
-        eq(u.count(0), 3, "u.count(0) == 3")
-        eq(u.count(1), 3, "u.count(1) == 3")
-        eq(u.count(2), 0, "u.count(2) == 0")
-
-
-        # Test index
-
-        eq(u2.index(0), 0, "u2.index(0) == 0")
-        eq(u2.index(1), 1, "u2.index(1) == 1")
-        try:
-            u2.index(2)
-        except ValueError:
-            pass
-        else:
-            raise TestFailed("expected ValueError")
-
-        # Test reverse
-
-        u = u2[:]
-        u.reverse()
-        eq(u, [1, 0], "u == [1, 0]")
-        u.reverse()
-        eq(u, u2, "u == u2")
-
-        # Test sort
-
-        u = PersistentList([1, 0])
-        u.sort()
-        eq(u, u2, "u == u2")
-
-        # Test extend
-
-        u = u1[:]
-        u.extend(u2)
-        eq(u, u1 + u2, "u == u1 + u2")
-
-        # Test iadd
-        u = u1[:]
-        u += u2
-        eq(u, u1 + u2, "u == u1 + u2")
-
-        # Test imul
-        u = u1[:]
-        u *= 3
-        eq(u, u1 + u1 + u1, "u == u1 + u1 + u1")
-
-
-def test_suite():
-    return unittest.makeSuite(TestPList)
-
-if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    unittest.main(testLoader=loader)
diff --git a/branches/bug1734/src/persistent/tests/test_overriding_attrs.py b/branches/bug1734/src/persistent/tests/test_overriding_attrs.py
deleted file mode 100644
index de3eb7ee..00000000
--- a/branches/bug1734/src/persistent/tests/test_overriding_attrs.py
+++ /dev/null
@@ -1,402 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Overriding attr methods
-
-This module tests and documents, through example, overriding attribute
-access methods.
-
-$Id$
-"""
-
-from persistent import Persistent
-import transaction
-from ZODB.tests.util import DB
-
-class SampleOverridingGetattr(Persistent):
-    """Example of overriding __getattr__
-    """
-
-    def __getattr__(self, name):
-        """Get attributes that can't be gotten the usual way
-
-        The __getattr__ method works pretty much the same for persistent
-        classes as it does for other classes.  No special handling is
-        needed.  If an object is a ghost, then it will be activated before
-        __getattr__ is called.
-
-        In this example, our object returns a tuple containing the
-        attribute name converted to upper case and the value of
-        _p_changed, for any attribute that isn't handled by the
-        default machinery.
-
-        >>> o = SampleOverridingGetattr()
-        >>> o._p_changed
-        False
-        >>> o._p_oid
-        >>> o._p_jar
-        >>> o.spam
-        ('SPAM', False)
-        >>> o.spam = 1
-        >>> o.spam
-        1
-
-        We'll save the object, so it can be deactivated:
-
-        >>> db = DB()
-        >>> conn = db.open()
-        >>> conn.root()['o'] = o
-        >>> transaction.commit()
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        And now, if we ask for an attribute it doesn't have,
-
-        >>> o.eggs
-        ('EGGS', False)
-
-        And we see that the object was activated before calling the
-        __getattr__ method.
-
-        We always close databases after we use them:
-
-        >>> db.close()
-        """
-        # Don't pretend we have any special attributes.
-        if name.startswith("__") and name.endswrith("__"):
-            raise AttributeError, name
-        else:
-            return name.upper(), self._p_changed
-
-class SampleOverridingGetattributeSetattrAndDelattr(Persistent):
-    """Example of overriding __getattribute__, __setattr__, and __delattr__
-
-    In this example, we'll provide an example that shows how to
-    override the __getattribute__, __setattr__, and __delattr__
-    methods.  We'll create a class that stores its attributes in a
-    secret dictionary within its instance dictionary.
-
-    The class will have the policy that variables with names starting
-    with 'tmp_' will be volatile.
-
-    """
-
-    def __init__(self, **kw):
-        self.__dict__['__secret__'] = kw.copy()
-
-    def __getattribute__(self, name):
-        """Get an attribute value
-
-        The __getattribute__ method is called for all attribute
-        accesses.  It overrides the attribute access support inherited
-        from Persistent.
-
-        Our sample class lets us provide initial values as keyword
-        arguments to the constructor:
-
-        >>> o = SampleOverridingGetattributeSetattrAndDelattr(x=1)
-        >>> o._p_changed
-        0
-        >>> o._p_oid
-        >>> o._p_jar
-        >>> o.x
-        1
-        >>> o.y
-        Traceback (most recent call last):
-        ...
-        AttributeError: y
-
-        Next, we'll save the object in a database so that we can
-        deactivate it:
-
-        >>> db = DB()
-        >>> conn = db.open()
-        >>> conn.root()['o'] = o
-        >>> transaction.commit()
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        And we'll get some data:
-
-        >>> o.x
-        1
-
-        which activates the object:
-
-        >>> o._p_changed
-        0
-
-        It works for missing attributes too:
-
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        >>> o.y
-        Traceback (most recent call last):
-        ...
-        AttributeError: y
-
-        >>> o._p_changed
-        0
-
-        See the very important note in the comment below!
-
-        We always close databases after we use them:
-
-        >>> db.close()
-        """
-
-        #################################################################
-        # IMPORTANT! READ THIS! 8->
-        #
-        # We *always* give Persistent a chance first.
-        # Persistent handles certain special attributes, like _p_
-        # attributes. In particular, the base class handles __dict__
-        # and __class__.
-        #
-        # We call _p_getattr. If it returns True, then we have to
-        # use Persistent.__getattribute__ to get the value.
-        #
-        #################################################################
-        if Persistent._p_getattr(self, name):
-            return Persistent.__getattribute__(self, name)
-
-        # Data should be in our secret dictionary:
-        secret = self.__dict__['__secret__']
-        if name in secret:
-            return secret[name]
-
-        # Maybe it's a method:
-        meth = getattr(self.__class__, name, None)
-        if meth is None:
-            raise AttributeError, name
-
-        return meth.__get__(self, self.__class__)
-
-
-    def __setattr__(self, name, value):
-        """Set an attribute value
-
-        The __setattr__ method is called for all attribute
-        assignments.  It overrides the attribute assignment support
-        inherited from Persistent.
-
-        Implementors of __setattr__ methods:
-
-        1. Must call Persistent._p_setattr first to allow it
-           to handle some attributes and to make sure that the object
-           is activated if necessary, and
-
-        2. Must set _p_changed to mark objects as changed.
-
-        See the comments in the source below.
-
-        >>> o = SampleOverridingGetattributeSetattrAndDelattr()
-        >>> o._p_changed
-        0
-        >>> o._p_oid
-        >>> o._p_jar
-        >>> o.x
-        Traceback (most recent call last):
-        ...
-        AttributeError: x
-
-        >>> o.x = 1
-        >>> o.x
-        1
-
-        Because the implementation doesn't store attributes directly
-        in the instance dictionary, we don't have a key for the attribute:
-
-        >>> 'x' in o.__dict__
-        False
-
-        Next, we'll save the object in a database so that we can
-        deactivate it:
-
-        >>> db = DB()
-        >>> conn = db.open()
-        >>> conn.root()['o'] = o
-        >>> transaction.commit()
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        We'll modify an attribute:
-
-        >>> o.y = 2
-        >>> o.y
-        2
-
-        which reactivates it and marks it as modified, because our
-        __setattr__ implementation marked it as modified:
-
-        >>> o._p_changed
-        1
-
-        Now, if we commit:
-
-        >>> transaction.commit()
-        >>> o._p_changed
-        0
-
-        And deactivate the object:
-
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        and then set a variable with a name starting with 'tmp_'.
-        The object will be activated, but not marked as modified,
-        because our __setattr__ implementation doesn't mark the
-        object as changed if the name starts with 'tmp_':
-
-        >>> o.tmp_foo = 3
-        >>> o._p_changed
-        0
-        >>> o.tmp_foo
-        3
-
-        We always close databases after we use them:
-
-        >>> db.close()
-
-        """
-
-        #################################################################
-        # IMPORTANT! READ THIS! 8->
-        #
-        # We *always* give Persistent a chance first.
-        # Persistent handles certain special attributes, like _p_
-        # attributes.
-        #
-        # We call _p_setattr. If it returns True, then we are done.
-        # It has already set the attribute.
-        #
-        #################################################################
-        if Persistent._p_setattr(self, name, value):
-            return
-
-        self.__dict__['__secret__'][name] = value
-
-        if not name.startswith('tmp_'):
-            self._p_changed = 1
-
-    def __delattr__(self, name):
-        """Delete an attribute value
-
-        The __delattr__ method is called for all attribute
-        deletions.  It overrides the attribute deletion support
-        inherited from Persistent.
-
-        Implementors of __delattr__ methods:
-
-        1. Must call Persistent._p_delattr first to allow it
-           to handle some attributes and to make sure that the object
-           is activated if necessary, and
-
-        2. Must set _p_changed to mark objects as changed.
-
-        See the comments in the source below.
-
-        >>> o = SampleOverridingGetattributeSetattrAndDelattr(
-        ...         x=1, y=2, tmp_z=3)
-        >>> o._p_changed
-        0
-        >>> o._p_oid
-        >>> o._p_jar
-        >>> o.x
-        1
-        >>> del o.x
-        >>> o.x
-        Traceback (most recent call last):
-        ...
-        AttributeError: x
-
-        Next, we'll save the object in a database so that we can
-        deactivate it:
-
-        >>> db = DB()
-        >>> conn = db.open()
-        >>> conn.root()['o'] = o
-        >>> transaction.commit()
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        If we delete an attribute:
-
-        >>> del o.y
-
-        The object is activated.  It is also marked as changed because
-        our implementation marked it as changed.
-
-        >>> o._p_changed
-        1
-        >>> o.y
-        Traceback (most recent call last):
-        ...
-        AttributeError: y
-
-        >>> o.tmp_z
-        3
-
-        Now, if we commit:
-
-        >>> transaction.commit()
-        >>> o._p_changed
-        0
-
-        And deactivate the object:
-
-        >>> o._p_deactivate()
-        >>> o._p_changed
-
-        and then delete a variable with a name starting with 'tmp_'.
-        The object will be activated, but not marked as modified,
-        because our __delattr__ implementation doesn't mark the
-        object as changed if the name starts with 'tmp_':
-
-        >>> del o.tmp_z
-        >>> o._p_changed
-        0
-        >>> o.tmp_z
-        Traceback (most recent call last):
-        ...
-        AttributeError: tmp_z
-
-        We always close databases after we use them:
-
-        >>> db.close()
-
-        """
-
-        #################################################################
-        # IMPORTANT! READ THIS! 8->
-        #
-        # We *always* give Persistent a chance first.
-        # Persistent handles certain special attributes, like _p_
-        # attributes.
-        #
-        # We call _p_delattr. If it returns True, then we are done.
-        # It has already deleted the attribute.
-        #
-        #################################################################
-        if Persistent._p_delattr(self, name):
-            return
-
-        del self.__dict__['__secret__'][name]
-
-        if not name.startswith('tmp_'):
-            self._p_changed = 1
-
-
-def test_suite():
-    from doctest import DocTestSuite
-    return DocTestSuite()
diff --git a/branches/bug1734/src/persistent/tests/test_persistent.py b/branches/bug1734/src/persistent/tests/test_persistent.py
deleted file mode 100644
index d5ef79cb..00000000
--- a/branches/bug1734/src/persistent/tests/test_persistent.py
+++ /dev/null
@@ -1,26 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-from persistent import Persistent
-
-from zope.testing.doctestunit import DocFileSuite
-
-class P(Persistent):
-    def __init__(self):
-        self.x = 0
-    def inc(self):
-        self.x += 1
-
-def test_suite():
-    return DocFileSuite("persistent.txt", globs={"P": P})
diff --git a/branches/bug1734/src/persistent/tests/test_pickle.py b/branches/bug1734/src/persistent/tests/test_pickle.py
deleted file mode 100644
index f54dd306..00000000
--- a/branches/bug1734/src/persistent/tests/test_pickle.py
+++ /dev/null
@@ -1,280 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Basic pickling tests
-
-$Id$
-"""
-
-from persistent import Persistent
-import pickle
-
-
-def print_dict(d):
-    d = d.items()
-    d.sort()
-    print '{%s}' % (', '.join(
-        [('%r: %r' % (k, v)) for (k, v) in d]
-        ))
-
-def cmpattrs(self, other, *attrs):
-    for attr in attrs:
-        if attr[:3] in ('_v_', '_p_'):
-            continue
-        c = cmp(getattr(self, attr, None), getattr(other, attr, None))
-        if c:
-            return c
-    return 0
-
-class Simple(Persistent):
-    def __init__(self, name, **kw):
-        self.__name__ = name
-        self.__dict__.update(kw)
-        self._v_favorite_color = 'blue'
-        self._p_foo = 'bar'
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other, '__class__', *(self.__dict__.keys()))
-
-def test_basic_pickling():
-    """
-    >>> x = Simple('x', aaa=1, bbb='foo')
-
-    >>> print_dict(x.__getstate__())
-    {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'}
-
-    >>> f, (c,), state = x.__reduce__()
-    >>> f.__name__
-    '__newobj__'
-    >>> f.__module__
-    'copy_reg'
-    >>> c.__name__
-    'Simple'
-
-    >>> print_dict(state)
-    {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.__setstate__({'z': 1})
-    >>> x.__dict__
-    {'z': 1}
-
-    """
-
-class Custom(Simple):
-
-    def __new__(cls, x, y):
-        r = Persistent.__new__(cls)
-        r.x, r.y = x, y
-        return r
-
-    def __init__(self, x, y):
-        self.a = 42
-
-    def __getnewargs__(self):
-        return self.x, self.y
-
-    def __getstate__(self):
-        return self.a
-
-    def __setstate__(self, a):
-        self.a = a
-
-
-def test_pickling_w_overrides():
-    """
-    >>> x = Custom('x', 'y')
-    >>> x.a = 99
-
-    >>> (f, (c, ax, ay), a) = x.__reduce__()
-    >>> f.__name__
-    '__newobj__'
-    >>> f.__module__
-    'copy_reg'
-    >>> c.__name__
-    'Custom'
-    >>> ax, ay, a
-    ('x', 'y', 99)
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-class Slotted(Persistent):
-    __slots__ = 's1', 's2', '_p_splat', '_v_eek'
-    def __init__(self, s1, s2):
-        self.s1, self.s2 = s1, s2
-        self._v_eek = 1
-        self._p_splat = 2
-
-class SubSlotted(Slotted):
-    __slots__ = 's3', 's4'
-    def __init__(self, s1, s2, s3):
-        Slotted.__init__(self, s1, s2)
-        self.s3 = s3
-
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other, '__class__', 's1', 's2', 's3', 's4')
-
-
-def test_pickling_w_slots_only():
-    """
-    >>> x = SubSlotted('x', 'y', 'z')
-
-    >>> d, s = x.__getstate__()
-    >>> d
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> d
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-class SubSubSlotted(SubSlotted):
-
-    def __init__(self, s1, s2, s3, **kw):
-        SubSlotted.__init__(self, s1, s2, s3)
-        self.__dict__.update(kw)
-        self._v_favorite_color = 'blue'
-        self._p_foo = 'bar'
-
-    def __cmp__(self, other):
-        return cmpattrs(self, other,
-                        '__class__', 's1', 's2', 's3', 's4',
-                        *(self.__dict__.keys()))
-
-def test_pickling_w_slots():
-    """
-    >>> x = SubSubSlotted('x', 'y', 'z', aaa=1, bbb='foo')
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {'aaa': 1, 'bbb': 'foo'}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {'aaa': 1, 'bbb': 'foo'}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-def test_pickling_w_slots_w_empty_dict():
-    """
-    >>> x = SubSubSlotted('x', 'y', 'z')
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    >>> x.s4 = 'spam'
-
-    >>> d, s = x.__getstate__()
-    >>> print_dict(d)
-    {}
-    >>> print_dict(s)
-    {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'}
-
-    >>> pickle.loads(pickle.dumps(x)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 0)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 1)) == x
-    1
-    >>> pickle.loads(pickle.dumps(x, 2)) == x
-    1
-
-    """
-
-from doctest import DocTestSuite
-import unittest
-
-def test_suite():
-    return unittest.TestSuite((
-        DocTestSuite(),
-        ))
-
-if __name__ == '__main__': unittest.main()
diff --git a/branches/bug1734/src/persistent/tests/test_wref.py b/branches/bug1734/src/persistent/tests/test_wref.py
deleted file mode 100644
index ffe9bc07..00000000
--- a/branches/bug1734/src/persistent/tests/test_wref.py
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""
-$Id$
-"""
-import unittest
-from doctest import DocTestSuite
-
-def test_suite():
-    return DocTestSuite('persistent.wref')
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/persistent/wref.py b/branches/bug1734/src/persistent/wref.py
deleted file mode 100644
index 531edd3b..00000000
--- a/branches/bug1734/src/persistent/wref.py
+++ /dev/null
@@ -1,300 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""ZODB-based persistent weakrefs
-
-$Id$
-"""
-
-from persistent import Persistent
-
-WeakRefMarker = object()
-
-class WeakRef(object):
-    """Persistent weak references
-
-    Persistent weak references are used much like Python weak
-    references.  The major difference is that you can't specify a
-    callback to be called when the referenced object is removed from
-    the database.
-
-    Here's an example. We'll start by creating a persistent object and
-    a reference to it:
-
-    >>> import persistent.list
-    >>> import ZODB.tests.util
-    >>> ob = persistent.list.PersistentList()
-    >>> ref = WeakRef(ob)
-    >>> ref() is ob
-    True
-
-    The hash of the ref is the same as the hash of the referenced object:
-
-    >>> hash(ref) == hash(ob)
-    True
-
-    Two refs to the same object are equal:
-
-    >>> WeakRef(ob) == ref
-    True
-
-    >>> ob2 = persistent.list.PersistentList([1])
-    >>> WeakRef(ob2) == ref
-    False
-
-    Let's save the reference and the referenced object in a database:
-
-    >>> db = ZODB.tests.util.DB()
-
-    >>> conn1 = db.open()
-    >>> conn1.root()['ob'] = ob
-    >>> conn1.root()['ref'] = ref
-    >>> ZODB.tests.util.commit()
-
-    If we open a new connection, we can use the reference:
-
-    >>> conn2 = db.open()
-    >>> conn2.root()['ref']() is conn2.root()['ob']
-    True
-    >>> hash(conn2.root()['ref']) == hash(conn2.root()['ob'])
-    True
-
-    But if we delete the referenced object and pack:
-
-    >>> del conn2.root()['ob']
-    >>> ZODB.tests.util.commit()
-    >>> ZODB.tests.util.pack(db)
-
-    And then look in a new connection:
-
-    >>> conn3 = db.open()
-    >>> conn3.root()['ob']
-    Traceback (most recent call last):
-    ...
-    KeyError: 'ob'
-
-    Trying to dereference the reference returns None:
-
-    >>> conn3.root()['ref']()
-
-    Trying to get a hash, raises a type error:
-
-    >>> hash(conn3.root()['ref'])
-    Traceback (most recent call last):
-    ...
-    TypeError: Weakly-referenced object has gone away
-
-    Always explicitly close databases: :)
-
-    >>> db.close()
-
-    """
-
-    # We set _p_oid to a marker so that the serialization system can
-    # provide special handling of weakrefs.
-    _p_oid = WeakRefMarker
-
-    def __init__(self, ob):
-        self._v_ob = ob
-        self.oid = ob._p_oid
-        self.dm = ob._p_jar
-
-    def __call__(self):
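-        # _v_ob is a volatile cache of the target; it isn't saved with
-        # the reference, so a freshly loaded WeakRef fetches the object
-        # through its data manager and returns None if it's gone.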
-        try:
-            return self._v_ob
-        except AttributeError:
-            try:
-                self._v_ob = self.dm[self.oid]
-            except KeyError:
-                return None
-            return self._v_ob
-
-    def __hash__(self):
-        self = self()
-        if self is None:
-            raise TypeError('Weakly-referenced object has gone away')
-        return hash(self)
-
-    def __eq__(self, other):
-        self = self()
-        if self is None:
-            raise TypeError('Weakly-referenced object has gone away')
-        other = other()
-        if other is None:
-            raise TypeError('Weakly-referenced object has gone away')
-
-        return self == other
-
-
-class PersistentWeakKeyDictionary(Persistent):
-    """Persistent weak key dictionary
-
-    This is akin to WeakKeyDictionaries. Note, however, that removal
-    of items is extremely lazy. See below.
-
-    We'll start by creating a PersistentWeakKeyDictionary and adding
-    some persistent objects to it.
-
-    >>> d = PersistentWeakKeyDictionary()
-    >>> import ZODB.tests.util
-    >>> p1 = ZODB.tests.util.P('p1')
-    >>> p2 = ZODB.tests.util.P('p2')
-    >>> p3 = ZODB.tests.util.P('p3')
-    >>> d[p1] = 1
-    >>> d[p2] = 2
-    >>> d[p3] = 3
-
-    We'll create an extra persistent object that's not in the dict:
-
-    >>> p4 = ZODB.tests.util.P('p4')
-
-    Now we'll exercise iteration and item access:
-
-    >>> l = [(str(k), d[k], d.get(k)) for k in d]
-    >>> l.sort()
-    >>> l
-    [('P(p1)', 1, 1), ('P(p2)', 2, 2), ('P(p3)', 3, 3)]
-
-    And the containment operator:
-
-    >>> [p in d for p in [p1, p2, p3, p4]]
-    [True, True, True, False]
-
-    We can add the dict and the referenced objects to a database:
-
-    >>> db = ZODB.tests.util.DB()
-
-    >>> conn1 = db.open()
-    >>> conn1.root()['p1'] = p1
-    >>> conn1.root()['d'] = d
-    >>> conn1.root()['p2'] = p2
-    >>> conn1.root()['p3'] = p3
-    >>> ZODB.tests.util.commit()
-
-    And things still work, as before:
-
-    >>> l = [(str(k), d[k], d.get(k)) for k in d]
-    >>> l.sort()
-    >>> l
-    [('P(p1)', 1, 1), ('P(p2)', 2, 2), ('P(p3)', 3, 3)]
-    >>> [p in d for p in [p1, p2, p3, p4]]
-    [True, True, True, False]
-
-    Likewise, we can read the objects from another connection and
-    things still work.
-
-    >>> conn2 = db.open()
-    >>> d = conn2.root()['d']
-    >>> p1 = conn2.root()['p1']
-    >>> p2 = conn2.root()['p2']
-    >>> p3 = conn2.root()['p3']
-    >>> l = [(str(k), d[k], d.get(k)) for k in d]
-    >>> l.sort()
-    >>> l
-    [('P(p1)', 1, 1), ('P(p2)', 2, 2), ('P(p3)', 3, 3)]
-    >>> [p in d for p in [p1, p2, p3, p4]]
-    [True, True, True, False]
-
-    Now, we'll delete one of the objects from the database, but *not*
-    from the dictionary:
-
-    >>> del conn2.root()['p2']
-    >>> ZODB.tests.util.commit()
-
-    And pack the database, so that the no-longer referenced p2 is
-    actually removed from the database.
-
-    >>> ZODB.tests.util.pack(db)
-
-    Now if we access the dictionary in a new connection, it no longer
-    has p2:
-
-    >>> conn3 = db.open()
-    >>> d = conn3.root()['d']
-    >>> l = [(str(k), d[k], d.get(k)) for k in d]
-    >>> l.sort()
-    >>> l
-    [('P(p1)', 1, 1), ('P(p3)', 3, 3)]
-
-    It's worth noting that the versions of the dictionary in
-    conn1 and conn2 still have p2, because p2 is still in the caches
-    for those connections.
-
-    Always explicitly close databases: :)
-
-    >>> db.close()
-
-    """
-    # TODO:  It's expensive trying to load dead objects from the database.
-    # It would be helpful if the data manager/connection cached these.
-
-    def __init__(self, adict=None, **kwargs):
-        self.data = {}
-        if adict is not None:
-            keys = getattr(adict, "keys", None)
-            if keys is None:
-                adict = dict(adict)
-            self.update(adict)
-        if kwargs:
-            self.update(kwargs)
-
-    def __getstate__(self):
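-        # Save 'data' as a list of (WeakRef, value) pairs; stale
-        # entries are pruned when the state is loaded (see
-        # __setstate__).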
-        state = Persistent.__getstate__(self)
-        state['data'] = state['data'].items()
-        return state
-
-    def __setstate__(self, state):
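-        # Rebuild the dict, dropping pairs whose referent has gone
-        # away -- this is the lazy removal mentioned in the class
-        # docstring.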
-        state['data'] = dict([
-            (k, v) for (k, v) in state['data']
-            if k() is not None
-            ])
-        Persistent.__setstate__(self, state)
-
-    def __setitem__(self, key, value):
-        self.data[WeakRef(key)] = value
-
-    def __getitem__(self, key):
-        return self.data[WeakRef(key)]
-
-    def __delitem__(self, key):
-        del self.data[WeakRef(key)]
-
-    def get(self, key, default=None):
-        """D.get(k[, d]) -> D[k] if k in D, else d.
-
-        >>> import ZODB.tests.util
-        >>> key = ZODB.tests.util.P("key")
-        >>> missing = ZODB.tests.util.P("missing")
-        >>> d = PersistentWeakKeyDictionary([(key, 1)])
-        >>> d.get(key)
-        1
-        >>> d.get(missing)
-        >>> d.get(missing, 12)
-        12
-        """
-        return self.data.get(WeakRef(key), default)
-
-    def __contains__(self, key):
-        return WeakRef(key) in self.data
-
-    def __iter__(self):
-        for k in self.data:
-            yield k()
-
-    def update(self, adict):
-        if isinstance(adict, PersistentWeakKeyDictionary):
-            self.data.update(adict.data)
-        else:
-            for k, v in adict.items():
-                self.data[WeakRef(k)] = v
-
-    # TODO:  May need more methods, and tests.
diff --git a/branches/bug1734/src/scripts/README.txt b/branches/bug1734/src/scripts/README.txt
deleted file mode 100644
index 5034dd07..00000000
--- a/branches/bug1734/src/scripts/README.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-This directory contains a collection of utilities for managing ZODB
-databases.  Some are more useful than others.  If you install ZODB
-using distutils ("python setup.py install"), fsdump.py, fstest.py,
-repozo.py, and zeopack.py will be installed in /usr/local/bin.
-
-Unless otherwise noted, these scripts are invoked with the name of the
-Data.fs file as their only argument.  Example: checkbtrees.py data.fs.
-
-
-analyze.py -- a transaction analyzer for FileStorage
-
-Reports on the data in a FileStorage.  The report is organized by
-class.  It shows total data, as well as separate reports for current
-and historical revisions of objects.
-
-
-checkbtrees.py -- checks BTrees in a FileStorage for corruption
-
-Attempts to find all the BTrees contained in a Data.fs, calls their
-_check() methods, and runs them through BTrees.check.check().
-
-
-fsdump.py -- summarize FileStorage contents, one line per revision
-
-Prints a report of FileStorage contents, with one line for each
-transaction and one line for each data record in that transaction.
-Includes time stamps, file positions, and class names.
-
-
-fsoids.py -- trace all uses of specified oids in a FileStorage
-
-For heavy debugging.
-A set of oids is specified by a text file listing and/or the command line.
-A report is generated showing all uses of these oids in the database:
-all new-revision creation/modifications, all references from all
-revisions of other objects, and all creation undos.
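-
-Example: fsoids.py -f oids.txt Data.fs 0x341a, where oids.txt is a
-hypothetical file listing one oid per line.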
-
-
-fstest.py -- simple consistency checker for FileStorage
-
-usage: fstest.py [-v] data.fs
-
-The fstest tool will scan all the data in a FileStorage and report an
-error if it finds any corrupt transaction data.  The tool will print a
-message when the first error is detected and exit.
-
-The tool accepts one or more -v arguments.  If a single -v is used, it
-will print a line of text for each transaction record it encounters.
-If two -v arguments are used, it will also print a line of text for
-each object.  The objects for a transaction will be printed before the
-transaction itself.
-
-Note: It does not check the consistency of the object pickles.  It is
-possible for the damage to occur only in the part of the file that
-stores object pickles.  Those errors will go undetected.
-
-
-space.py -- report space used by objects in a FileStorage
-
-usage: space.py [-v] data.fs
-
-This ignores revisions and versions.
-
-
-netspace.py -- hackish attempt to report on size of objects
-
-usage: netspace.py [-P | -v] data.fs
-
--P: do a pack first
--v: print info for all objects, even if a traversal path isn't found
-
-Traverses objects from the database root and attempts to calculate
-the size of each object, including all reachable subobjects.
-
-
-parsezeolog.py -- parse BLATHER logs from ZEO server
-
-This script may be obsolete.  It has not been tested against the
-current log output of the ZEO server.
-
-Reports on the time and size of transactions committed by a ZEO
-server, by inspecting log messages at BLATHER level.
-
-
-repozo.py -- incremental backup utility for FileStorage
-
-Run the script with the -h option to see usage details.
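-
-For example, an incremental backup into a backup directory might look
-like this (the repository path is a placeholder):
-
-    repozo.py -B -r /path/to/backups -f Data.fs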
-
-
-timeout.py -- script to test transaction timeout
-
-usage: timeout.py address delay [storage-name]
-
-This script connects to a storage, begins a transaction, calls store()
-and tpc_vote(), and then sleeps forever.  This should trigger the
-transaction timeout feature of the server.
-
-
-zeopack.py -- pack a ZEO server
-
-The script connects to a server and calls pack() on a specific
-storage.  See the script for usage details.
-
-
-zeoreplay.py -- experimental script to replay transactions from a ZEO log
-
-Like parsezeolog.py, this may be obsolete because it was written
-against an earlier version of the ZEO server.  See the script for
-usage details.
-
-
-zeoup.py -- check that a ZEO server is up and responding
-
-usage: zeoup.py [options]
-
-The test will connect to a ZEO server, load the root object, and
-attempt to update the zeoup counter in the root.  It will report
-success if it updates the counter or if it gets a ConflictError.  A
-ConflictError is considered a success, because the client was able to
-start a transaction.
-
-See the script for details about the options.
-
-
-zodbload.py -- exercise ZODB under a heavy synthesized Zope-like load
-
-See the module docstring for details.  Note that this script requires
-Zope.  New in ZODB3 3.1.4.
-
-
-zeoserverlog.py -- analyze ZEO server log for performance statistics
-
-See the module docstring for details; there are a large number of
-options.  New in ZODB3 3.1.4.
-
-
-fsrefs.py -- check FileStorage for dangling references
-
-
-fstail.py -- display the most recent transactions in a FileStorage
-
-usage:  fstail.py [-n ntxn] data.fs
-
-The most recent ntxn transactions are displayed on stdout.
-The optional -n argument specifies ntxn, which defaults to 10.
-
-
-migrate.py -- do a storage migration and gather statistics
-
-See the module docstring for details.
-
-
-zeoqueue.py -- report number of clients currently waiting in the ZEO queue
-
-See the module docstring for details.
diff --git a/branches/bug1734/src/scripts/analyze.py b/branches/bug1734/src/scripts/analyze.py
deleted file mode 100755
index 319ef94b..00000000
--- a/branches/bug1734/src/scripts/analyze.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python2.3
-
-# Based on a transaction analyzer by Matt Kromer.
-
-import pickle
-import re
-import sys
-import types
-from ZODB.FileStorage import FileStorage
-
-class Report:
-    def __init__(self):
-        self.OIDMAP = {}
-        self.TYPEMAP = {}
-        self.TYPESIZE = {}
-        self.FREEMAP = {}
-        self.USEDMAP = {}
-        self.TIDS = 0
-        self.OIDS = 0
-        self.DBYTES = 0
-        self.COIDS = 0
-        self.CBYTES = 0
-        self.FOIDS = 0
-        self.FBYTES = 0
-
-def shorten(s, n):
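-    # Trim a dotted name to at most n characters by dropping leading
-    # "package." components; a "..." prefix marks the truncation.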
-    l = len(s)
-    if l <= n:
-        return s
-    while len(s) + 3 > n: # account for ...
-        i = s.find(".")
-        if i == -1:
-            # In the worst case, just return the rightmost n bytes
-            return s[-n:]
-        else:
-            s = s[i + 1:]
-            l = len(s)
-    return "..." + s
-
-def report(rep):
-    print "Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS)
-    print "Average record size is %7.2f bytes" % (rep.DBYTES * 1.0 / rep.OIDS)
-    print ("Average transaction size is %7.2f bytes" %
-           (rep.DBYTES * 1.0 / rep.TIDS))
-
-    print "Types used:"
-    fmt = "%-46s %7s %9s %6s %7s"
-    fmtp = "%-46s %7d %9d %5.1f%% %7.2f" # per-class format
-    fmts = "%46s %7d %8dk %5.1f%% %7.2f" # summary format
-    print fmt % ("Class Name", "Count", "TBytes", "Pct", "AvgSize")
-    print fmt % ('-'*46, '-'*7, '-'*9, '-'*5, '-'*7)
-    typemap = rep.TYPEMAP.keys()
-    typemap.sort()
-    cumpct = 0.0
-    for t in typemap:
-        pct = rep.TYPESIZE[t] * 100.0 / rep.DBYTES
-        cumpct += pct
-        print fmtp % (shorten(t, 46), rep.TYPEMAP[t], rep.TYPESIZE[t],
-                      pct, rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t])
-
-    print fmt % ('='*46, '='*7, '='*9, '='*5, '='*7)
-    print "%46s %7d %9s %6s %6.2fk" % ('Total Transactions', rep.TIDS, ' ',
-        ' ', rep.DBYTES * 1.0 / rep.TIDS / 1024.0)
-    print fmts % ('Total Records', rep.OIDS, rep.DBYTES / 1024.0, cumpct,
-                  rep.DBYTES * 1.0 / rep.OIDS)
-
-    print fmts % ('Current Objects', rep.COIDS, rep.CBYTES / 1024.0,
-                  rep.CBYTES * 100.0 / rep.DBYTES,
-                  rep.CBYTES * 1.0 / rep.COIDS)
-    if rep.FOIDS:
-        print fmts % ('Old Objects', rep.FOIDS, rep.FBYTES / 1024.0,
-                      rep.FBYTES * 100.0 / rep.DBYTES,
-                      rep.FBYTES * 1.0 / rep.FOIDS)
-
-def analyze(path):
-    fs = FileStorage(path, read_only=1)
-    fsi = fs.iterator()
-    report = Report()
-    for txn in fsi:
-        analyze_trans(report, txn)
-    return report
-
-def analyze_trans(report, txn):
-    report.TIDS += 1
-    for rec in txn:
-        analyze_rec(report, rec)
-
-def get_type(record):
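-    # Return "module.class" for the object pickled in record.data.
-    # When the class can't be imported, recover the dotted name from
-    # the SystemError message raised by the pickle machinery.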
-    try:
-        classinfo = pickle.loads(record.data)[0]
-    except SystemError, err:
-        s = str(err)
-        mo = re.match(r'Failed to import class (\S+) from module (\S+)', s)
-        if mo is None:
-            raise
-        else:
-            klass, mod = mo.group(1, 2)
-            return "%s.%s" % (mod, klass)
-    if isinstance(classinfo, types.TupleType):
-        mod, klass = classinfo
-        return "%s.%s" % (mod, klass)
-    else:
-        return str(classinfo)
-
-def analyze_rec(report, record):
-    oid = record.oid
-    report.OIDS += 1
-    if record.data is None:
-        # No pickle -- aborted version or undo of object creation.
-        return
-    try:
-        size = len(record.data) # Ignores various overhead
-        report.DBYTES += size
-        if oid not in report.OIDMAP:
-            type = get_type(record)
-            report.OIDMAP[oid] = type
-            report.USEDMAP[oid] = size
-            report.COIDS += 1
-            report.CBYTES += size
-        else:
-            type = report.OIDMAP[oid]
-            fsize = report.USEDMAP[oid]
-            report.FREEMAP[oid] = report.FREEMAP.get(oid, 0) + fsize
-            report.USEDMAP[oid] = size
-            report.FOIDS += 1
-            report.FBYTES += fsize
-            report.CBYTES += size - fsize
-        report.TYPEMAP[type] = report.TYPEMAP.get(type, 0) + 1
-        report.TYPESIZE[type] = report.TYPESIZE.get(type, 0) + size
-    except Exception, err:
-        print err
-
-if __name__ == "__main__":
-    path = sys.argv[1]
-    report(analyze(path))
diff --git a/branches/bug1734/src/scripts/checkbtrees.py b/branches/bug1734/src/scripts/checkbtrees.py
deleted file mode 100755
index a4c6edf1..00000000
--- a/branches/bug1734/src/scripts/checkbtrees.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Check the consistency of BTrees in a Data.fs
-
-usage: checkbtrees.py data.fs
-
-Try to find all the BTrees in a Data.fs, call their _check() methods,
-and run them through BTrees.check.check().
-"""
-
-from types import IntType
-
-import ZODB
-from ZODB.FileStorage import FileStorage
-from BTrees.check import check
-
-# Set of oids we've already visited.  Since the object structure is
-# a general graph, this is needed to prevent unbounded paths in the
-# presence of cycles.  It's also helpful in eliminating redundant
-# checking when a BTree is pointed to by many objects.
-oids_seen = {}
-
-# Append (obj, path) to L if and only if obj is a persistent object
-# and we haven't seen it before.
-def add_if_new_persistent(L, obj, path):
-    global oids_seen
-
-    getattr(obj, '_', None) # unghostify
-    if hasattr(obj, '_p_oid'):
-        oid = obj._p_oid
-        if not oids_seen.has_key(oid):
-            L.append((obj, path))
-            oids_seen[oid] = 1
-
-def get_subobjects(obj):
-    getattr(obj, '_', None) # unghostify
-    sub = []
-    try:
-        attrs = obj.__dict__.items()
-    except AttributeError:
-        attrs = ()
-    for pair in attrs:
-        sub.append(pair)
-
-    # what if it is a mapping?
-    try:
-        items = obj.items()
-    except AttributeError:
-        items = ()
-    for k, v in items:
-        if not isinstance(k, IntType):
-            sub.append(("<key>", k))
-        if not isinstance(v, IntType):
-            sub.append(("[%s]" % repr(k), v))
-
-    # what if it is a sequence?
-    i = 0
-    while 1:
-        try:
-            elt = obj[i]
-        except:
-            break
-        sub.append(("[%d]" % i, elt))
-        i += 1
-
-    return sub
-
-def main(fname):
-    fs = FileStorage(fname, read_only=1)
-    cn = ZODB.DB(fs).open()
-    rt = cn.root()
-    todo = []
-    add_if_new_persistent(todo, rt, '')
-
-    found = 0
-    while todo:
-        obj, path = todo.pop(0)
-        found += 1
-        if not path:
-            print "<root>", repr(obj)
-        else:
-            print path, repr(obj)
-
-        mod = str(obj.__class__.__module__)
-        if mod.startswith("BTrees"):
-            if hasattr(obj, "_check"):
-                try:
-                    obj._check()
-                except AssertionError, msg:
-                    print "*" * 60
-                    print msg
-                    print "*" * 60
-
-                try:
-                    check(obj)
-                except AssertionError, msg:
-                    print "*" * 60
-                    print msg
-                    print "*" * 60
-
-        if found % 100 == 0:
-            cn.cacheMinimize()
-
-        for k, v in get_subobjects(obj):
-            if k.startswith('['):
-                # getitem
-                newpath = "%s%s" % (path, k)
-            else:
-                newpath = "%s.%s" % (path, k)
-            add_if_new_persistent(todo, v, newpath)
-
-    print "total", len(fs._index), "found", found
-
-if __name__ == "__main__":
-    import sys
-    try:
-        fname, = sys.argv[1:]
-    except:
-        print __doc__
-        sys.exit(2)
-
-    main(fname)
diff --git a/branches/bug1734/src/scripts/fsdump.py b/branches/bug1734/src/scripts/fsdump.py
deleted file mode 100644
index 16808323..00000000
--- a/branches/bug1734/src/scripts/fsdump.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Print a text summary of the contents of a FileStorage."""
-
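-# usage: fsdump.py path/to/Data.fs
-# (the single positional argument is the FileStorage to summarize)
-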
-from ZODB.FileStorage.fsdump import fsdump
-
-if __name__ == "__main__":
-    import sys
-    fsdump(sys.argv[1])
diff --git a/branches/bug1734/src/scripts/fsoids.py b/branches/bug1734/src/scripts/fsoids.py
deleted file mode 100644
index 8d6ce517..00000000
--- a/branches/bug1734/src/scripts/fsoids.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""FileStorage oid-tracer.
-
-usage: fsoids.py [-f oid_file] Data.fs [oid]...
-
-Display information about all occurrences of specified oids in a FileStorage.
-This is meant for heavy debugging.
-
-This includes all revisions of the oids, all objects referenced by the
-oids, and all revisions of all objects referring to the oids.
-
-If specified, oid_file is an input text file, containing one oid per
-line.  oids are specified as integers, in any of Python's integer
-notations (typically like 0x341a).  One or more oids can also be specified
-on the command line.
-
-The output is grouped by oid, from smallest to largest, and sub-grouped
-by transaction, from oldest to newest.
-
-This will not alter the FileStorage, but running against a live FileStorage
-is not recommended (spurious error messages may result).
-
-See testfsoids.py for a tutorial doctest.
-"""
-
-import sys
-
-from ZODB.FileStorage.fsoids import Tracer
-
-def usage():
-    print __doc__
-
-def main():
-    import getopt
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'f:')
-        if not args:
-            usage()
-            raise ValueError, "Must specify a FileStorage"
-        path = None
-        for k, v in opts:
-            if k == '-f':
-                path = v
-    except (getopt.error, ValueError):
-        usage()
-        raise
-
-    c = Tracer(args[0])
-    for oid in args[1:]:
-        as_int = int(oid, 0) # 0 == auto-detect base
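-        # e.g. int("0x341a", 0) == 13338, and plain decimal such as
-        # "13338" is accepted as well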
-        c.register_oids(as_int)
-    if path is not None:
-        for line in open(path):
-            as_int = int(line, 0)
-            c.register_oids(as_int)
-    if not c.oids:
-        raise ValueError("no oids specified")
-    c.run()
-    c.report()
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/scripts/fsrefs.py b/branches/bug1734/src/scripts/fsrefs.py
deleted file mode 100644
index db0a55d5..00000000
--- a/branches/bug1734/src/scripts/fsrefs.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Check FileStorage for dangling references.
-
-usage: fsrefs.py [-v] data.fs
-
-fsrefs.py checks object sanity by trying to load the current revision of
-every object O in the database, and also verifies that every object
-directly reachable from each such O exists in the database.
-
-It's hard to explain exactly what it does because it relies on undocumented
-features in Python's cPickle module:  many of the crucial steps of loading
-an object are taken, but application objects aren't actually created.  This
-saves a lot of time, and allows fsrefs to be run even if the code
-implementing the object classes isn't available.
-
-A read-only connection to the specified FileStorage is made, but it is not
-recommended to run fsrefs against a live FileStorage.  Because a live
-FileStorage is mutating while fsrefs runs, it's not possible for fsrefs to
-get a wholly consistent view of the database across the entire time fsrefs
-is running; spurious error messages may result.
-
-fsrefs doesn't normally produce any output.  If an object fails to load, the
-oid of the object is given in a message saying so, and if -v was specified
-then the traceback corresponding to the load failure is also displayed
-(this is the only effect of the -v flag).
-
-Three other kinds of errors are also detected, when an object O loads OK,
-and directly refers to a persistent object P but there's a problem with P:
-
- - If P doesn't exist in the database, a message saying so is displayed.
-   The unsatisfiable reference to P is often called a "dangling
-   reference"; P is called "missing" in the error output.
-
- - If the current state of the database is such that P's creation has
-   been undone, then P can't be loaded either.  This is also a kind of
-   dangling reference, but is identified as "object creation was undone".
-
- - If P can't be loaded (but does exist in the database), a message saying
-   that O refers to an object that can't be loaded is displayed.
-
-fsrefs also (indirectly) checks that the .index file is sane, because
-fsrefs uses the index to get its idea of what constitutes "all the objects
-in the database".
-
-Note these limitations:  because fsrefs only looks at the current revision
-of objects, it does not attempt to load objects in versions, or non-current
-revisions of objects; therefore fsrefs cannot find problems in versions or
-in non-current revisions.
-"""
-
-import traceback
-import types
-
-from ZODB.FileStorage import FileStorage
-from ZODB.TimeStamp import TimeStamp
-from ZODB.utils import u64, oid_repr, get_refs, get_pickle_metadata
-from ZODB.POSException import POSKeyError
-
-VERBOSE = 0
-
-# There's a problem with oid.  'data' is its pickle, and 'serial' its
-# serial number.  'missing' is a list of (oid, class, reason) triples,
-# one triple per problem found with an object that oid refers to.
-def report(oid, data, serial, missing):
-    from_mod, from_class = get_pickle_metadata(data)
-    if len(missing) > 1:
-        plural = "s"
-    else:
-        plural = ""
-    ts = TimeStamp(serial)
-    print "oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class)
-    print "last updated: %s, tid=%s" % (ts, hex(u64(serial)))
-    print "refers to invalid object%s:" % plural
-    for oid, info, reason in missing:
-        if isinstance(info, types.TupleType):
-            description = "%s.%s" % info
-        else:
-            description = str(info)
-        print "\toid %s %s: %r" % (oid_repr(oid), reason, description)
-    print
-
-def main(path):
-    fs = FileStorage(path, read_only=1)
-
-    # Set of oids in the index that failed to load due to POSKeyError.
-    # This is what happens if undo is applied to the transaction creating
-    # the object (the oid is still in the index, but its current data
-    # record has a backpointer of 0, and POSKeyError is raised then
-    # because of that backpointer).
-    undone = {}
-
-    # Set of oids that were present in the index but failed to load.
-    # This does not include oids in undone.
-    noload = {}
-
-    for oid in fs._index.keys():
-        try:
-            data, serial = fs.load(oid, "")
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except POSKeyError:
-            undone[oid] = 1
-        except:
-            if VERBOSE:
-                traceback.print_exc()
-            noload[oid] = 1
-
-    inactive = noload.copy()
-    inactive.update(undone)
-    for oid in fs._index.keys():
-        if oid in inactive:
-            continue
-        data, serial = fs.load(oid, "")
-        refs = get_refs(data)
-        missing = [] # contains 3-tuples of oid, klass-metadata, reason
-        for info in refs:
-            try:
-                ref, klass = info
-            except (ValueError, TypeError):
-                # failed to unpack
-                ref = info
-                klass = '<unknown>'
-            if ref not in fs._index:
-                missing.append((ref, klass, "missing"))
-            if ref in noload:
-                missing.append((ref, klass, "failed to load"))
-            if ref in undone:
-                missing.append((ref, klass, "object creation was undone"))
-        if missing:
-            report(oid, data, serial, missing)
-
-if __name__ == "__main__":
-    import sys
-    import getopt
-
-    opts, args = getopt.getopt(sys.argv[1:], "v")
-    for k, v in opts:
-        if k == "-v":
-            VERBOSE += 1
-
-    path, = args
-    main(path)
diff --git a/branches/bug1734/src/scripts/fsstats.py b/branches/bug1734/src/scripts/fsstats.py
deleted file mode 100755
index 4f4ef2c3..00000000
--- a/branches/bug1734/src/scripts/fsstats.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Print details statistics from fsdump output."""
-
-import re
-import sys
-
-rx_txn = re.compile("tid=([0-9a-f]+).*size=(\d+)")
-rx_data = re.compile("oid=([0-9a-f]+) class=(\S+) size=(\d+)")
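-
-# These patterns only pick out the tid=/oid=/class=/size= fields; the
-# rest of each fsdump line is ignored (main() below keys off the
-# "Trans" and "  data" line prefixes to decide which pattern to apply).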
-
-def sort_byhsize(seq, reverse=False):
-    L = [(v.size(), k, v) for k, v in seq]
-    L.sort()
-    if reverse:
-        L.reverse()
-    return [(k, v) for n, k, v in L]
-
-class Histogram(dict):
-
-    def add(self, size):
-        self[size] = self.get(size, 0) + 1
-
-    def size(self):
-        return sum(self.itervalues())
-
-    def mean(self):
-        product = sum([k * v for k, v in self.iteritems()])
-        return product / self.size()
-
-    def median(self):
-        # close enough?
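-        # Walk the keys from smallest to largest, subtracting each
-        # key's count from n; the first key whose count exceeds the
-        # remainder straddles the halfway point.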
-        n = self.size() / 2
-        L = self.keys()
-        L.sort()
-        L.reverse()
-        while 1:
-            k = L.pop()
-            if self[k] > n:
-                return k
-            n -= self[k]
-
-    def mode(self):
-        mode = 0
-        value = 0
-        for k, v in self.iteritems():
-            if v > value:
-                value = v
-                mode = k
-        return mode
-
-    def make_bins(self, binsize):
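-        # With binsize=50, for example, a key of 120 lands in
-        # bins[2], which covers the range [100, 150).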
-        maxkey = max(self.iterkeys())
-        self.binsize = binsize
-        self.bins = [0] * (1 + maxkey / binsize)
-        for k, v in self.iteritems():
-            b = k / binsize
-            self.bins[b] += v
-
-    def report(self, name, binsize=50, usebins=False, gaps=True, skip=True):
-        if usebins:
-            # Use existing bins with whatever size they have
-            binsize = self.binsize
-        else:
-            # Make new bins
-            self.make_bins(binsize)
-        maxval = max(self.bins)
-        # Print up to 40 dots for a value
-        dot = max(maxval / 40, 1)
-        tot = sum(self.bins)
-        print name
-        print "Total", tot,
-        print "Median", self.median(),
-        print "Mean", self.mean(),
-        print "Mode", self.mode(),
-        print "Max", max(self)
-        print "One * represents", dot
-        gap = False
-        cum = 0
-        for i, n in enumerate(self.bins):
-            if gaps and (not n or (skip and not n / dot)):
-                if not gap:
-                    print "   ..."
-                gap = True
-                continue
-            gap = False
-            p = 100 * n / tot
-            cum += n
-            pc = 100 * cum / tot
-            print "%6d %6d %3d%% %3d%% %s" % (
-                i * binsize, n, p, pc, "*" * (n / dot))
-        print
-
-def class_detail(class_size):
-    # summary of classes
-    fmt = "%5s %6s %6s %6s   %-50.50s"
-    labels = ["num", "median", "mean", "mode", "class"]
-    print fmt % tuple(labels)
-    print fmt % tuple(["-" * len(s) for s in labels])
-    for klass, h in sort_byhsize(class_size.iteritems()):
-        print fmt % (h.size(), h.median(), h.mean(), h.mode(), klass)
-    print
-
-    # per class details
-    for klass, h in sort_byhsize(class_size.iteritems(), reverse=True):
-        h.make_bins(50)
-        if len(filter(None, h.bins)) == 1:
-            continue
-        h.report("Object size for %s" % klass, usebins=True)
-
-def revision_detail(lifetimes, classes):
-    # Report per-class details for any object modified more than once
-    for name, oids in classes.iteritems():
-        h = Histogram()
-        keep = False
-        for oid in dict.fromkeys(oids, 1):
-            L = lifetimes.get(oid)
-            n = len(L)
-            h.add(n)
-            if n > 1:
-                keep = True
-        if keep:
-            h.report("Number of revisions for %s" % name, binsize=10)
-
-def main(path):
-    txn_objects = Histogram() # histogram of txn size in objects
-    txn_bytes = Histogram() # histogram of txn size in bytes
-    obj_size = Histogram() # histogram of object size
-    n_updates = Histogram() # oid -> num updates
-    n_classes = Histogram() # class -> num objects
-    lifetimes = {} # oid -> list of tids
-    class_size = {} # class -> histogram of object size
-    classes = {} # class -> list of oids
-
-    MAX = 0
-    tid = None
-    objects = 0
-
-    f = open(path, "rb")
-    for i, line in enumerate(f):
-        if MAX and i > MAX:
-            break
-        if line.startswith("  data"):
-            m = rx_data.search(line)
-            if not m:
-                continue
-            oid, klass, size = m.groups()
-            size = int(size)
-
-            obj_size.add(size)
-            n_updates.add(oid)
-            n_classes.add(klass)
-
-            h = class_size.get(klass)
-            if h is None:
-                h = class_size[klass] = Histogram()
-            h.add(size)
-
-            L = lifetimes.setdefault(oid, [])
-            L.append(tid)
-
-            L = classes.setdefault(klass, [])
-            L.append(oid)
-            objects += 1
-
-        elif line.startswith("Trans"):
-
-            if tid is not None:
-                txn_objects.add(objects)
-
-            m = rx_txn.search(line)
-            if not m:
-                continue
-            tid, size = m.groups()
-            size = int(size)
-            objects = 0
-
-            txn_bytes.add(size)
-    f.close()
-
-    # The loop above only adds a transaction's object count when the
-    # next transaction header is seen, so flush the final one here.
-    if tid is not None:
-        txn_objects.add(objects)
-
-    print "Summary: %d txns, %d objects, %d revisions" % (
-        txn_objects.size(), len(n_updates), n_updates.size())
-    print
-
-    txn_bytes.report("Transaction size (bytes)", binsize=1024)
-    txn_objects.report("Transaction size (objects)", binsize=10)
-    obj_size.report("Object size", binsize=128)
-
-    # object lifetime info
-    h = Histogram()
-    for k, v in lifetimes.items():
-        h.add(len(v))
-    h.report("Number of revisions", binsize=10, skip=False)
-
-    # details about revisions
-    revision_detail(lifetimes, classes)
-
-    class_detail(class_size)
-
-if __name__ == "__main__":
-    main(sys.argv[1])
diff --git a/branches/bug1734/src/scripts/fstail.py b/branches/bug1734/src/scripts/fstail.py
deleted file mode 100644
index 17f47e8d..00000000
--- a/branches/bug1734/src/scripts/fstail.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Tool to dump the last few transactions from a FileStorage."""
-
-from ZODB.fstools import prev_txn
-
-import binascii
-import getopt
-import sha
-import sys
-
-def main(path, ntxn):
-    f = open(path, "rb")
-    f.seek(0, 2)
-    th = prev_txn(f)
-    i = ntxn
-    while th and i > 0:
-        hash = sha.sha(th.get_raw_data()).digest()
-        th.read_meta()
-        print "%s: hash=%s" % (th.get_timestamp(),
-                               binascii.hexlify(hash))
-        print ("user=%r description=%r length=%d"
-               % (th.user, th.descr, th.length))
-        print
-        th = th.prev_txn()
-        i -= 1
-
-if __name__ == "__main__":
-    ntxn = 10
-    opts, args = getopt.getopt(sys.argv[1:], "n:")
-    path, = args
-    for k, v in opts:
-        if k == '-n':
-            ntxn = int(v)
-    main(path, ntxn)
diff --git a/branches/bug1734/src/scripts/fstest.py b/branches/bug1734/src/scripts/fstest.py
deleted file mode 100644
index 3a274bcd..00000000
--- a/branches/bug1734/src/scripts/fstest.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Simple consistency checker for FileStorage.
-
-usage: fstest.py [-v] data.fs
-
-The fstest tool will scan all the data in a FileStorage and report an
-error if it finds any corrupt transaction data.  The tool will print a
-message when the first error is detected, then exit.
-
-The tool accepts one or more -v arguments.  If a single -v is used, it
-will print a line of text for each transaction record it encounters.
-If two -v arguments are used, it will also print a line of text for
-each object.  The objects for a transaction will be printed before the
-transaction itself.
-
-Note: It does not check the consistency of the object pickles.  It is
-possible for the damage to occur only in the part of the file that
-stores object pickles.  Those errors will go undetected.
-"""
-
-# The implementation is based closely on the read_index() function in
-# ZODB.FileStorage.  If anything about the FileStorage layout changes,
-# this file will need to be updated.
-
-import string
-import struct
-import sys
-
-class FormatError(ValueError):
-    """There is a problem with the format of the FileStorage."""
-
-class Status:
-    checkpoint = 'c'
-    undone = 'u'
-
-packed_version = 'FS21'
-
-TREC_HDR_LEN = 23
-DREC_HDR_LEN = 42
-
-VERBOSE = 0
-
-def hexify(s):
-    """Format an 8-bite string as hex"""
-    l = []
-    for c in s:
-        h = hex(ord(c))
-        if h[:2] == '0x':
-            h = h[2:]
-        if len(h) == 1:
-            l.append("0")
-        l.append(h)
-    return "0x" + string.join(l, '')
-
-def chatter(msg, level=1):
-    if VERBOSE >= level:
-        sys.stdout.write(msg)
-
-def U64(v):
-    """Unpack an 8-byte string as a 64-bit long"""
-    h, l = struct.unpack(">II", v)
-    if h:
-        return (h << 32) + l
-    else:
-        return l
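-
-# A couple of illustrative values for U64 above:
-#   U64('\x00' * 7 + '\x01') == 1
-#   U64('\x00\x00\x00\x01' + '\x00' * 4) == 1L << 32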
-
-def check(path):
-    file = open(path, 'rb')
-
-    file.seek(0, 2)
-    file_size = file.tell()
-    if file_size == 0:
-        raise FormatError("empty file")
-    file.seek(0)
-    if file.read(4) != packed_version:
-        raise FormatError("invalid file header")
-
-    pos = 4L
-    tid = '\000' * 8 # lowest possible tid to start
-    i = 0
-    while pos:
-        _pos = pos
-        pos, tid = check_trec(path, file, pos, tid, file_size)
-        if tid is not None:
-            chatter("%10d: transaction tid %s #%d \n" %
-                    (_pos, hexify(tid), i))
-            i = i + 1
-
-
-def check_trec(path, file, pos, ltid, file_size):
-    """Read an individual transaction record from file.
-
-    Returns the pos of the next transaction and the transaction id.
-    It also leaves the file pointer set to pos.  The path argument is
-    used for generating error messages.
-    """
-
-    h = file.read(TREC_HDR_LEN)
-    if not h:
-        return None, None
-    if len(h) != TREC_HDR_LEN:
-        raise FormatError("%s truncated at %s" % (path, pos))
-
-    tid, stl, status, ul, dl, el = struct.unpack(">8s8scHHH", h)
-    if el < 0:
-        el = t32 - el
-    tmeta_len = TREC_HDR_LEN + ul + dl + el
-
-    if tid <= ltid:
-        raise FormatError("%s time-stamp reduction at %s: %s <= %s" %
-                          (path, pos, hexify(tid), hexify(ltid)))
-    ltid = tid
-
-    tl = U64(stl) # transaction record length - 8
-    if pos + tl + 8 > file_size:
-        raise FormatError("%s truncated possibly because of"
-                          " damaged records at %s" % (path, pos))
-    if status == Status.checkpoint:
-        raise FormatError("%s checkpoint flag was not cleared at %s"
-                          % (path, pos))
-    if status not in ' up':
-        raise FormatError("%s has invalid status '%s' at %s" %
-                          (path, status, pos))
-
-    if tmeta_len > tl:
-        raise FormatError("%s has an invalid transaction header"
-                          " at %s" % (path, pos))
-
-    tpos = pos
-    tend = tpos + tl
-
-    if status != Status.undone:
-        pos = tpos + tmeta_len
-        file.read(ul + dl + el) # skip transaction metadata
-
-        i = 0
-        while pos < tend:
-            _pos = pos
-            pos, oid = check_drec(path, file, pos, tpos, tid)
-            if pos > tend:
-                raise FormatError("%s has data records that extend beyond"
-                                  " the transaction record; end at %s" %
-                                  (path, pos))
-            chatter("%10d: object oid %s #%d\n" % (_pos, hexify(oid), i),
-                    level=2)
-            i = i + 1
-
-    file.seek(tend)
-    rtl = file.read(8)
-    if rtl != stl:
-        raise FormatError("%s has inconsistent transaction length"
-                          " for undone transaction at %s" % (path, pos))
-    pos = tend + 8
-    return pos, tid
-
-def check_drec(path, file, pos, tpos, tid):
-    """Check a data record for the current transaction record"""
-
-    h = file.read(DREC_HDR_LEN)
-    if len(h) != DREC_HDR_LEN:
-        raise FormatError("%s truncated at %s" % (path, pos))
-    oid, serial, _prev, _tloc, vlen, _plen = (
-        struct.unpack(">8s8s8s8sH8s", h))
-    prev = U64(_prev)
-    tloc = U64(_tloc)
-    plen = U64(_plen)
-    dlen = DREC_HDR_LEN + (plen or 8)
-
-    if vlen:
-        dlen = dlen + 16 + vlen
-        file.seek(8, 1)
-        pv = U64(file.read(8))
-        file.seek(vlen, 1) # skip the version data
-
-    if tloc != tpos:
-        raise FormatError("%s data record exceeds transaction record "
-                          "at %s: tloc %d != tpos %d" %
-                          (path, pos, tloc, tpos))
-
-    pos = pos + dlen
-    if plen:
-        file.seek(plen, 1)
-    else:
-        file.seek(8, 1)
-        # _loadBack() ?
-
-    return pos, oid
-
-def usage():
-    print __doc__
-    sys.exit(-1)
-
-if __name__ == "__main__":
-    import getopt
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'v')
-        if len(args) != 1:
-            raise ValueError, "expected one argument"
-        for k, v in opts:
-            if k == '-v':
-                VERBOSE = VERBOSE + 1
-    except (getopt.error, ValueError):
-        usage()
-
-    try:
-        check(args[0])
-    except FormatError, msg:
-        print msg
-        sys.exit(-1)
-
-    chatter("no errors detected")
diff --git a/branches/bug1734/src/scripts/manual_tests/test-checker.fs b/branches/bug1734/src/scripts/manual_tests/test-checker.fs
deleted file mode 100644
index 4afe2ae98d3e7b0345677c2e9f768aa35b9b94b4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 802
zcmZ<@HZo+kpJx)o;bhDJ0h1Mgv?K#VW?p7VW@3&)N@7W3QettcLUK`RVo7Fx9#kCz
znm(8?3yi^FsgWFzT2!1_T#}lXoXQ1fm-r?Y6lCV5a}_fBmFU$LGKF%*C+FvtBxdHN
z78Nqr7VANcm<+OFrqbe>Q2s0hxY=L^nzRX2c0OEHq=rT)H{9Oj)I!EkK{yxYj6$Z8
zLT0c-SWq0oT3g5#%9t3+ACi$-tN=ub3MHw<C3=PI#c&tS0y@D|aAQn4#7ag(1-RW{
z2C6i}HmEX3xT+RPU8sTGiQbGFp&U?K7z;TxGO+rbv!o=j6z&;DG}pqkBV6l5scT_&
zF@jQ^sh;TuXuvTnP=E&shyivALYj#UBFj_-R~1jGQ$XP+3Xj^n{F02+BFqS0001$Q
By}<wg

diff --git a/branches/bug1734/src/scripts/manual_tests/testfstest.py b/branches/bug1734/src/scripts/manual_tests/testfstest.py
deleted file mode 100644
index d936c62b..00000000
--- a/branches/bug1734/src/scripts/manual_tests/testfstest.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""Verify that fstest.py can find errors.
-
-Note:  To run this test script fstest.py must be on your PYTHONPATH.
-"""
-
-from cStringIO import StringIO
-import os
-import re
-import struct
-import tempfile
-import unittest
-
-import fstest
-from fstest import FormatError, U64
-
-class TestCorruptedFS(unittest.TestCase):
-
-    f = open('test-checker.fs', 'rb')
-    datafs = f.read()
-    f.close()
-    del f
-
-    def setUp(self):
-        self._temp = tempfile.mktemp()
-        self._file = open(self._temp, 'wb')
-
-    def tearDown(self):
-        if not self._file.closed:
-            self._file.close()
-        if os.path.exists(self._temp):
-            try:
-                os.remove(self._temp)
-            except os.error:
-                pass
-
-    def noError(self):
-        if not self._file.closed:
-            self._file.close()
-        fstest.check(self._temp)
-
-    def detectsError(self, rx):
-        if not self._file.closed:
-            self._file.close()
-        try:
-            fstest.check(self._temp)
-        except FormatError, msg:
-            mo = re.search(rx, str(msg))
-            self.failIf(mo is None, "unexpected error: %s" % msg)
-        else:
-            self.fail("fstest did not detect corruption")
-
-    def getHeader(self):
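-        # Read the 16 bytes of a transaction header that hold the tid
-        # and the record length, returning the length as an int along
-        # with the raw bytes (0 and '' at end of data).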
-        buf = self._datafs.read(16)
-        if not buf:
-            return 0, ''
-        tl = U64(buf[8:])
-        return tl, buf
-
-    def copyTransactions(self, n):
-        """Copy at most n transactions from the good data"""
-        f = self._datafs = StringIO(self.datafs)
-        self._file.write(f.read(4))
-        for i in range(n):
-            tl, data = self.getHeader()
-            if not tl:
-                return
-            self._file.write(data)
-            rec = f.read(tl - 8)
-            self._file.write(rec)
-
-    def testGood(self):
-        self._file.write(self.datafs)
-        self.noError()
-
-    def testTwoTransactions(self):
-        self.copyTransactions(2)
-        self.noError()
-
-    def testEmptyFile(self):
-        self.detectsError("empty file")
-
-    def testInvalidHeader(self):
-        self._file.write('SF12')
-        self.detectsError("invalid file header")
-
-    def testTruncatedTransaction(self):
-        self._file.write(self.datafs[:4+22])
-        self.detectsError("truncated")
-
-    def testCheckpointFlag(self):
-        self.copyTransactions(2)
-        tl, data = self.getHeader()
-        assert tl > 0, "ran out of good transaction data"
-        self._file.write(data)
-        self._file.write('c')
-        self._file.write(self._datafs.read(tl - 9))
-        self.detectsError("checkpoint flag")
-
-    def testInvalidStatus(self):
-        self.copyTransactions(2)
-        tl, data = self.getHeader()
-        assert tl > 0, "ran out of good transaction data"
-        self._file.write(data)
-        self._file.write('Z')
-        self._file.write(self._datafs.read(tl - 9))
-        self.detectsError("invalid status")
-
-    def testTruncatedRecord(self):
-        self.copyTransactions(3)
-        tl, data = self.getHeader()
-        assert tl > 0, "ran out of good transaction data"
-        self._file.write(data)
-        buf = self._datafs.read(tl / 2)
-        self._file.write(buf)
-        self.detectsError("truncated possibly")
-
-    def testBadLength(self):
-        self.copyTransactions(2)
-        tl, data = self.getHeader()
-        assert tl > 0, "ran out of good transaction data"
-        self._file.write(data)
-        buf = self._datafs.read(tl - 8)
-        self._file.write(buf[0])
-        assert tl <= 1<<16, "can't use this transaction for this test"
-        self._file.write("\777\777")
-        self._file.write(buf[3:])
-        self.detectsError("invalid transaction header")
-
-    def testDecreasingTimestamps(self):
-        self.copyTransactions(0)
-        tl, data = self.getHeader()
-        buf = self._datafs.read(tl - 8)
-        t1 = data + buf
-
-        tl, data = self.getHeader()
-        buf = self._datafs.read(tl - 8)
-        t2 = data + buf
-
-        self._file.write(t2[:8] + t1[8:])
-        self._file.write(t1[:8] + t2[8:])
-        self.detectsError("time-stamp")
-
-    def testTruncatedData(self):
-        # This test must re-write the transaction header length in
-        # order to trigger the error in check_drec().  If it doesn't,
-        # the truncated data record would also be caught as a
-        # truncated transaction record.
-        self.copyTransactions(1)
-        tl, data = self.getHeader()
-        pos = self._file.tell()
-        self._file.write(data)
-        buf = self._datafs.read(tl - 8)
-        hdr = buf[:15]
-        ul, dl, el = struct.unpack(">HHH", hdr[-6:])
-        self._file.write(buf[:15 + ul + dl + el])
-        data = buf[15 + ul + dl + el:]
-        self._file.write(data[:24])
-        self._file.seek(pos + 8, 0)
-        newlen = struct.pack(">II", 0, tl - (len(data) - 24))
-        self._file.write(newlen)
-        self.detectsError("truncated at")
-
-    def testBadDataLength(self):
-        self.copyTransactions(1)
-        tl, data = self.getHeader()
-        self._file.write(data)
-        buf = self._datafs.read(tl - 8)
-        hdr = buf[:7]
-        # write the transaction meta data
-        ul, dl, el = struct.unpack(">HHH", hdr[-6:])
-        self._file.write(buf[:7 + ul + dl + el])
-
-        # write the first part of the data header
-        data = buf[7 + ul + dl + el:]
-        self._file.write(data[:24])
-        self._file.write("\000" * 4 + "\077" + "\000" * 3)
-        self._file.write(data[32:])
-        self.detectsError("record exceeds transaction")
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/branches/bug1734/src/scripts/manual_tests/testrepozo.py b/branches/bug1734/src/scripts/manual_tests/testrepozo.py
deleted file mode 100644
index a0262e3f..00000000
--- a/branches/bug1734/src/scripts/manual_tests/testrepozo.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Test repozo.py.
-
-This is a by-hand test.  It succeeds iff it doesn't blow up.  Run it with
-its home directory as the current directory.  It will destroy all files
-matching Data.* and Copy.* in this directory, and anything in a
-subdirectory of name 'backup'.
-"""
-
-import os
-import random
-import time
-import glob
-import sys
-import shutil
-
-import ZODB
-from ZODB import FileStorage
-import transaction
-
-PYTHON = sys.executable + ' '
-
-def cleanup():
-    for fname in glob.glob('Data.*') + glob.glob('Copy.*'):
-        os.remove(fname)
-
-    if os.path.isdir('backup'):
-        for fname in os.listdir('backup'):
-            os.remove(os.path.join('backup', fname))
-        os.rmdir('backup')
-
-class OurDB:
-    def __init__(self):
-        from BTrees.OOBTree import OOBTree
-        self.getdb()
-        conn = self.db.open()
-        conn.root()['tree'] = OOBTree()
-        transaction.commit()
-        self.close()
-
-    def getdb(self):
-        storage = FileStorage.FileStorage('Data.fs')
-        self.db = ZODB.DB(storage)
-
-    def gettree(self):
-        self.getdb()
-        conn = self.db.open()
-        return conn.root()['tree']
-
-    def pack(self):
-        self.getdb()
-        self.db.pack()
-
-    def close(self):
-        if self.db is not None:
-            self.db.close()
-            self.db = None
-
-# Do recovery to time 'when', and check that it's identical to correctpath.
-def check(correctpath='Data.fs', when=None):
-    if when is None:
-        extra = ''
-    else:
-        extra = ' -D ' + when
-    cmd = PYTHON + '../repozo.py -vRr backup -o Copy.fs' + extra
-    os.system(cmd)
-    f = file(correctpath, 'rb')
-    g = file('Copy.fs', 'rb')
-    fguts = f.read()
-    gguts = g.read()
-    f.close()
-    g.close()
-    if fguts != gguts:
-        raise ValueError("guts don't match\n"
-                         "    correctpath=%r when=%r\n"
-                         "    cmd=%r" % (correctpath, when, cmd))
-
-def mutatedb(db):
-    # Make random mutations to the btree in the database.
-    tree = db.gettree()
-    for dummy in range(100):
-        if random.random() < 0.6:
-            tree[random.randrange(100000)] = random.randrange(100000)
-        else:
-            keys = tree.keys()
-            if keys:
-                del tree[keys[0]]
-    transaction.commit()
-    db.close()
-
-def main():
-    cleanup()
-    os.mkdir('backup')
-    d = OurDB()
-    # Every 9th time thru the loop, we save a full copy of Data.fs,
-    # and at the end we ensure we can reproduce those too.
-    saved_snapshots = []  # list of (name, time) pairs for copies.
-
-    for i in range(100):
-        # Make some mutations.
-        mutatedb(d)
-
-        # Pack about each tenth time.
-        if random.random() < 0.1:
-            print "packing"
-            d.pack()
-            d.close()
-
-        # Make an incremental backup, half the time with gzip (-z).
-        if random.random() < 0.5:
-            os.system(PYTHON + '../repozo.py -vBQr backup -f Data.fs')
-        else:
-            os.system(PYTHON + '../repozo.py -zvBQr backup -f Data.fs')
-
-        if i % 9 == 0:
-            copytime = '%04d-%02d-%02d-%02d-%02d-%02d' % (time.gmtime()[:6])
-            copyname = os.path.join('backup', "Data%d" % i) + '.fs'
-            shutil.copyfile('Data.fs', copyname)
-            saved_snapshots.append((copyname, copytime))
-
-        # Make sure the clock moves at least a second.
-        time.sleep(1.01)
-
-        # Verify current Data.fs can be reproduced exactly.
-        check()
-
-    # Verify snapshots can be reproduced exactly.
-    for copyname, copytime in saved_snapshots:
-        print "Checking that", copyname, "at", copytime, "is reproducible."
-        check(copyname, copytime)
-
-    # Tear it all down.
-    cleanup()
-    print 'Test passed!'
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/scripts/manual_tests/testzeopack.py b/branches/bug1734/src/scripts/manual_tests/testzeopack.py
deleted file mode 100644
index a5c898d5..00000000
--- a/branches/bug1734/src/scripts/manual_tests/testzeopack.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Some simple tests for zeopack.py
-# For this to work, zeopack.py must be on your PATH.
-
-from ZODB.FileStorage import FileStorage
-from ZODB.tests.StorageTestBase import StorageTestBase
-from ZEO.tests import forker
-import ZODB
-
-import os
-import socket
-import tempfile
-import threading
-import time
-import unittest
-
-# TODO:  The forker interface isn't clearly defined.  It's different on
-# different branches of ZEO.  This will break someday.
-
-# TODO:  Only handle the Unix variant of the forker.  Just to give Tim
-# something to do.
-
-class PackerTests(StorageTestBase):
-
-    def setUp(self):
-        self.started = 0
-
-    def start(self):
-        self.started = 1
-        self.path = tempfile.mktemp(suffix=".fs")
-        self._storage = FileStorage(self.path)
-        self.db = ZODB.DB(self._storage)
-        self.do_updates()
-        self.pid, self.exit = forker.start_zeo_server(self._storage, self.addr)
-
-    def do_updates(self):
-        for i in range(100):
-            self._dostore()
-
-    def tearDown(self):
-        if not self.started:
-            return
-        self.db.close()
-        self._storage.close()
-        self.exit.close()
-        try:
-            os.kill(self.pid, 9)
-        except os.error:
-            pass
-        try:
-            os.waitpid(self.pid, 0)
-        except os.error, err:
-            ##print "waitpid failed", err
-            pass
-        for ext in '', '.old', '.lock', '.index', '.tmp':
-            path = self.path + ext
-            try:
-                os.remove(path)
-            except os.error:
-                pass
-
-    def set_inet_addr(self):
-        self.host = socket.gethostname()
-        self.port = forker.get_port()
-        self.addr = self.host, self.port
-
-    def testPack(self):
-        self.set_inet_addr()
-        self.start()
-        status = os.system("zeopack.py -h %s -p %s" % (self.host, self.port))
-        assert status == 0
-        assert os.path.exists(self.path + ".old")
-
-    def testPackDays(self):
-        self.set_inet_addr()
-        self.start()
-        status = os.system("zeopack.py -h %s -p %s -d 1" % (self.host,
-                                                            self.port))
-        # Since we specified one day, nothing should get packed
-        assert status == 0
-        assert not os.path.exists(self.path + ".old")
-
-    def testAF_UNIXPack(self):
-        self.addr = tempfile.mktemp(suffix=".zeo-socket")
-        self.start()
-        status = os.system("zeopack.py -U %s" % self.addr)
-        assert status == 0
-        assert os.path.exists(self.path + ".old")
-
-    def testNoServer(self):
-        status = os.system("zeopack.py -p 19")
-        assert status != 0
-
-    def testWaitForServer(self):
-        self.set_inet_addr()
-        def delayed_start():
-            time.sleep(11)
-            self.start()
-        t = threading.Thread(target=delayed_start)
-        t.start()
-        status = os.system("zeopack.py -h %s -p %s -W" % (self.host,
-                                                          self.port))
-        t.join()
-        assert status == 0
-        assert os.path.exists(self.path + ".old")
-
-class UpTest(unittest.TestCase):
-
-    def testUp(self):
-        status = os.system("zeoup.py -p 19")
-        # There is no ZEO server on port 19, so we should see non-zero
-        # exit status.
-        assert status != 0
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/branches/bug1734/src/scripts/migrate.py b/branches/bug1734/src/scripts/migrate.py
deleted file mode 100755
index ceb49251..00000000
--- a/branches/bug1734/src/scripts/migrate.py
+++ /dev/null
@@ -1,372 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2001, 2002, 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""A script to gather statistics while doing a storage migration.
-
-This is very similar to a standard storage's copyTransactionsFrom() method,
-except that it's geared to run as a script, and it collects useful pieces of
-information as it's working.  This script can be used to stress test a storage
-since it blasts transactions at it as fast as possible.  You can get a good
-sense of the performance of a storage by running this script.
-
-Actually it just counts the size of pickles in the transaction via the
-iterator protocol, so storage overheads aren't counted.
-
-Usage: %(PROGRAM)s [options] [source-storage-args] [destination-storage-args]
-Options:
-    -S sourcetype
-    --stype=sourcetype
-        This is the name of a recognized type for the source database.  Use -T
-        to print out the known types.  Defaults to "FileStorage".
-
-    -D desttype
-    --dtype=desttype
-        This is the name of the recognized type for the destination database.
-        Use -T to print out the known types.  Defaults to "FileStorage".
-
-    -o filename
-    --output=filename
-        Print results in filename, otherwise stdout.
-
-    -m txncount
-    --max=txncount
-        Stop after committing txncount transactions.
-
-    -k txncount
-    --skip=txncount
-        Skip the first txncount transactions.
-
-    -p/--profile
-        Turn on specialized profiling.
-
-    -t/--timestamps
-        Print tids as timestamps.
-
-    -T/--storage_types
-        Print all the recognized storage types and exit.
-
-    -v/--verbose
-        Turns on verbose output.  Multiple -v options increase the verbosity.
-
-    -h/--help
-        Print this message and exit.
-
-Positional arguments:
-
-    source-storage-args:
-        Semicolon separated list of arguments for the source storage, as
-        key=val pairs.  E.g. "file_name=Data.fs;read_only=1"
-
-    destination-storage-args:
-        Semicolon separated list of arguments for the destination storage, as
-        key=val pairs.  E.g. "name=full;frequency=3600"
-"""
-
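-# A sketch of a typical invocation (paths are illustrative), relying on
-# the FileStorage defaults for -S and -D:
-#   migrate.py "file_name=Data.fs;read_only=1" "file_name=Copy.fs"
-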
-import re
-import sys
-import time
-import getopt
-import marshal
-import profile
-
-from ZODB import utils
-from ZODB import StorageTypes
-from ZODB.TimeStamp import TimeStamp
-
-PROGRAM = sys.argv[0]
-ZERO = '\0'*8
-
-try:
-    True, False
-except NameError:
-    True = 1
-    False = 0
-
-
-
-def usage(code, msg=''):
-    print >> sys.stderr, __doc__ % globals()
-    if msg:
-        print >> sys.stderr, msg
-    sys.exit(code)
-
-
-def error(code, msg):
-    print >> sys.stderr, msg
-    print "use --help for usage message"
-    sys.exit(code)
-
-
-
-def main():
-    try:
-        opts, args = getopt.getopt(
-            sys.argv[1:],
-            'hvo:pm:k:D:S:Tt',
-            ['help', 'verbose',
-             'output=', 'profile', 'storage_types',
-             'max=', 'skip=', 'dtype=', 'stype=', 'timestamps'])
-    except getopt.error, msg:
-        error(2, msg)
-
-    class Options:
-        stype = 'FileStorage'
-        dtype = 'FileStorage'
-        verbose = 0
-        outfile = None
-        profilep = False
-        maxtxn = -1
-        skiptxn = -1
-        timestamps = False
-
-    options = Options()
-
-    for opt, arg in opts:
-        if opt in ('-h', '--help'):
-            usage(0)
-        elif opt in ('-v', '--verbose'):
-            options.verbose += 1
-        elif opt in ('-T', '--storage_types'):
-            print_types()
-            sys.exit(0)
-        elif opt in ('-S', '--stype'):
-            options.stype = arg
-        elif opt in ('-D', '--dtype'):
-            options.dtype = arg
-        elif opt in ('-o', '--output'):
-            options.outfile = arg
-        elif opt in ('-p', '--profile'):
-            options.profilep = True
-        elif opt in ('-m', '--max'):
-            options.maxtxn = int(arg)
-        elif opt in ('-k', '--skip'):
-            options.skiptxn = int(arg)
-        elif opt in ('-t', '--timestamps'):
-            options.timestamps = True
-
-    if len(args) > 2:
-        error(2, "too many arguments")
-
-    srckws = {}
-    if len(args) > 0:
-        srcargs = args[0]
-        for kv in re.split(r';\s*', srcargs):
-            key, val = kv.split('=')
-            srckws[key] = val
-
-    destkws = {}
-    if len(args) > 1:
-        destargs = args[1]
-        for kv in re.split(r';\s*', destargs):
-            key, val = kv.split('=')
-            destkws[key] = val
-
-    if options.stype not in StorageTypes.storage_types.keys():
-        usage(2, 'Source database type must be a recognized storage type')
-    if options.dtype not in StorageTypes.storage_types.keys():
-        usage(2, 'Destination database type must be a recognized storage type')
-
-    # Open the output file
-    if options.outfile is None:
-        options.outfp = sys.stdout
-        options.outclosep = False
-    else:
-        options.outfp = open(options.outfile, 'w')
-        options.outclosep = True
-
-    if options.verbose > 0:
-        print 'Opening source database...'
-    modname, sconv = StorageTypes.storage_types[options.stype]
-    kw = sconv(**srckws)
-    __import__(modname)
-    sclass = getattr(sys.modules[modname], options.stype)
-    srcdb = sclass(**kw)
-
-    if options.verbose > 0:
-        print 'Opening destination database...'
-    modname, dconv = StorageTypes.storage_types[options.dtype]
-    kw = dconv(**destkws)
-    __import__(modname)
-    dclass = getattr(sys.modules[modname], options.dtype)
-    dstdb = dclass(**kw)
-
-    try:
-        t0 = time.time()
-        doit(srcdb, dstdb, options)
-        t1 = time.time()
-        if options.verbose > 0:
-            print 'Migration time:          %8.3f' % (t1-t0)
-    finally:
-        # Done
-        srcdb.close()
-        dstdb.close()
-        if options.outclosep:
-            options.outfp.close()
-
-
-
-def doit(srcdb, dstdb, options):
-    outfp = options.outfp
-    profilep = options.profilep
-    verbose = options.verbose
-    # some global information
-    largest_pickle = 0
-    largest_txn_in_size = 0
-    largest_txn_in_objects = 0
-    total_pickle_size = 0L
-    total_object_count = 0
-    # Ripped from BaseStorage.copyTransactionsFrom()
-    ts = None
-    ok = True
-    prevrevids = {}
-    counter = 0
-    skipper = 0
-    if options.timestamps:
-        print "%4s. %26s %6s %8s %5s %5s %5s %5s %5s" % (
-            "NUM", "TID AS TIMESTAMP", "OBJS", "BYTES",
-            # Does anybody know what these times mean?
-            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3")
-    else:
-        print "%4s. %20s %6s %8s %6s %6s %6s %6s %6s" % (
-            "NUM", "TRANSACTION ID", "OBJS", "BYTES",
-            # Does anybody know what these times mean?
-            "t4-t0", "t1-t0", "t2-t1", "t3-t2", "t4-t3")
-    for txn in srcdb.iterator():
-        skipper += 1
-        if skipper <= options.skiptxn:
-            continue
-        counter += 1
-        if counter > options.maxtxn >= 0:
-            break
-        tid = txn.tid
-        if ts is None:
-            ts = TimeStamp(tid)
-        else:
-            t = TimeStamp(tid)
-            if t <= ts:
-                if ok:
-                    print >> sys.stderr, \
-                          'Time stamps are out of order %s, %s' % (ts, t)
-                    ok = False
-                    ts = t.laterThan(ts)
-                    tid = `ts`
-                else:
-                    ts = t
-                    if not ok:
-                        print >> sys.stderr, \
-                              'Time stamps are back in order %s' % t
-                        ok = True
-        if verbose > 1:
-            print ts
-
-        prof = None
-        if profilep and (counter % 100) == 0:
-            prof = profile.Profile()
-        objects = 0
-        size = 0
-        newrevids = RevidAccumulator()
-        t0 = time.time()
-        dstdb.tpc_begin(txn, tid, txn.status)
-        t1 = time.time()
-        for r in txn:
-            oid = r.oid
-            objects += 1
-            thissize = len(r.data)
-            size += thissize
-            if thissize > largest_pickle:
-                largest_pickle = thissize
-            if verbose > 1:
-                if not r.version:
-                    vstr = 'norev'
-                else:
-                    vstr = r.version
-                print utils.U64(oid), vstr, len(r.data)
-            oldrevid = prevrevids.get(oid, ZERO)
-            result = dstdb.store(oid, oldrevid, r.data, r.version, txn)
-            newrevids.store(oid, result)
-        t2 = time.time()
-        result = dstdb.tpc_vote(txn)
-        t3 = time.time()
-        newrevids.tpc_vote(result)
-        prevrevids.update(newrevids.get_dict())
-        # Profile every 100 transactions
-        if prof:
-            prof.runcall(dstdb.tpc_finish, txn)
-        else:
-            dstdb.tpc_finish(txn)
-        t4 = time.time()
-
-        # record the results
-        if objects > largest_txn_in_objects:
-            largest_txn_in_objects = objects
-        if size > largest_txn_in_size:
-            largest_txn_in_size = size
-        if options.timestamps:
-            tidstr = str(TimeStamp(tid))
-            format = "%4d. %26s %6d %8d %5.3f %5.3f %5.3f %5.3f %5.3f"
-        else:
-            tidstr = utils.U64(tid)
-            format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
-        print >> outfp, format % (skipper, tidstr, objects, size,
-                                  t4-t0, t1-t0, t2-t1, t3-t2, t4-t3)
-        total_pickle_size += size
-        total_object_count += objects
-
-        if prof:
-            prof.create_stats()
-            fp = open('profile-%02d.txt' % (counter / 100), 'wb')
-            marshal.dump(prof.stats, fp)
-            fp.close()
-    print >> outfp, "Largest pickle:          %8d" % largest_pickle
-    print >> outfp, "Largest transaction:     %8d" % largest_txn_in_size
-    print >> outfp, "Largest object count:    %8d" % largest_txn_in_objects
-    print >> outfp, "Total pickle size: %14d" % total_pickle_size
-    print >> outfp, "Total object count:      %8d" % total_object_count
-
-
-
-# helper to deal with differences between old-style store() return and
-# new-style store() return that supports ZEO
-import types
-
-class RevidAccumulator:
-
-    def __init__(self):
-        self.data = {}
-
-    def _update_from_list(self, list):
-        for oid, serial in list:
-            if not isinstance(serial, types.StringType):
-                raise serial
-            self.data[oid] = serial
-
-    def store(self, oid, result):
-        if isinstance(result, types.StringType):
-            self.data[oid] = result
-        elif result is not None:
-            self._update_from_list(result)
-
-    def tpc_vote(self, result):
-        if result is not None:
-            self._update_from_list(result)
-
-    def get_dict(self):
-        return self.data
-
-
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/scripts/netspace.py b/branches/bug1734/src/scripts/netspace.py
deleted file mode 100644
index c33e768f..00000000
--- a/branches/bug1734/src/scripts/netspace.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Report on the net size of objects counting subobjects.
-
-usage: netspace.py [-P | -v] data.fs
-
--P: do a pack first
--v: print info for all objects, even if a traversal path isn't found
-"""
-
-import ZODB
-from ZODB.FileStorage import FileStorage
-from ZODB.utils import U64, get_pickle_metadata
-from ZODB.referencesf import referencesf
-
-def find_paths(root, maxdist):
-    """Find Python attribute traversal paths for objects to maxdist distance.
-
-    Starting at a root object, traverse attributes up to maxdist levels
-    from the root, looking for persistent objects.  Return a dict
-    mapping oids to traversal paths.
-
-    TODO:  Assumes that the keys of the root are not themselves
-    persistent objects.
-
-    TODO:  Doesn't traverse containers.
-    """
-    paths = {}
-
-    # Handle the root as a special case because it's a dict
-    objs = []
-    for k, v in root.items():
-        oid = getattr(v, '_p_oid', None)
-        objs.append((k, v, oid, 0))
-
-    for path, obj, oid, dist in objs:
-        if oid is not None:
-            paths[oid] = path
-        if dist < maxdist:
-            getattr(obj, 'foo', None) # unghostify
-            try:
-                items = obj.__dict__.items()
-            except AttributeError:
-                continue
-            for k, v in items:
-                oid = getattr(v, '_p_oid', None)
-                objs.append(("%s.%s" % (path, k), v, oid, dist + 1))
-
-    return paths
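-
-# For a root mapping like {'app': obj}, find_paths() yields entries
-# such as 'app', 'app.attr', 'app.attr.child', keyed by each
-# persistent object's oid.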
-
-def main(path):
-    fs = FileStorage(path, read_only=1)
-    if PACK:
-        fs.pack()
-
-    db = ZODB.DB(fs)
-    rt = db.open().root()
-    paths = find_paths(rt, 3)
-
-    def total_size(oid):
-        cache = {}
-        cache_size = 1000
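-        # _total_size sums len(pickle) over oid and everything
-        # transitively reachable from it, memoizing per-oid totals in a
-        # bounded cache; 'seen' guards against reference cycles.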
-        def _total_size(oid, seen):
-            v = cache.get(oid)
-            if v is not None:
-                return v
-            data, serialno = fs.load(oid, '')
-            size = len(data)
-            for suboid in referencesf(data):
-                if seen.has_key(suboid):
-                    continue
-                seen[suboid] = 1
-                size += _total_size(suboid, seen)
-            cache[oid] = size
-            if len(cache) == cache_size:
-                cache.popitem()
-            return size
-        return _total_size(oid, {})
-
-    keys = fs._index.keys()
-    keys.sort()
-    keys.reverse()
-
-    if not VERBOSE:
-        # If not running verbosely, don't print an entry for an object
-        # unless it has an entry in paths.
-        keys = filter(paths.has_key, keys)
-
-    fmt = "%8s %5d %8d %s %s.%s"
-
-    for oid in keys:
-        data, serialno = fs.load(oid, '')
-        mod, klass = get_pickle_metadata(data)
-        refs = referencesf(data)
-        path = paths.get(oid, '-')
-        print fmt % (U64(oid), len(data), total_size(oid), path, mod, klass)
-
-if __name__ == "__main__":
-    import sys
-    import getopt
-
-    PACK = 0
-    VERBOSE = 0
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'Pv')
-        path, = args
-    except getopt.error, err:
-        print err
-        print __doc__
-        sys.exit(2)
-    except ValueError:
-        print "expected one argument, got", len(args)
-        print __doc__
-        sys.exit(2)
-    for o, v in opts:
-        if o == '-P':
-            PACK = 1
-        if o == '-v':
-            VERBOSE += 1
-    main(path)
diff --git a/branches/bug1734/src/scripts/parsezeolog.py b/branches/bug1734/src/scripts/parsezeolog.py
deleted file mode 100644
index c2e71c70..00000000
--- a/branches/bug1734/src/scripts/parsezeolog.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Parse the BLATHER logging generated by ZEO2.
-
-An example of the log format is:
-2002-04-15T13:05:29 BLATHER(-100) ZEO Server storea(3235680, [714], 235339406490168806) ('10.0.26.30', 45514)
-"""
-
-import re
-import time
-
-rx_time = re.compile('(\d\d\d\d-\d\d-\d\d)T(\d\d:\d\d:\d\d)')
-
-def parse_time(line):
-    """Return the time portion of a zLOG line in seconds or None."""
-    mo = rx_time.match(line)
-    if mo is None:
-        return None
-    date, time_ = mo.group(1, 2)
-    date_l = [int(elt) for elt in date.split('-')]
-    time_l = [int(elt) for elt in time_.split(':')]
-    return int(time.mktime(date_l + time_l + [0, 0, 0]))
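-
-# For the BLATHER line quoted in the module docstring, parse_time()
-# returns the epoch seconds for 2002-04-15 13:05:29 (interpreted as
-# local time, which is what time.mktime does).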
-
-rx_meth = re.compile("zrpc:\d+ calling (\w+)\((.*)")
-
-def parse_method(line):
-    pass
-
-def parse_line(line):
-    """Parse a log entry and return time, method info, and client."""
-    t = parse_time(line)
-    if t is None:
-        return None, None
-    mo = rx_meth.search(line)
-    if mo is None:
-        return None, None
-    meth_name = mo.group(1)
-    meth_args = mo.group(2).strip()
-    if meth_args.endswith(')'):
-        meth_args = meth_args[:-1]
-    meth_args = [s.strip() for s in meth_args.split(",")]
-    m = meth_name, tuple(meth_args)
-    return t, m
-
-class TStats:
-
-    counter = 1
-
-    def __init__(self):
-        self.id = TStats.counter
-        TStats.counter += 1
-
-    fields = ("time", "vote", "done", "user", "path")
-    fmt = "%-24s %5s %5s %-15s %s"
-    hdr = fmt % fields
-
-    def report(self):
-        """Print a report about the transaction"""
-        t = time.ctime(self.begin)
-        if hasattr(self, "vote"):
-            d_vote = self.vote - self.begin
-        else:
-            d_vote = "*"
-        if hasattr(self, "finish"):
-            d_finish = self.finish - self.begin
-        else:
-            d_finish = "*"
-        print self.fmt % (t, d_vote, d_finish, self.user, self.url)
-
-class TransactionParser:
-
-    def __init__(self):
-        self.txns = {}
-        self.skipped = 0
-
-    def parse(self, line):
-        t, m = parse_line(line)
-        if t is None:
-            return
-        name = m[0]
-        meth = getattr(self, name, None)
-        if meth is not None:
-            meth(t, m[1])
-
-    def tpc_begin(self, time, args):
-        t = TStats()
-        t.begin = time
-        t.user = args[1]
-        t.url = args[2]
-        t.objects = []
-        tid = eval(args[0])
-        self.txns[tid] = t
-
-    def get_txn(self, args):
-        tid = eval(args[0])
-        try:
-            return self.txns[tid]
-        except KeyError:
-            print "uknown tid", repr(tid)
-            return None
-
-    def tpc_finish(self, time, args):
-        t = self.get_txn(args)
-        if t is None:
-            return
-        t.finish = time
-
-    def vote(self, time, args):
-        t = self.get_txn(args)
-        if t is None:
-            return
-        t.vote = time
-
-    def get_txns(self):
-        L = [(t.id, t) for t in self.txns.values()]
-        L.sort()
-        return [t for (id, t) in L]
-
-if __name__ == "__main__":
-    import fileinput
-
-    p = TransactionParser()
-    i = 0
-    for line in fileinput.input():
-        i += 1
-        try:
-            p.parse(line)
-        except:
-            print "line", i
-            raise
-    print "Transaction: %d" % len(p.txns)
-    print TStats.hdr
-    for txn in p.get_txns():
-        txn.report()
diff --git a/branches/bug1734/src/scripts/repozo.py b/branches/bug1734/src/scripts/repozo.py
deleted file mode 100755
index c36d97a6..00000000
--- a/branches/bug1734/src/scripts/repozo.py
+++ /dev/null
@@ -1,517 +0,0 @@
-#!/usr/bin/env python2.3
-
-# repozo.py -- incremental and full backups of a Data.fs file.
-#
-# Originally written by Anthony Baxter
-# Significantly modified by Barry Warsaw
-
-"""repozo.py -- incremental and full backups of a Data.fs file.
-
-Usage: %(program)s [options]
-Where:
-
-    Exactly one of -B or -R must be specified:
-
-    -B / --backup
-        Backup current ZODB file.
-
-    -R / --recover
-        Restore a ZODB file from a backup.
-
-    -v / --verbose
-        Verbose mode.
-
-    -h / --help
-        Print this text and exit.
-
-    -r dir
-    --repository=dir
-        Repository directory containing the backup files.  This argument
-        is required.  The directory must already exist.  You should not
-        edit the files in this directory, or add your own files to it.
-
-Options for -B/--backup:
-    -f file
-    --file=file
-        Source Data.fs file.  This argument is required.
-
-    -F / --full
-        Force a full backup.  By default, an incremental backup is made
-        if possible; a full backup is done only when necessary (e.g.,
-        when a pack has occurred since the last incremental backup).
-
-    -Q / --quick
-        Verify, via md5 checksum, only the last incremental written.  This
-        significantly reduces the disk i/o at the (theoretical) cost of
-        inconsistency.  This is a probabilistic way of determining whether
-        a full backup is necessary.
-
-    -z / --gzip
-        Compress the backup files with gzip, using the default zlib
-        compression level.  By default, gzip compression is not used.
-
-Options for -R/--recover:
-    -D str
-    --date=str
-        Recover state as of this date.  Specify UTC (not local) time.
-            yyyy-mm-dd[-hh[-mm[-ss]]]
-        By default, current time is used.
-
-    -o filename
-    --output=filename
-        Write recovered ZODB to given file.  By default, the file is
-        written to stdout.
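-
-Examples (paths and dates are hypothetical):
-
-    Make a backup (incremental if possible) of /var/zope/Data.fs:
-        repozo.py -B -r /var/backups -f /var/zope/Data.fs
-
-    Recover the state as of 2004-07-02 (UTC):
-        repozo.py -R -r /var/backups -D 2004-07-02 -o /tmp/Data.fs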
-"""
-
-import os
-import sys
-import md5
-import gzip
-import time
-import errno
-import getopt
-
-from ZODB.FileStorage import FileStorage
-
-program = sys.argv[0]
-
-BACKUP = 1
-RECOVER = 2
-
-COMMASPACE = ', '
-READCHUNK = 16 * 1024
-VERBOSE = False
-
-
-def usage(code, msg=''):
-    outfp = sys.stderr
-    if code == 0:
-        outfp = sys.stdout
-
-    print >> outfp, __doc__ % globals()
-    if msg:
-        print >> outfp, msg
-
-    sys.exit(code)
-
-
-def log(msg, *args):
-    if VERBOSE:
-        # Use stderr here so that -v flag works with -R and no -o
-        print >> sys.stderr, msg % args
-
-
-def parseargs():
-    global VERBOSE
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'BRvhf:r:FD:o:Qz',
-                                   ['backup', 'recover', 'verbose', 'help',
-                                    'file=', 'repository=', 'full', 'date=',
-                                    'output=', 'quick', 'gzip'])
-    except getopt.error, msg:
-        usage(1, msg)
-
-    class Options:
-        mode = None         # BACKUP or RECOVER
-        file = None         # name of input Data.fs file
-        repository = None   # name of directory holding backups
-        full = False        # True forces full backup
-        date = None         # -D argument, if any
-        output = None       # where to write recovered data; None = stdout
-        quick = False       # -Q flag state
-        gzip = False        # -z flag state
-
-    options = Options()
-
-    for opt, arg in opts:
-        if opt in ('-h', '--help'):
-            usage(0)
-        elif opt in ('-v', '--verbose'):
-            VERBOSE = True
-        elif opt in ('-R', '--recover'):
-            if options.mode is not None:
-                usage(1, '-B and -R are mutually exclusive')
-            options.mode = RECOVER
-        elif opt in ('-B', '--backup'):
-            if options.mode is not None:
-                usage(1, '-B and -R are mutually exclusive')
-            options.mode = BACKUP
-        elif opt in ('-Q', '--quick'):
-            options.quick = True
-        elif opt in ('-f', '--file'):
-            options.file = arg
-        elif opt in ('-r', '--repository'):
-            options.repository = arg
-        elif opt in ('-F', '--full'):
-            options.full = True
-        elif opt in ('-D', '--date'):
-            options.date = arg
-        elif opt in ('-o', '--output'):
-            options.output = arg
-        elif opt in ('-z', '--gzip'):
-            options.gzip = True
-        else:
-            assert False, (opt, arg)
-
-    # Any other arguments are invalid
-    if args:
-        usage(1, 'Invalid arguments: ' + COMMASPACE.join(args))
-
-    # Sanity checks
-    if options.mode is None:
-        usage(1, 'Either --backup or --recover is required')
-    if options.repository is None:
-        usage(1, '--repository is required')
-    if options.mode == BACKUP:
-        if options.date is not None:
-            log('--date option is ignored in backup mode')
-            options.date = None
-        if options.output is not None:
-            log('--output option is ignored in backup mode')
-            options.output = None
-    else:
-        assert options.mode == RECOVER
-        if options.file is not None:
-            log('--file option is ignored in recover mode')
-            options.file = None
-    return options
-
-
-# afile is a Python file object, or created by gzip.open().  The latter
-# doesn't have a fileno() method, so to fsync it we need to reach into
-# its underlying file object.
-def fsync(afile):
-    afile.flush()
-    fileobject = getattr(afile, 'fileobj', afile)
-    os.fsync(fileobject.fileno())
-
-# Read bytes (no more than n, or to EOF if n is None) in chunks from the
-# current position in file fp.  Pass each chunk as an argument to func().
-# Return the total number of bytes read == the total number of bytes
-# passed in all to func().  Leaves the file position just after the
-# last byte read.
-def dofile(func, fp, n=None):
-    bytesread = 0L
-    while n is None or n > 0:
-        if n is None:
-            todo = READCHUNK
-        else:
-            todo = min(READCHUNK, n)
-        data = fp.read(todo)
-        if not data:
-            break
-        func(data)
-        nread = len(data)
-        bytesread += nread
-        if n is not None:
-            n -= nread
-    return bytesread
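-
-# A minimal usage sketch for dofile() (the file name is hypothetical):
-#
-#     fp = open('Data.fs', 'rb')
-#     nbytes = dofile(sys.stdout.write, fp, 1024)  # copy first 1KB to stdout
-#     fp.close()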
-
-
-def checksum(fp, n):
-    # Checksum the first n bytes of the specified file
-    sum = md5.new()
-    def func(data):
-        sum.update(data)
-    dofile(func, fp, n)
-    return sum.hexdigest()
-
-
-def copyfile(options, dst, start, n):
-    # Copy n bytes from options.file to file dst, starting at offset
-    # start.  For robustness, we first write, flush and fsync to a temp
-    # file, then rename the temp file at the end.
-    sum = md5.new()
-    ifp = open(options.file, 'rb')
-    ifp.seek(start)
-    tempname = os.path.join(os.path.dirname(dst), 'tmp.tmp')
-    if options.gzip:
-        ofp = gzip.open(tempname, 'wb')
-    else:
-        ofp = open(tempname, 'wb')
-
-    def func(data):
-        sum.update(data)
-        ofp.write(data)
-
-    ndone = dofile(func, ifp, n)
-    assert ndone == n
-
-    ifp.close()
-    fsync(ofp)
-    ofp.close()
-    os.rename(tempname, dst)
-    return sum.hexdigest()
-
-
-def concat(files, ofp=None):
-    # Concatenate a bunch of files from the repository, writing output to
-    # `ofp' if given.  Return the number of bytes written and the md5
-    # checksum of the bytes.
-    sum = md5.new()
-    def func(data):
-        sum.update(data)
-        if ofp:
-            ofp.write(data)
-    bytesread = 0
-    for f in files:
-        # Auto uncompress
-        if f.endswith('fsz'):
-            ifp = gzip.open(f, 'rb')
-        else:
-            ifp = open(f, 'rb')
-        bytesread += dofile(func, ifp)
-        ifp.close()
-    if ofp:
-        ofp.close()
-    return bytesread, sum.hexdigest()
-
-
-def gen_filename(options, ext=None):
-    if ext is None:
-        if options.full:
-            ext = '.fs'
-        else:
-            ext = '.deltafs'
-        if options.gzip:
-            ext += 'z'
-    t = time.gmtime()[:6] + (ext,)
-    return '%04d-%02d-%02d-%02d-%02d-%02d%s' % t
-
-# Return a list of files needed to reproduce state at time options.date.
-# This is a list, in chronological order, of the .fs[z] and .deltafs[z]
-# files, from the time of the most recent full backup preceding
-# options.date, up to options.date.
-
-import re
-is_data_file = re.compile(r'\d{4}(?:-\d\d){5}\.(?:delta)?fsz?$').match
-del re
-
-def find_files(options):
-    when = options.date
-    if not when:
-        when = gen_filename(options, '')
-    log('looking for files between last full backup and %s...', when)
-    all = filter(is_data_file, os.listdir(options.repository))
-    all.sort()
-    all.reverse()   # newest file first
-    # Find the last full backup before date, then include all the
-    # incrementals between that full backup and "when".
-    needed = []
-    for fname in all:
-        root, ext = os.path.splitext(fname)
-        if root <= when:
-            needed.append(fname)
-            if ext in ('.fs', '.fsz'):
-                break
-    # Make the file names relative to the repository directory
-    needed = [os.path.join(options.repository, f) for f in needed]
-    # Restore back to chronological order
-    needed.reverse()
-    if needed:
-        log('files needed to recover state as of %s:', when)
-        for f in needed:
-            log('\t%s', f)
-    else:
-        log('no files found')
-    return needed
-
-# Scan the .dat file corresponding to the last full backup performed.
-# Return
-#
-#     filename, startpos, endpos, checksum
-#
-# of the last incremental.  If there is no .dat file, or the .dat file
-# is empty, return
-#
-#     None, None, None, None
-
-def scandat(repofiles):
-    fullfile = repofiles[0]
-    datfile = os.path.splitext(fullfile)[0] + '.dat'
-    fn = startpos = endpos = sum = None # assume .dat file missing or empty
-    try:
-        fp = open(datfile)
-    except IOError, e:
-        if e.errno <> errno.ENOENT:
-            raise
-    else:
-        # We only care about the last one.
-        lines = fp.readlines()
-        fp.close()
-        if lines:
-            fn, startpos, endpos, sum = lines[-1].split()
-            startpos = long(startpos)
-            endpos = long(endpos)
-
-    return fn, startpos, endpos, sum
-
-
-def do_full_backup(options):
-    # Find the file position of the last completed transaction.
-    fs = FileStorage(options.file, read_only=True)
-    # Note that the FileStorage ctor calls read_index() which scans the file
-    # and returns "the position just after the last valid transaction record".
-    # getSize() then returns this position, which is exactly what we want,
-    # because we only want to copy stuff from the beginning of the file to the
-    # last valid transaction record.
-    pos = fs.getSize()
-    fs.close()
-    options.full = True
-    dest = os.path.join(options.repository, gen_filename(options))
-    if os.path.exists(dest):
-        print >> sys.stderr, 'Cannot overwrite existing file:', dest
-        sys.exit(2)
-    log('writing full backup: %s bytes to %s', pos, dest)
-    sum = copyfile(options, dest, 0, pos)
-    # Write the data file for this full backup
-    datfile = os.path.splitext(dest)[0] + '.dat'
-    fp = open(datfile, 'w')
-    print >> fp, dest, 0, pos, sum
-    fp.flush()
-    os.fsync(fp.fileno())
-    fp.close()
-
-
-def do_incremental_backup(options, reposz, repofiles):
-    # Find the file position of the last completed transaction.
-    fs = FileStorage(options.file, read_only=True)
-    # Note that the FileStorage ctor calls read_index() which scans the file
-    # and returns "the position just after the last valid transaction record".
-    # getSize() then returns this position, which is exactly what we want,
-    # because we only want to copy stuff from the beginning of the file to the
-    # last valid transaction record.
-    pos = fs.getSize()
-    fs.close()
-    options.full = False
-    dest = os.path.join(options.repository, gen_filename(options))
-    if os.path.exists(dest):
-        print >> sys.stderr, 'Cannot overwrite existing file:', dest
-        sys.exit(2)
-    log('writing incremental: %s bytes to %s',  pos-reposz, dest)
-    sum = copyfile(options, dest, reposz, pos - reposz)
-    # The first file in repofiles points to the last full backup.  Use this to
-    # get the .dat file and append the information for this incremental to
-    # that file.
-    fullfile = repofiles[0]
-    datfile = os.path.splitext(fullfile)[0] + '.dat'
-    # This .dat file better exist.  Let the exception percolate if not.
-    fp = open(datfile, 'a')
-    print >> fp, dest, reposz, pos, sum
-    fp.flush()
-    os.fsync(fp.fileno())
-    fp.close()
-
-
-def do_backup(options):
-    repofiles = find_files(options)
-    # See if we need to do a full backup
-    if options.full or not repofiles:
-        log('doing a full backup')
-        do_full_backup(options)
-        return
-    srcsz = os.path.getsize(options.file)
-    if options.quick:
-        fn, startpos, endpos, sum = scandat(repofiles)
-        # If the .dat file was missing, or was empty, do a full backup
-        if (fn, startpos, endpos, sum) == (None, None, None, None):
-            log('missing or empty .dat file (full backup)')
-            do_full_backup(options)
-            return
-        # Has the file shrunk, possibly because of a pack?
-        if srcsz < endpos:
-            log('file shrunk, possibly because of a pack (full backup)')
-            do_full_backup(options)
-            return
-        # Now check the md5 sum of the source file, from the last
-        # incremental's start and stop positions.
-        srcfp = open(options.file, 'rb')
-        srcfp.seek(startpos)
-        srcsum = checksum(srcfp, endpos-startpos)
-        srcfp.close()
-        log('last incremental file: %s', fn)
-        log('last incremental checksum: %s', sum)
-        log('source checksum range: [%s..%s], sum: %s',
-            startpos, endpos, srcsum)
-        if sum == srcsum:
-            if srcsz == endpos:
-                log('No changes, nothing to do')
-                return
-            log('doing incremental, starting at: %s', endpos)
-            do_incremental_backup(options, endpos, repofiles)
-            return
-    else:
-        # This way is much slower, and more disk i/o intensive, but it's also
-        # more accurate since it checks the actual existing files instead of
-        # the information in the .dat file.
-        #
-        # See if we can do an incremental, based on the files that already
-        # exist.  This call of concat() will not write an output file.
-        reposz, reposum = concat(repofiles)
-        log('repository state: %s bytes, md5: %s', reposz, reposum)
-        # Get the md5 checksum of the source file, up to two file positions:
-        # the entire size of the file, and up to the file position of the last
-        # incremental backup.
-        srcfp = open(options.file, 'rb')
-        srcsum = checksum(srcfp, srcsz)
-        srcfp.seek(0)
-        srcsum_backedup = checksum(srcfp, reposz)
-        srcfp.close()
-        log('current state   : %s bytes, md5: %s', srcsz, srcsum)
-        log('backed up state : %s bytes, md5: %s', reposz, srcsum_backedup)
-        # Has nothing changed?
-        if srcsz == reposz and srcsum == reposum:
-            log('No changes, nothing to do')
-            return
-        # Has the file shrunk, probably because of a pack?
-        if srcsz < reposz:
-            log('file shrunk, possibly because of a pack (full backup)')
-            do_full_backup(options)
-            return
-        # The source file is larger than the repository.  If the md5 checksums
-        # match, then we know we can do an incremental backup.  If they don't,
-        # then perhaps the file was packed at some point (or a
-        # non-transactional undo was performed, but this is deprecated).  Only
-        # do a full backup if forced to.
-        if reposum == srcsum_backedup:
-            log('doing incremental, starting at: %s', reposz)
-            do_incremental_backup(options, reposz, repofiles)
-            return
-    # The checksums don't match, meaning the front of the source file has
-    # changed.  We'll need to do a full backup in that case.
-    log('file changed, possibly because of a pack (full backup)')
-    do_full_backup(options)
-
-
-def do_recover(options):
-    # Find the first full backup at or before the specified date
-    repofiles = find_files(options)
-    if not repofiles:
-        if options.date:
-            log('No files in repository before %s', options.date)
-        else:
-            log('No files in repository')
-        return
-    if options.output is None:
-        log('Recovering file to stdout')
-        outfp = sys.stdout
-    else:
-        log('Recovering file to %s', options.output)
-        outfp = open(options.output, 'wb')
-    reposz, reposum = concat(repofiles, outfp)
-    if outfp <> sys.stdout:
-        outfp.close()
-    log('Recovered %s bytes, md5: %s', reposz, reposum)
-
-
-def main():
-    options = parseargs()
-    if options.mode == BACKUP:
-        do_backup(options)
-    else:
-        assert options.mode == RECOVER
-        do_recover(options)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/scripts/space.py b/branches/bug1734/src/scripts/space.py
deleted file mode 100644
index 9a75a05a..00000000
--- a/branches/bug1734/src/scripts/space.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Report on the space used by objects in a storage.
-
-usage: space.py data.fs
-
-The current implementation only supports FileStorage.
-
-Current limitations / simplifications: Ignores revisions and versions.
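-
-A typical invocation (storage path is hypothetical):
-
-    python space.py -v /var/zope/Data.fs
-
-With -v, a line is printed for each object in addition to the
-per-class totals.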
-"""
-
-from ZODB.FileStorage import FileStorage
-from ZODB.utils import U64, get_pickle_metadata
-
-def run(path, v=0):
-    fs = FileStorage(path, read_only=1)
-    # break into the file implementation
-    if hasattr(fs._index, 'iterkeys'):
-        iter = fs._index.iterkeys()
-    else:
-        iter = fs._index.keys()
-    totals = {}
-    for oid in iter:
-        data, serialno = fs.load(oid, '')
-        mod, klass = get_pickle_metadata(data)
-        key = "%s.%s" % (mod, klass)
-        bytes, count = totals.get(key, (0, 0))
-        bytes += len(data)
-        count += 1
-        totals[key] = bytes, count
-        if v:
-            print "%8s %5d %s" % (U64(oid), len(data), key)
-    L = totals.items()
-    L.sort(lambda a, b: cmp(a[1], b[1]))
-    L.reverse()
-    print "Totals per object class:"
-    for key, (bytes, count) in L:
-        print "%8d %8d %s" % (count, bytes, key)
-
-def main():
-    import sys
-    import getopt
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "v")
-    except getopt.error, msg:
-        print msg
-        print "usage: space.py [-v] Data.fs"
-        sys.exit(2)
-    if len(args) != 1:
-        print "usage: space.py [-v] Data.fs"
-        sys.exit(2)
-    v = 0
-    for o, a in opts:
-        if o == "-v":
-            v += 1
-    path = args[0]
-    run(path, v)
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/scripts/timeout.py b/branches/bug1734/src/scripts/timeout.py
deleted file mode 100755
index 3058b4da..00000000
--- a/branches/bug1734/src/scripts/timeout.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Transaction timeout test script.
-
-This script connects to a storage, begins a transaction, calls store()
-and tpc_vote(), and then sleeps for the given delay.  This should
-trigger the transaction timeout feature of the server.
-
-usage: timeout.py address delay [storage-name]
-
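-For example (address and delay are hypothetical):
-
-    python timeout.py localhost:9999 300
-
-connects to port 9999 on the local host, stores one object, votes, and
-then sleeps for 300 seconds before finishing.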
-"""
-
-import sys
-import time
-
-from ZODB.Transaction import Transaction
-from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_pickle
-from ZEO.ClientStorage import ClientStorage
-
-ZERO = '\0'*8
-
-def main():
-    if len(sys.argv) not in (3, 4):
-        sys.stderr.write("Usage: timeout.py address delay [storage-name]\n" %
-                         sys.argv[0])
-        sys.exit(2)
-
-    hostport = sys.argv[1]
-    delay = float(sys.argv[2])
-    if sys.argv[3:]:
-        name = sys.argv[3]
-    else:
-        name = "1"
-
-    if "/" in hostport:
-        address = hostport
-    else:
-        if ":" in hostport:
-            i = hostport.index(":")
-            host, port = hostport[:i], hostport[i+1:]
-        else:
-            host, port = "", hostport
-        port = int(port)
-        address = (host, port)
-
-    print "Connecting to %s..." % repr(address)
-    storage = ClientStorage(address, name)
-    print "Connected.  Now starting a transaction..."
-
-    oid = storage.new_oid()
-    version = ""
-    revid = ZERO
-    data = MinPO("timeout.py")
-    pickled_data = zodb_pickle(data)
-    t = Transaction()
-    t.user = "timeout.py"
-    storage.tpc_begin(t)
-    storage.store(oid, revid, pickled_data, version, t)
-    print "Stored.  Now voting..."
-    storage.tpc_vote(t)
-
-    print "Voted; now sleeping %s..." % delay
-    time.sleep(delay)
-    print "Done."
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/scripts/zeopack.py b/branches/bug1734/src/scripts/zeopack.py
deleted file mode 100755
index cb0484ef..00000000
--- a/branches/bug1734/src/scripts/zeopack.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Connect to a ZEO server and ask it to pack.
-
-Usage: zeopack.py [options]
-
-Options:
-
-    -p port -- port to connect to
-
-    -h host -- host to connect to (default is current host)
-
-    -U path -- Unix-domain socket to connect to
-
-    -S name -- storage name (default is '1')
-
-    -d days -- pack objects more than days old
-
-    -1 -- Connect to a ZEO 1 server
-
-    -W -- wait for server to come up.  Normally the script tries to
-       connect for 10 seconds, then exits with an error.  The -W
-       option is only supported with ZEO 1.
-
-You must specify either -p and -h or -U.
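-
-Examples (host, port, and socket path are hypothetical):
-
-    zeopack.py -h zeohost -p 9999
-    zeopack.py -U /var/zeo/zeo.sock -d 7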
-"""
-
-import getopt
-import socket
-import sys
-import time
-
-from ZEO.ClientStorage import ClientStorage
-
-WAIT = 10 # wait no more than 10 seconds for client to connect
-
-def connect(storage):
-    # The connect-on-startup logic that ZEO provides isn't too useful
-    # for this script.  We'd like the client to attempt to start up, but
-    # fail if it can't get through to the server after a reasonable
-    # amount of time.  There's no external support for this, so we'll
-    # expose the ZEO 1.0 internals.  (consenting adults only)
-    t0 = time.time()
-    while t0 + WAIT > time.time():
-        storage._call.connect()
-        if storage._connected:
-            return
-    raise RuntimeError, "Unable to connect to ZEO server"
-
-def pack1(addr, storage, days, wait):
-    cs = ClientStorage(addr, storage=storage,
-                       wait_for_server_on_startup=wait)
-    if wait:
-        # _startup() is an artifact of the way ZEO 1.0 works.  The
-        # ClientStorage doesn't get fully initialized until registerDB()
-        # is called.  The only thing we care about, though, is that
-        # registerDB() calls _startup().
-        cs._startup()
-    else:
-        connect(cs)
-    cs.invalidator = None
-    cs.pack(wait=1, days=days)
-    cs.close()
-
-def pack2(addr, storage, days):
-    cs = ClientStorage(addr, storage=storage, wait=1, read_only=1)
-    cs.pack(wait=1, days=days)
-    cs.close()
-
-def usage(exit=1):
-    print __doc__
-    print " ".join(sys.argv)
-    sys.exit(exit)
-
-def main():
-    host = None
-    port = None
-    unix = None
-    storage = '1'
-    days = 0
-    wait = 0
-    zeoversion = 2
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'p:h:U:S:d:W1')
-        for o, a in opts:
-            if o == '-p':
-                port = int(a)
-            elif o == '-h':
-                host = a
-            elif o == '-U':
-                unix = a
-            elif o == '-S':
-                storage = a
-            elif o == '-d':
-                days = int(a)
-            elif o == '-W':
-                wait = 1
-            elif o == '-1':
-                zeoversion = 1
-    except Exception, err:
-        print err
-        usage()
-
-    if unix is not None:
-        addr = unix
-    else:
-        if host is None:
-            host = socket.gethostname()
-        if port is None:
-            usage()
-        addr = host, port
-
-    if zeoversion == 1:
-        pack1(addr, storage, days, wait)
-    else:
-        pack2(addr, storage, days)
-
-if __name__ == "__main__":
-    try:
-        main()
-    except Exception, err:
-        print err
-        sys.exit(1)
diff --git a/branches/bug1734/src/scripts/zeoqueue.py b/branches/bug1734/src/scripts/zeoqueue.py
deleted file mode 100755
index f80d092f..00000000
--- a/branches/bug1734/src/scripts/zeoqueue.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Report on the number of currently waiting clients in the ZEO queue.
-
-Usage: %(PROGRAM)s [options] logfile
-
-Options:
-    -h / --help
-        Print this help text and exit.
-
-    -v / --verbose
-        Verbose output
-
-    -f file
-    --file file
-        Use the specified file to store the incremental state as a pickle.  If
-        not given, %(STATEFILE)s is used.
-
-    -r / --reset
-        Reset the state of the tool.  This blows away any existing state
-        pickle file and then exits -- it does not parse the file.  Use this
-        when you rotate log files so that the next run will parse from the
-        beginning of the file.
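-
-Example (log path is hypothetical):
-
-    zeoqueue.py -v /var/log/zeo/zeo.log
-
-The blocked-transaction count is printed and is also returned as the
-script's exit status.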
-"""
-
-import os
-import re
-import sys
-import time
-import errno
-import getopt
-import cPickle as pickle
-
-COMMASPACE = ', '
-STATEFILE = 'zeoqueue.pck'
-PROGRAM = sys.argv[0]
-
-try:
-    True, False
-except NameError:
-    True = 1
-    False = 0
-
-
-
-tcre = re.compile(r"""
-    (?P<ymd>
-     \d{4}-      # year
-     \d{2}-      # month
-     \d{2})      # day
-    T            # separator
-    (?P<hms>
-     \d{2}:      # hour
-     \d{2}:      # minute
-     \d{2})      # second
-     """, re.VERBOSE)
-
-ccre = re.compile(r"""
-    zrpc-conn:(?P<addr>\d+\.\d+\.\d+\.\d+:\d+)\s+
-    calling\s+
-    (?P<method>
-     \w+)        # the method
-    \(           # args open paren
-      \'         # string quote start
-        (?P<tid>
-         \S+)    # first argument -- usually the tid
-      \'         # end of string
-    (?P<rest>
-     .*)         # rest of line
-    """, re.VERBOSE)
-
-wcre = re.compile(r'Clients waiting: (?P<num>\d+)')
-
-
-
-def parse_time(line):
-    """Return the time portion of a zLOG line in seconds or None."""
-    mo = tcre.match(line)
-    if mo is None:
-        return None
-    date, time_ = mo.group('ymd', 'hms')
-    date_l = [int(elt) for elt in date.split('-')]
-    time_l = [int(elt) for elt in time_.split(':')]
-    return int(time.mktime(date_l + time_l + [0, 0, 0]))
-
-
-class Txn:
-    """Track status of single transaction."""
-    def __init__(self, tid):
-        self.tid = tid
-        self.hint = None
-        self.begin = None
-        self.vote = None
-        self.abort = None
-        self.finish = None
-        self.voters = []
-
-    def isactive(self):
-        if self.begin and not (self.abort or self.finish):
-            return True
-        else:
-            return False
-
-
-
-class Status:
-    """Track status of ZEO server by replaying log records.
-
-    We want to keep track of several events:
-
-    - The last committed transaction.
-    - The last committed or aborted transaction.
-    - The last transaction that got the lock but didn't finish.
-    - The client address doing the first vote of a transaction.
-    - The number of currently active transactions.
-    - The number of reported queued transactions.
-    - Client restarts.
-    - Number of current connections (but this might not be useful).
-
-    We can observe these events by reading the following sorts of log
-    entries:
-
-    2002-12-16T06:16:05 BLATHER(-100) zrpc:12649 calling
-    tpc_begin('\x03I\x90((\xdbp\xd5', '', 'QueueCatal...
-
-    2002-12-16T06:16:06 BLATHER(-100) zrpc:12649 calling
-    vote('\x03I\x90((\xdbp\xd5')
-
-    2002-12-16T06:16:06 BLATHER(-100) zrpc:12649 calling
-    tpc_finish('\x03I\x90((\xdbp\xd5')
-
-    2002-12-16T10:46:10 INFO(0) ZSS:12649:1 Transaction blocked waiting
-    for storage. Clients waiting: 1.
-
-    2002-12-16T06:15:57 BLATHER(-100) zrpc:12649 connect from
-    ('10.0.26.54', 48983): <ManagedServerConnection ('10.0.26.54', 48983)>
-
-    2002-12-16T10:30:09 INFO(0) ZSS:12649:1 disconnected
-    """
-
-    def __init__(self):
-        self.lineno = 0
-        self.pos = 0
-        self.reset()
-
-    def reset(self):
-        self.commit = None
-        self.commit_or_abort = None
-        self.last_unfinished = None
-        self.n_active = 0
-        self.n_blocked = 0
-        self.n_conns = 0
-        self.t_restart = None
-        self.txns = {}
-
-    def iscomplete(self):
-        # The status report will always be complete if we encounter an
-        # explicit restart.
-        if self.t_restart is not None:
-            return True
-        # If we haven't seen a restart, assume that seeing a finished
-        # transaction is good enough.
-        return self.commit is not None
-
-    def process_file(self, fp):
-        if self.pos:
-            if VERBOSE:
-                print 'seeking to file position', self.pos
-            fp.seek(self.pos)
-        while True:
-            line = fp.readline()
-            if not line:
-                break
-            self.lineno += 1
-            self.process(line)
-        self.pos = fp.tell()
-
-    def process(self, line):
-        if line.find("calling") != -1:
-            self.process_call(line)
-        elif line.find("connect") != -1:
-            self.process_connect(line)
-        # test for "locked" because word may start with "B" or "b"
-        elif line.find("locked") != -1:
-            self.process_block(line)
-        elif line.find("Starting") != -1:
-            self.process_start(line)
-
-    def process_call(self, line):
-        mo = ccre.search(line)
-        if mo is None:
-            return
-        called_method = mo.group('method')
-        # Exit early if we've got zeoLoad, because it's the most
-        # frequently called method and we don't use it.
-        if called_method == "zeoLoad":
-            return
-        t = parse_time(line)
-        meth = getattr(self, "call_%s" % called_method, None)
-        if meth is None:
-            return
-        client = mo.group('addr')
-        tid = mo.group('tid')
-        rest = mo.group('rest')
-        meth(t, client, tid, rest)
-
-    def process_connect(self, line):
-        pass
-
-    def process_block(self, line):
-        mo = wcre.search(line)
-        if mo is None:
-            # assume that this was a restart message for the last blocked
-            # transaction.
-            self.n_blocked = 0
-        else:
-            self.n_blocked = int(mo.group('num'))
-
-    def process_start(self, line):
-        if line.find("Starting ZEO server") != -1:
-            self.reset()
-            self.t_restart = parse_time(line)
-
-    def call_tpc_begin(self, t, client, tid, rest):
-        txn = Txn(tid)
-        txn.begin = t
-        if rest[0] == ',':
-            i = 1
-            while rest[i].isspace():
-                i += 1
-            rest = rest[i:]
-        txn.hint = rest
-        self.txns[tid] = txn
-        self.n_active += 1
-        self.last_unfinished = txn
-
-    def call_vote(self, t, client, tid, rest):
-        txn = self.txns.get(tid)
-        if txn is None:
-            print "Oops!"
-            txn = self.txns[tid] = Txn(tid)
-        txn.vote = t
-        txn.voters.append(client)
-
-    def call_tpc_abort(self, t, client, tid, rest):
-        txn = self.txns.get(tid)
-        if txn is None:
-            print "Oops!"
-            txn = self.txns[tid] = Txn(tid)
-        txn.abort = t
-        txn.voters = []
-        self.n_active -= 1
-        if self.commit_or_abort:
-            # delete the old transaction
-            try:
-                del self.txns[self.commit_or_abort.tid]
-            except KeyError:
-                pass
-        self.commit_or_abort = txn
-
-    def call_tpc_finish(self, t, client, tid, rest):
-        txn = self.txns.get(tid)
-        if txn is None:
-            print "Oops!"
-            txn = self.txns[tid] = Txn(tid)
-        txn.finish = t
-        txn.voters = []
-        self.n_active -= 1
-        if self.commit:
-            # delete the old transaction
-            try:
-                del self.txns[self.commit.tid]
-            except KeyError:
-                pass
-        if self.commit_or_abort:
-            # delete the old transaction
-            try:
-                del self.txns[self.commit_or_abort.tid]
-            except KeyError:
-                pass
-        self.commit = self.commit_or_abort = txn
-
-    def report(self):
-        print "Blocked transactions:", self.n_blocked
-        if not VERBOSE:
-            return
-        if self.t_restart:
-            print "Server started:", time.ctime(self.t_restart)
-
-        if self.commit is not None:
-            t = self.commit_or_abort.finish
-            if t is None:
-                t = self.commit_or_abort.abort
-            print "Last finished transaction:", time.ctime(t)
-
-        # the blocked transaction should be the first one that calls vote
-        L = [(txn.begin, txn) for txn in self.txns.values()]
-        L.sort()
-
-        for x, txn in L:
-            if txn.isactive():
-                began = txn.begin
-                if txn.voters:
-                    print "Blocked client (first vote):", txn.voters[0]
-                print "Blocked transaction began at:", time.ctime(began)
-                print "Hint:", txn.hint
-                print "Idle time: %d sec" % int(time.time() - began)
-                break
-
-
-
-def usage(code, msg=''):
-    print >> sys.stderr, __doc__ % globals()
-    if msg:
-        print >> sys.stderr, msg
-    sys.exit(code)
-
-
-def main():
-    global VERBOSE
-
-    VERBOSE = 0
-    file = STATEFILE
-    reset = False
-    # -0 is a secret option used for testing purposes only
-    seek = True
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'vhf:r0',
-                                   ['help', 'verbose', 'file=', 'reset'])
-    except getopt.error, msg:
-        usage(1, msg)
-
-    for opt, arg in opts:
-        if opt in ('-h', '--help'):
-            usage(0)
-        elif opt in ('-v', '--verbose'):
-            VERBOSE += 1
-        elif opt in ('-f', '--file'):
-            file = arg
-        elif opt in ('-r', '--reset'):
-            reset = True
-        elif opt == '-0':
-            seek = False
-
-    if reset:
-        # Blow away the existing state file and exit
-        try:
-            os.unlink(file)
-            if VERBOSE:
-                print 'removing pickle state file', file
-        except OSError, e:
-            if e.errno <> errno.ENOENT:
-                raise
-        return
-
-    if not args:
-        usage(1, 'logfile is required')
-    if len(args) > 1:
-        usage(1, 'too many arguments: %s' % COMMASPACE.join(args))
-
-    path = args[0]
-
-    # Get the previous status object from the pickle file, if it is available
-    # and if the --reset flag wasn't given.
-    status = None
-    try:
-        statefp = open(file, 'rb')
-        try:
-            status = pickle.load(statefp)
-            if VERBOSE:
-                print 'reading status from file', file
-        finally:
-            statefp.close()
-    except IOError, e:
-        if e.errno <> errno.ENOENT:
-            raise
-    if status is None:
-        status = Status()
-        if VERBOSE:
-            print 'using new status'
-
-    if not seek:
-        status.pos = 0
-
-    fp = open(path, 'rb')
-    try:
-        status.process_file(fp)
-    finally:
-        fp.close()
-    # Save state
-    statefp = open(file, 'wb')
-    pickle.dump(status, statefp, 1)
-    statefp.close()
-    # Print the report and return the number of blocked clients in the exit
-    # status code.
-    status.report()
-    sys.exit(status.n_blocked)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/scripts/zeoreplay.py b/branches/bug1734/src/scripts/zeoreplay.py
deleted file mode 100644
index 6527eefa..00000000
--- a/branches/bug1734/src/scripts/zeoreplay.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Parse the BLATHER logging generated by ZEO, and optionally replay it.
-
-Usage: zeoreplay.py [options]
-
-Options:
-
-    --help / -h
-        Print this message and exit.
-
-    --replay=storage
-    -r storage
-        Replay the parsed transactions through the new storage
-
-    --maxtxn=count
-    -m count
-        Parse no more than count transactions.
-
-    --report / -p
-        Print a report as we're parsing.
-
-Unlike parsezeolog.py, this script generates timestamps for each transaction
-and for each sub-command within the transaction.  We can use this to compare
-timings with synthesized data.
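-
-Example (paths are hypothetical):
-
-    python zeoreplay.py --report --replay=/tmp/replay.fs < zeo.log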
-"""
-
-import re
-import sys
-import time
-import getopt
-import operator
-# ZEO logs measure wall-clock time so for consistency we need to do the same
-#from time import clock as now
-from time import time as now
-
-from ZODB.FileStorage import FileStorage
-#from BDBStorage.BDBFullStorage import BDBFullStorage
-#from Standby.primary import PrimaryStorage
-#from Standby.config import RS_PORT
-from ZODB.Transaction import Transaction
-from ZODB.utils import p64
-
-datecre = re.compile(r'(\d\d\d\d-\d\d-\d\d)T(\d\d:\d\d:\d\d)')
-methcre = re.compile(r"ZEO Server (\w+)\((.*)\) \('(.*)', (\d+)")
-
-class StopParsing(Exception):
-    pass
-
-
-
-def usage(code, msg=''):
-    print __doc__
-    if msg:
-        print msg
-    sys.exit(code)
-
-
-
-def parse_time(line):
-    """Return the time portion of a zLOG line in seconds or None."""
-    mo = datecre.match(line)
-    if mo is None:
-        return None
-    date, time_ = mo.group(1, 2)
-    date_l = [int(elt) for elt in date.split('-')]
-    time_l = [int(elt) for elt in time_.split(':')]
-    return int(time.mktime(date_l + time_l + [0, 0, 0]))
-
-
-def parse_line(line):
-    """Parse a log entry and return time, method info, and client."""
-    t = parse_time(line)
-    if t is None:
-        return None, None, None
-    mo = methcre.search(line)
-    if mo is None:
-        return None, None, None
-    meth_name = mo.group(1)
-    meth_args = mo.group(2)
-    meth_args = [s.strip() for s in meth_args.split(',')]
-    m = meth_name, tuple(meth_args)
-    c = mo.group(3), mo.group(4)
-    return t, m, c
-
-
-
-class StoreStat:
-    def __init__(self, when, oid, size):
-        self.when = when
-        self.oid = oid
-        self.size = size
-
-    # Crufty
-    def __getitem__(self, i):
-        if i == 0: return self.oid
-        if i == 1: return self.size
-        raise IndexError
-
-
-class TxnStat:
-    def __init__(self):
-        self._begintime = None
-        self._finishtime = None
-        self._aborttime = None
-        self._url = None
-        self._objects = []
-
-    def tpc_begin(self, when, args, client):
-        self._begintime = when
-        # args are txnid, user, description (looks like it's always a url)
-        self._url = args[2]
-
-    def storea(self, when, args, client):
-        oid = int(args[0])
-        # args[1] is "[numbytes]"
-        size = int(args[1][1:-1])
-        s = StoreStat(when, oid, size)
-        self._objects.append(s)
-
-    def tpc_abort(self, when):
-        self._aborttime = when
-
-    def tpc_finish(self, when):
-        self._finishtime = when
-
-
-
-# Mapping oid -> revid
-_revids = {}
-
-class ReplayTxn(TxnStat):
-    def __init__(self, storage):
-        self._storage = storage
-        self._replaydelta = 0
-        TxnStat.__init__(self)
-
-    def replay(self):
-        ZERO = '\0'*8
-        t0 = now()
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        for obj in self._objects:
-            oid = obj.oid
-            revid = _revids.get(oid, ZERO)
-            # BAW: simulate a pickle of the given size
-            data = 'x' * obj.size
-            # BAW: ignore versions for now
-            newrevid = self._storage.store(p64(oid), revid, data, '', t)
-            _revids[oid] = newrevid
-        if self._aborttime:
-            self._storage.tpc_abort(t)
-            origdelta = self._aborttime - self._begintime
-        else:
-            self._storage.tpc_vote(t)
-            self._storage.tpc_finish(t)
-            origdelta = self._finishtime - self._begintime
-        t1 = now()
-        # Shows how many seconds slower (positive) or faster (negative)
-        # our local replay was compared to the original run
-        self._replaydelta = t1 - t0 - origdelta
-
-
-
-class ZEOParser:
-    def __init__(self, maxtxns=-1, report=1, storage=None):
-        self.__txns = []
-        self.__curtxn = {}
-        self.__skipped = 0
-        self.__maxtxns = maxtxns
-        self.__finishedtxns = 0
-        self.__report = report
-        self.__storage = storage
-
-    def parse(self, line):
-        t, m, c = parse_line(line)
-        if t is None:
-            # Skip this line
-            return
-        name = m[0]
-        meth = getattr(self, name, None)
-        if meth is not None:
-            meth(t, m[1], c)
-
-    def tpc_begin(self, when, args, client):
-        txn = ReplayTxn(self.__storage)
-        self.__curtxn[client] = txn
-        meth = getattr(txn, 'tpc_begin', None)
-        if meth is not None:
-            meth(when, args, client)
-
-    def storea(self, when, args, client):
-        txn = self.__curtxn.get(client)
-        if txn is None:
-            self.__skipped += 1
-            return
-        meth = getattr(txn, 'storea', None)
-        if meth is not None:
-            meth(when, args, client)
-
-    def tpc_finish(self, when, args, client):
-        txn = self.__curtxn.get(client)
-        if txn is None:
-            self.__skipped += 1
-            return
-        meth = getattr(txn, 'tpc_finish', None)
-        if meth is not None:
-            meth(when)
-        if self.__report:
-            self.report(txn)
-        self.__txns.append(txn)
-        self.__curtxn[client] = None
-        self.__finishedtxns += 1
-        if self.__maxtxns > 0 and self.__finishedtxns >= self.__maxtxns:
-            raise StopParsing
-
-    def report(self, txn):
-        """Print a report about the transaction"""
-        if txn._objects:
-            bytes = reduce(operator.add, [size for oid, size in txn._objects])
-        else:
-            bytes = 0
-        print '%s %s %4d %10d %s %s' % (
-            txn._begintime, txn._finishtime - txn._begintime,
-            len(txn._objects),
-            bytes,
-            time.ctime(txn._begintime),
-            txn._url)
-
-    def replay(self):
-        for txn in self.__txns:
-            txn.replay()
-        # How many fell behind?
-        slower = []
-        faster = []
-        for txn in self.__txns:
-            if txn._replaydelta > 0:
-                slower.append(txn)
-            else:
-                faster.append(txn)
-        print len(slower), 'laggards,', len(faster), 'on-time or faster'
-        # Find some averages
-        if slower:
-            sum = reduce(operator.add,
-                         [txn._replaydelta for txn in slower], 0)
-            print 'average slower txn was:', float(sum) / len(slower)
-        if faster:
-            sum = reduce(operator.add,
-                         [txn._replaydelta for txn in faster], 0)
-            print 'average faster txn was:', float(sum) / len(faster)
-
-
-
-def main():
-    try:
-        opts, args = getopt.getopt(
-            sys.argv[1:],
-            'hr:pm:',
-            ['help', 'replay=', 'report', 'maxtxns='])
-    except getopt.error, e:
-        usage(1, e)
-
-    if args:
-        usage(1)
-
-    replay = 0
-    maxtxns = -1
-    report = 0
-    storagefile = None
-    for opt, arg in opts:
-        if opt in ('-h', '--help'):
-            usage(0)
-        elif opt in ('-r', '--replay'):
-            replay = 1
-            storagefile = arg
-        elif opt in ('-p', '--report'):
-            report = 1
-        elif opt in ('-m', '--maxtxns'):
-            try:
-                maxtxns = int(arg)
-            except ValueError:
-                usage(1, 'Bad -m argument: %s' % arg)
-
-    if replay:
-        storage = FileStorage(storagefile)
-        #storage = BDBFullStorage(storagefile)
-        #storage = PrimaryStorage('yyz', storage, RS_PORT)
-    t0 = now()
-    p = ZEOParser(maxtxns, report, storage)
-    i = 0
-    while 1:
-        line = sys.stdin.readline()
-        if not line:
-            break
-        i += 1
-        try:
-            p.parse(line)
-        except StopParsing:
-            break
-        except:
-            print 'input file line:', i
-            raise
-    t1 = now()
-    print 'total parse time:', t1-t0
-    t2 = now()
-    if replay:
-        p.replay()
-    t3 = now()
-    print 'total replay time:', t3-t2
-    print 'total time:', t3-t0
-
-
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/scripts/zeoserverlog.py b/branches/bug1734/src/scripts/zeoserverlog.py
deleted file mode 100644
index 81fd80f1..00000000
--- a/branches/bug1734/src/scripts/zeoserverlog.py
+++ /dev/null
@@ -1,538 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tools for analyzing ZEO Server logs.
-
-This script contains a number of commands, implemented by command
-functions. To run a command, give the command name and it's arguments
-as arguments to this script.
-
-Commands:
-
-  blocked_times file threshold
-
-     Output a summary of episodes where transactions were blocked
-     when the episode lasted at least threshold seconds.
-
-     The file may be a file name or - to read from standard input.
-     The file may also be a command:
-
-       script blocked_times 'bunzip2 <foo.log.bz2' 60
-
-     If the file is a command, it must contain at least a single
-     space.
-
-     The columns of output are:
-
-     - The time the episode started
-
-     - The seconds from the start of the episode until the blocking
-       transaction finished.
-
-     - The client id (host and port) of the blocking transaction.
-
-     - The seconds from the start of the episode until the end of the
-       episode.
-
-  time_calls file threshold
-
-     Time how long calls took. Note that this is normally combined
-     with grep to time just a particular kind of call:
-
-       script time_calls 'bunzip2 <foo.log.bz2 | grep tpc_finish' 10
-
-     The columns of output are:
-
-     - The time of the call invocation
-
-     - The seconds from the call to the return
-
-     - The client that made the call.
-
-  time_trans file threshold
-
-    Output a summary of transactions that held the global transaction
-    lock for at least threshold seconds. (This is the time from when
-    voting starts until the transaction is completed by the server.)
-
-    The columns of output are:
-
-    - time that the vote started.
-
-    - client id
-
-    - number of objects written / number of objects updated
-
-    - seconds from tpc_begin to vote start
-
-    - seconds spent voting
-
-    - vote status: n=normal, d=delayed, e=error
-
-    - seconds waiting between vote return and finish call
-
-    - time spent finishing or 'abort' if the transaction aborted
-
-  minute file
-
-    Compute production statistics by minute
-
-    The columns of output are:
-
-    - date/time
-
-    - Number of active clients
-
-    - number of reads
-
-    - number of stores
-
-    - number of commits (finish)
-
-    - number of aborts
-
-    - number of transactions (commits + aborts)
-
-    Summary statistics are printed at the end
-
-  minutes file
-
-    Show just the summary statistics for production by minute.
-
-  hour file
-
-    Compute production statistics by hour
-
-  hours file
-
-    Show just the summary statistics for production by hour.
-
-  day file
-
-    Compute production statistics by day
-
-  days file
-
-    Show just the summary statistics for production by day.
-
-  verify file
-
-    Compute verification statistics
-
-    The columns of output are:
-
-    - client id
-    - verification start time
-    - number of objects verified
-    - wall time to verify
-    - average milliseconds to verify per object.
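-
-    For example (log file names are hypothetical):
-
-      script minute foo.log
-      script verify 'bunzip2 <foo.log.bz2'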
-
-$Id$
-"""
-
-import datetime, sys, re, os
-
-
-def time(line):
-    d = line[:10]
-    t = line[11:19]
-    y, mo, d = map(int, d.split('-'))
-    h, mi, s = map(int, t.split(':'))
-    return datetime.datetime(y, mo, d, h, mi, s)
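-
-# For example, given a line beginning '2002-12-16T06:16:05 ...', time()
-# returns datetime.datetime(2002, 12, 16, 6, 16, 5).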
-
-
-def sub(t1, t2):
-    delta = t2 - t1
-    return delta.days*86400.0+delta.seconds+delta.microseconds/1000000.0
-
-
-
-waitre = re.compile(r'Clients waiting: (\d+)')
-idre = re.compile(r' ZSS:\d+/(\d+\.\d+\.\d+\.\d+:\d+) ')
-def blocked_times(args):
-    f, thresh = args
-
-    t1 = t2 = cid = blocking = waiting = 0
-    last_blocking = False
-
-    thresh = int(thresh)
-
-    for line in xopen(f):
-        line = line.strip()
-
-        if line.endswith('Blocked transaction restarted.'):
-            blocking = False
-            waiting = 0
-        else:
-            s = waitre.search(line)
-            if not s:
-                continue
-            waiting = int(s.group(1))
-            blocking = line.find(
-                'Transaction blocked waiting for storage') >= 0
-
-        if blocking and waiting == 1:
-            t1 = time(line)
-            t2 = t1
-
-        if not blocking and last_blocking:
-            last_wait = 0
-            t2 = time(line)
-            cid = idre.search(line).group(1)
-
-        if waiting == 0:
-            d = sub(t1, time(line))
-            if d >= thresh:
-                print t1, sub(t1, t2), cid, d
-            t1 = t2 = cid = blocking = waiting = last_wait = max_wait = 0
-
-        last_blocking = blocking
-
-connidre = re.compile(r' zrpc-conn:(\d+\.\d+\.\d+\.\d+:\d+) ')
-def time_calls(f):
-    f, thresh = f
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    thresh = float(thresh)
-    t1 = None
-    maxd = 0
-
-    for line in f:
-        line = line.strip()
-
-        if ' calling ' in line:
-            t1 = time(line)
-        elif ' returns ' in line and t1 is not None:
-            d = sub(t1, time(line))
-            if d >= thresh:
-                print t1, d, connidre.search(line).group(1)
-            maxd = max(maxd, d)
-            t1 = None
-
-    print maxd
-
-def xopen(f):
-    if f == '-':
-        return sys.stdin
-    if ' ' in f:
-        return os.popen(f, 'r')
-    return open(f)
-
-def time_tpc(f):
-    f, thresh = f
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    thresh = float(thresh)
-    transactions = {}
-
-    for line in f:
-        line = line.strip()
-
-        if ' calling vote(' in line:
-            cid = connidre.search(line).group(1)
-            transactions[cid] = time(line),
-        elif ' vote returns None' in line:
-            cid = connidre.search(line).group(1)
-            transactions[cid] += time(line), 'n'
-        elif ' vote() raised' in line:
-            cid = connidre.search(line).group(1)
-            transactions[cid] += time(line), 'e'
-        elif ' vote returns ' in line:
-            # delayed, skip
-            cid = connidre.search(line).group(1)
-            transactions[cid] += time(line), 'd'
-        elif ' calling tpc_abort(' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                t1, t2, vs = transactions[cid]
-                t = time(line)
-                d = sub(t1, t)
-                if d >= thresh:
-                    print 'a', t1, cid, sub(t1, t2), vs, sub(t2, t)
-                del transactions[cid]
-        elif ' calling tpc_finish(' in line:
-            if cid in transactions:
-                cid = connidre.search(line).group(1)
-                transactions[cid] += time(line),
-        elif ' tpc_finish returns ' in line:
-            if cid in transactions:
-                t1, t2, vs, t3 = transactions[cid]
-                t = time(line)
-                d = sub(t1, t)
-                if d >= thresh:
-                    print 'c', t1, cid, sub(t1, t2), vs, sub(t2, t3), sub(t3, t)
-                del transactions[cid]
-
-
-newobre = re.compile(r"storea\(.*, '\\x00\\x00\\x00\\x00\\x00")
-def time_trans(f):
-    f, thresh = f
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    thresh = float(thresh)
-    transactions = {}
-
-    for line in f:
-        line = line.strip()
-
-        if ' calling tpc_begin(' in line:
-            cid = connidre.search(line).group(1)
-            transactions[cid] = time(line), [0, 0]
-        if ' calling storea(' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid][1][0] += 1
-                if not newobre.search(line):
-                    transactions[cid][1][1] += 1
-
-        elif ' calling vote(' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid] += time(line),
-        elif ' vote returns None' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid] += time(line), 'n'
-        elif ' vote() raised' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid] += time(line), 'e'
-        elif ' vote returns ' in line:
-            # delayed, skip
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid] += time(line), 'd'
-        elif ' calling tpc_abort(' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                try:
-                    t0, (stores, old), t1, t2, vs = transactions[cid]
-                except ValueError:
-                    pass
-                else:
-                    t = time(line)
-                    d = sub(t1, t)
-                    if d >= thresh:
-                        print t1, cid, "%s/%s" % (stores, old), \
-                              sub(t0, t1), sub(t1, t2), vs, \
-                              sub(t2, t), 'abort'
-                del transactions[cid]
-        elif ' calling tpc_finish(' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                transactions[cid] += time(line),
-        elif ' tpc_finish returns ' in line:
-            cid = connidre.search(line).group(1)
-            if cid in transactions:
-                t0, (stores, old), t1, t2, vs, t3 = transactions[cid]
-                t = time(line)
-                d = sub(t1, t)
-                if d >= thresh:
-                    print t1, cid, "%s/%s" % (stores, old), \
-                          sub(t0, t1), sub(t1, t2), vs, \
-                          sub(t2, t3), sub(t3, t)
-                del transactions[cid]
-
-def minute(f, slice=16, detail=1, summary=1):
-    f, = f
-
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    cols = ["time", "reads", "stores", "commits", "aborts", "txns"]
-    fmt = "%18s %6s %6s %7s %6s %6s"
-    print fmt % cols
-    print fmt % ["-"*len(col) for col in cols]
-
-    mlast = r = s = c = a = cl = None
-    rs = []
-    ss = []
-    cs = []
-    as_ = []
-    ts = []
-    cls = []
-
-    for line in f:
-        line = line.strip()
-        if (line.find('returns') > 0
-            or line.find('storea') > 0
-            or line.find('tpc_abort') > 0
-            ):
-            client = connidre.search(line).group(1)
-            m = line[:slice]
-            if m != mlast:
-                if mlast:
-                    if detail:
-                        print fmt % (mlast, len(cl), r, s, c, a, a+c)
-                    cls.append(len(cl))
-                    rs.append(r)
-                    ss.append(s)
-                    cs.append(c)
-                    as_.append(a)
-                    ts.append(c+a)
-                mlast = m
-                r = s = c = a = 0
-                cl = {}
-            if line.find('zeoLoad') > 0:
-                r += 1
-                cl[client] = 1
-            elif line.find('storea') > 0:
-                s += 1
-                cl[client] = 1
-            elif line.find('tpc_finish') > 0:
-                c += 1
-                cl[client] = 1
-            elif line.find('tpc_abort') > 0:
-                a += 1
-                cl[client] = 1
-
-    if mlast:
-        if detail:
-            print fmt % (mlast, len(cl), r, s, c, a, a+c)
-        cls.append(len(cl))
-        rs.append(r)
-        ss.append(s)
-        cs.append(c)
-        as_.append(a)
-        ts.append(c+a)
-
-    if summary:
-        print
-        print 'Summary:     \t', '\t'.join(('min', '10%', '25%', 'med',
-                                            '75%', '90%', 'max', 'mean'))
-        print "n=%6d\t" % len(cls), '-'*62
-        print 'Clients: \t', '\t'.join(map(str,stats(cls)))
-        print 'Reads:   \t', '\t'.join(map(str,stats( rs)))
-        print 'Stores:  \t', '\t'.join(map(str,stats( ss)))
-        print 'Commits: \t', '\t'.join(map(str,stats( cs)))
-        print 'Aborts:  \t', '\t'.join(map(str,stats(as_)))
-        print 'Trans:   \t', '\t'.join(map(str,stats( ts)))
-
-def stats(s):
-    s.sort()
-    min = s[0]
-    max = s[-1]
-    n = len(s)
-    out = [min]
-    ni = n + 1
-    for p in .1, .25, .5, .75, .90:
-        lp = ni*p
-        l = int(lp)
-        if lp < 1 or lp > n:
-            out.append('-')
-        elif abs(lp-l) < .00001:
-            out.append(s[l-1])
-        else:
-            out.append(int(s[l-1] + (lp - l) * (s[l] - s[l-1])))
-
-    mean = 0.0
-    for v in s:
-        mean += v
-
-    out.extend([max, int(mean/n)])
-
-    return out
-
-def minutes(f):
-    minute(f, 16, detail=0)
-
-def hour(f):
-    minute(f, 13)
-
-def day(f):
-    minute(f, 10)
-
-def hours(f):
-    minute(f, 13, detail=0)
-
-def days(f):
-    minute(f, 10, detail=0)
-
-
-new_connection_idre = re.compile(r"new connection \('(\d+\.\d+\.\d+\.\d+)', (\d+)\):")
-def verify(f):
-    f, = f
-
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    t1 = None
-    nv = {}
-    for line in f:
-        if line.find('new connection') > 0:
-            m = new_connection_idre.search(line)
-            cid = "%s:%s" % (m.group(1), m.group(2))
-            nv[cid] = [time(line), 0]
-        elif line.find('calling zeoVerify(') > 0:
-            cid = connidre.search(line).group(1)
-            nv[cid][1] += 1
-        elif line.find('calling endZeoVerify()') > 0:
-            cid = connidre.search(line).group(1)
-            t1, n = nv[cid]
-            if n:
-                d = sub(t1, time(line))
-                print cid, t1, n, d, n and (d*1000.0/n) or '-'
-
-def recovery(f):
-    f, = f
-
-    if f == '-':
-        f = sys.stdin
-    else:
-        f = xopen(f)
-
-    last = ''
-    trans = []
-    n = 0
-    for line in f:
-        n += 1
-        if line.find('RecoveryServer') < 0:
-            continue
-        l = line.find('sending transaction ')
-        if l > 0 and last.find('sending transaction ') > 0:
-            trans.append(line[l+20:].strip())
-        else:
-            if trans:
-                if len(trans) > 1:
-                    print "  ... %s similar records skipped ..." % (
-                        len(trans) - 1)
-                    print n, last.strip()
-                trans=[]
-            print n, line.strip()
-        last = line
-
-    if len(trans) > 1:
-        print "  ... %s similar records skipped ..." % (
-            len(trans) - 1)
-        print n, last.strip()
-
-
-
-if __name__ == '__main__':
-    globals()[sys.argv[1]](sys.argv[2:])
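
The stats() helper above ranks on a 1-based fractional index, lp = (n+1)*p,
and linearly interpolates between the two neighboring order statistics,
truncating the result to an int.  A standalone sketch of the same percentile
rule, using made-up sample data and nothing beyond the stdlib:

    def percentile(s, p):
        s = list(s)
        s.sort()
        n = len(s)
        lp = (n + 1) * p      # 1-based fractional rank
        l = int(lp)
        if lp < 1 or lp > n:
            return None       # outside the sample; stats() prints '-'
        if abs(lp - l) < .00001:
            return s[l - 1]   # lands exactly on an order statistic
        return s[l - 1] + (lp - l) * (s[l] - s[l - 1])

    if __name__ == '__main__':
        data = [3, 1, 4, 1, 5, 9, 2, 6]
        for p in .1, .25, .5, .75, .90:
            print p, percentile(data, p)
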
diff --git a/branches/bug1734/src/scripts/zeoup.py b/branches/bug1734/src/scripts/zeoup.py
deleted file mode 100755
index 772cbefe..00000000
--- a/branches/bug1734/src/scripts/zeoup.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/env python2.3
-
-"""Make sure a ZEO server is running.
-
-usage: zeoup.py [options]
-
-The test will connect to a ZEO server, load the root object, and attempt to
-update the zeoup counter in the root.  It will report success if it updates
-the counter or if it gets a ConflictError.  A ConflictError is considered a
-success, because the client was able to start a transaction.
-
-Options:
-
-    -p port -- port to connect to
-
-    -h host -- host to connect to (default is current host)
-
-    -S storage -- storage name (default '1')
-
-    -U path -- Unix-domain socket to connect to
-
-    --nowrite -- Do not update the zeoup counter.
-
-    -1 -- Connect to a ZEO 1.0 server.
-
-You must specify either -p and -h or -U.
-"""
-
-import getopt
-import socket
-import sys
-import time
-
-from persistent.mapping import PersistentMapping
-import transaction
-
-import ZODB
-from ZODB.POSException import ConflictError
-from ZODB.tests.MinPO import MinPO
-from ZEO.ClientStorage import ClientStorage
-
-ZEO_VERSION = 2
-
-def check_server(addr, storage, write):
-    t0 = time.time()
-    if ZEO_VERSION == 2:
-        # TODO:  should do retries w/ exponential backoff.
-        cs = ClientStorage(addr, storage=storage, wait=0,
-                           read_only=(not write))
-    else:
-        cs = ClientStorage(addr, storage=storage, debug=1,
-                           wait_for_server_on_startup=1)
-    # _startup() is an artifact of the way ZEO 1.0 works.  The
-    # ClientStorage doesn't get fully initialized until registerDB()
-    # is called.  The only thing we care about, though, is that
-    # registerDB() calls _startup().
-
-    if write:
-        db = ZODB.DB(cs)
-        cn = db.open()
-        root = cn.root()
-        try:
-            # We store the data in a special `monitor' dict under the root,
-            # where other tools may also store such heartbeat and bookkeeping
-            # type data.
-            monitor = root.get('monitor')
-            if monitor is None:
-                monitor = root['monitor'] = PersistentMapping()
-            obj = monitor['zeoup'] = monitor.get('zeoup', MinPO(0))
-            obj.value += 1
-            transaction.commit()
-        except ConflictError:
-            pass
-        cn.close()
-        db.close()
-    else:
-        data, serial = cs.load("\0\0\0\0\0\0\0\0", "")
-        cs.close()
-    t1 = time.time()
-    print "Elapsed time: %.2f" % (t1 - t0)
-
-def usage(exit=1):
-    print __doc__
-    print " ".join(sys.argv)
-    sys.exit(exit)
-
-def main():
-    global ZEO_VERSION
-    host = None
-    port = None
-    unix = None
-    write = 1
-    storage = '1'
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], 'p:h:U:S:1',
-                                   ['nowrite'])
-        for o, a in opts:
-            if o == '-p':
-                port = int(a)
-            elif o == '-h':
-                host = a
-            elif o == '-U':
-                unix = a
-            elif o == '-S':
-                storage = a
-            elif o == '--nowrite':
-                write = 0
-            elif o == '-1':
-                ZEO_VERSION = 1
-    except Exception, err:
-        s = str(err)
-        if s:
-            s = ": " + s
-        print err.__class__.__name__ + s
-        usage()
-
-    if unix is not None:
-        addr = unix
-    else:
-        if host is None:
-            host = socket.gethostname()
-        if port is None:
-            usage()
-        addr = host, port
-
-    check_server(addr, storage, write)
-
-if __name__ == "__main__":
-    try:
-        main()
-    except SystemExit:
-        raise
-    except Exception, err:
-        s = str(err)
-        if s:
-            s = ": " + s
-        print err.__class__.__name__ + s
-        sys.exit(1)
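
check_server() above carries a TODO about retrying the initial ClientStorage
connection with exponential backoff.  A minimal sketch of that retry shape;
the function name, attempt count, and delays are illustrative, not part of
the script:

    import time

    def retry_with_backoff(connect, attempts=5, initial_delay=0.5):
        # connect is any zero-argument callable that may raise on failure.
        delay = initial_delay
        for i in range(attempts):
            try:
                return connect()
            except Exception:
                if i == attempts - 1:
                    raise          # out of retries; propagate the error
                time.sleep(delay)
                delay *= 2         # double the wait between attempts
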
diff --git a/branches/bug1734/src/scripts/zodbload.py b/branches/bug1734/src/scripts/zodbload.py
deleted file mode 100644
index 6318385a..00000000
--- a/branches/bug1734/src/scripts/zodbload.py
+++ /dev/null
@@ -1,842 +0,0 @@
-#!/usr/bin/env python2.3
-
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test script for testing ZODB under a heavy zope-like load.
-
-Note that, to be as realistic as possible with ZEO, you should run this
-script multiple times, to simulate multiple clients.
-
-Here's how this works.
-
-The script starts some number of threads.  Each thread sequentially
-executes jobs.  There is a job producer that produces jobs.
-
-Input data are provided by a mail producer that hands out messages
-from a mailbox.
-
-Execution continues until there is an error, which will normally occur
-when the mailbox is exhausted.
-
-Command-line options are used to provide job definitions. Job
-definitions have parameters of the form name=value.  Jobs have two
-standard parameters:
-
-  frequency=integer
-
-     The frequency of the job. The default is 1.
-
-  sleep=float
-
-     The number of seconds to sleep before performing the job. The
-     default is 0.
-
-Usage: zodbload.py [options]
-
-  Options:
-
-    -edit [frequency=integer] [sleep=float]
-
-       Define an edit job. An edit job edits a random already-saved
-       email message, deleting and inserting a random number of words.
-
-       After editing the message, the message is (re)cataloged.
-
-    -insert [number=int] [frequency=integer] [sleep=float]
-
-       Insert some number of email messages.
-
-    -index [number=int] [frequency=integer] [sleep=float]
-
-       Insert and index (catalog) some number of email messages.
-
-    -search [terms='word1 word2 ...'] [frequency=integer] [sleep=float]
-
-       Search the catalog. A query is given with one or more terms as
-       would be entered into a typical search box.  If no query is
-       given, then queries will be randomly selected from a built-in
-       word list.
-
-    -setup
-
-       Set up the database. This will delete any existing Data.fs
-       file.  (Of course, this may have no effect if there is a
-       custom_zodb that defines a different storage.)  It also adds a
-       mail folder and a catalog.
-
-    -options file
-
-       Read options from the given file.  The file should be a Python
-       source file that defines a sequence of options named 'options'.
-
-    -threads n
-
-       Specify the number of threads to execute.  If not specified, or
-       fewer than 2, jobs are run in a single (main) thread.
-
-    -mbox filename
-
-       Specify the mailbox for getting input data.
-
-       There is a (lame) syntax for providing options within the
-       filename. The filename may be followed by up to 3 integers,
-       min, max, and start:
-
-         -mbox 'foo.mbox 0 100 10000'
-
-       The messages from min to max will be read from the mailbox.
-       They will be assigned message numbers starting with start.
-       So, in the example above, we read the first hundred messages
-       and assign them message numbers starting with 10001.
-
-       The maximum can be given as a negative number, in which case it
-       specifies the number of messages to read.
-
-       The start defaults to the minimum. The following two options:
-
-         -mbox 'foo.mbox 300 400 300'
-
-       and
-
-         -mbox 'foo.mbox 300 -100'
-
-       are equivalent.
-
-$Id$
-"""
-
-import mailbox
-import math
-import os
-import random
-import re
-import sys
-import threading
-import time
-import transaction
-
-class JobProducer:
-
-    def __init__(self):
-        self.jobs = []
-
-    def add(self, callable, frequency, sleep, repeatp=0):
-        self.jobs.extend([(callable, sleep, repeatp)] * int(frequency))
-        random.shuffle(self.jobs)
-
-    def next(self):
-        factory, sleep, repeatp = random.choice(self.jobs)
-        time.sleep(sleep)
-        callable, args = factory.create()
-        return factory, callable, args, repeatp
-
-    def __nonzero__(self):
-        return not not self.jobs
-
-
-
-class MBox:
-
-    def __init__(self, filename):
-        if ' ' in filename:
-            filename = filename.split()
-            if len(filename) < 4:
-                filename += [0, 0, -1][-(4-len(filename)):]
-            filename, min, max, start = filename
-            min = int(min)
-            max = int(max)
-            start = int(start)
-
-            if start < 0:
-                start = min
-
-            if max < 0:
-                # negative max is treated as a count
-                self._max = start - max
-            elif max > 0:
-                self._max = start + max - min
-            else:
-                self._max = 0
-
-        else:
-            self._max = 0
-            min = start = 0
-
-        if filename.endswith('.bz2'):
-            f = os.popen("bunzip2 <"+filename, 'r')
-            filename = filename[:-4]
-        else:
-            f = open(filename)
-
-        self._mbox = mb = mailbox.UnixMailbox(f)
-
-        self.number = start
-        while min:
-            mb.next()
-            min -= 1
-
-        self._lock = threading.Lock()
-        self.__name__ = os.path.splitext(os.path.split(filename)[1])[0]
-
-    def next(self):
-        self._lock.acquire()
-        try:
-            if self._max > 0 and self.number >= self._max:
-                raise IndexError(self.number + 1)
-            message = self._mbox.next()
-            message.body = message.fp.read()
-            message.headers = list(message.headers)
-            self.number += 1
-            message.number = self.number
-            message.mbox = self.__name__
-            return message
-        finally:
-            self._lock.release()
-
-bins = 9973
-#bins = 11
-def mailfolder(app, mboxname, number):
-    mail = getattr(app, mboxname, None)
-    if mail is None:
-        app.manage_addFolder(mboxname)
-        mail = getattr(app, mboxname)
-        from BTrees.Length import Length
-        mail.length = Length()
-        for i in range(bins):
-            mail.manage_addFolder('b'+str(i))
-    bin = hash(str(number))%bins
-    return getattr(mail, 'b'+str(bin))
-
-
-def VmSize():
-
-    try:
-        f = open('/proc/%s/status' % os.getpid())
-    except:
-        return 0
-    else:
-        l = filter(lambda l: l[:7] == 'VmSize:', f.readlines())
-        if l:
-            l = l[0][7:].strip().split()[0]
-            return int(l)
-    return 0
-
-def setup(lib_python):
-    try:
-        os.remove(os.path.join(lib_python, '..', '..', 'var', 'Data.fs'))
-    except:
-        pass
-    import Zope2
-    import Products
-    import AccessControl.SecurityManagement
-    app=Zope2.app()
-
-    Products.ZCatalog.ZCatalog.manage_addZCatalog(app, 'cat', '')
-
-    from Products.ZCTextIndex.ZCTextIndex import PLexicon
-    from Products.ZCTextIndex.Lexicon import Splitter, CaseNormalizer
-
-    app.cat._setObject('lex',
-                       PLexicon('lex', '', Splitter(), CaseNormalizer())
-                       )
-
-    class extra:
-        doc_attr = 'PrincipiaSearchSource'
-        lexicon_id = 'lex'
-        index_type = 'Okapi BM25 Rank'
-
-    app.cat.addIndex('PrincipiaSearchSource', 'ZCTextIndex', extra)
-
-    transaction.commit()
-
-    system = AccessControl.SpecialUsers.system
-    AccessControl.SecurityManagement.newSecurityManager(None, system)
-
-    app._p_jar.close()
-
-def do(db, f, args):
-    """Do something in a transaction, retrying of necessary
-
-    Measure the speed of both the compurartion and the commit
-    """
-    from ZODB.POSException import ConflictError
-    wcomp = ccomp = wcommit = ccommit = 0.0
-    rconflicts = wconflicts = 0
-    start = time.time()
-
-    while 1:
-        connection = db.open()
-        try:
-            transaction.begin()
-            t=time.time()
-            c=time.clock()
-            try:
-                try:
-                    r = f(connection, *args)
-                except ConflictError:
-                    rconflicts += 1
-                    transaction.abort()
-                    continue
-            finally:
-                wcomp += time.time() - t
-                ccomp += time.clock() - c
-
-            t=time.time()
-            c=time.clock()
-            try:
-                try:
-                    transaction.commit()
-                    break
-                except ConflictError:
-                    wconflicts += 1
-                    transaction.abort()
-                    continue
-            finally:
-                wcommit += time.time() - t
-                ccommit += time.clock() - c
-        finally:
-            connection.close()
-
-    return start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
-
-def run1(tid, db, factory, job, args):
-    (start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
-     ) = do(db, job, args)
-    start = "%.4d-%.2d-%.2d %.2d:%.2d:%.2d" % time.localtime(start)[:6]
-    print "%s %s %8.3g %8.3g %s %s\t%8.3g %8.3g %s %r" % (
-        start, tid, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit,
-        factory.__name__, r)
-
-def run(jobs, tid=''):
-    import Zope2
-    while 1:
-        factory, job, args, repeatp = jobs.next()
-        run1(tid, Zope2.DB, factory, job, args)
-        if repeatp:
-            while 1:
-                i = random.randint(0,100)
-                if i > repeatp:
-                    break
-                run1(tid, Zope2.DB, factory, job, args)
-
-
-def index(connection, messages, catalog, max):
-    app = connection.root()['Application']
-    for message in messages:
-        mail = mailfolder(app, message.mbox, message.number)
-
-        if max:
-            # Cheat and use folder implementation secrets
-            # to avoid having to read the old data
-            _objects = mail._objects
-            if len(_objects) >= max:
-                for d in _objects[:len(_objects)-max+1]:
-                    del mail.__dict__[d['id']]
-                mail._objects = _objects[len(_objects)-max+1:]
-
-        docid = 'm'+str(message.number)
-        mail.manage_addDTMLDocument(docid, file=message.body)
-
-        # increment the message count
-        getattr(app, message.mbox).length.change(1)
-
-        doc = mail[docid]
-        for h in message.headers:
-            h = h.strip()
-            l = h.find(':')
-            if l <= 0:
-                continue
-            name = h[:l].lower()
-            if name=='subject':
-                name='title'
-            v = h[l+1:].strip()
-            type='string'
-
-            if name=='title':
-                doc.manage_changeProperties(title=h)
-            else:
-                try:
-                    doc.manage_addProperty(name, v, type)
-                except:
-                    pass
-        if catalog:
-            app.cat.catalog_object(doc)
-
-    return message.number
-
-class IndexJob:
-    needs_mbox = 1
-    catalog = 1
-    prefix = 'index'
-
-    def __init__(self, mbox, number=1, max=0):
-        self.__name__ = "%s%s_%s" % (self.prefix, number, mbox.__name__)
-        self.mbox, self.number, self.max = mbox, int(number), int(max)
-
-    def create(self):
-        messages = [self.mbox.next() for i in range(self.number)]
-        return index, (messages, self.catalog, self.max)
-
-
-class InsertJob(IndexJob):
-    catalog = 0
-    prefix = 'insert'
-
-wordre = re.compile(r'(\w{3,20})')
-stop = 'and', 'not'
-def edit(connection, mbox, catalog=1):
-    app = connection.root()['Application']
-    mail = getattr(app, mbox.__name__, None)
-    if mail is None:
-        time.sleep(1)
-        return "No mailbox %s" % mbox.__name__
-
-    nmessages = mail.length()
-    if nmessages < 2:
-        time.sleep(1)
-        return "No messages to edit in %s" % mbox.__name__
-
-    # find a message to edit:
-    while 1:
-        number = random.randint(1, nmessages-1)
-        did = 'm' + str(number)
-
-        mail = mailfolder(app, mbox.__name__, number)
-        doc = getattr(mail, did, None)
-        if doc is not None:
-            break
-
-    text = doc.raw.split()
-    norig = len(text)
-    if norig > 10:
-        ndel = int(math.exp(random.randint(0, int(math.log(norig)))))
-        nins = int(math.exp(random.randint(0, int(math.log(norig)))))
-    else:
-        ndel = 0
-        nins = 10
-
-    for j in range(ndel):
-        j = random.randint(0,len(text)-1)
-        word = text[j]
-        m = wordre.search(word)
-        if m:
-            word = m.group(1).lower()
-            if (not wordsd.has_key(word)) and word not in stop:
-                words.append(word)
-                wordsd[word] = 1
-        del text[j]
-
-    for j in range(nins):
-        word = random.choice(words)
-        text.append(word)
-
-    doc.raw = ' '.join(text)
-
-    if catalog:
-        app.cat.catalog_object(doc)
-
-    return norig, ndel, nins
-
-class EditJob:
-    needs_mbox = 1
-    prefix = 'edit'
-    catalog = 1
-
-    def __init__(self, mbox):
-        self.__name__ = "%s_%s" % (self.prefix, mbox.__name__)
-        self.mbox = mbox
-
-    def create(self):
-        return edit, (self.mbox, self.catalog)
-
-class ModifyJob(EditJob):
-    prefix = 'modify'
-    catalog = 0
-
-
-def search(connection, terms, number):
-    app = connection.root()['Application']
-    cat = app.cat
-    n = 0
-
-    for i in number:
-        term = random.choice(terms)
-
-        results = cat(PrincipiaSearchSource=term)
-        n += len(results)
-        for result in results:
-            obj = result.getObject()
-            # Apparently, there is a bug in Zope that leads obj to be None
-            # on occasion.
-            if obj is not None:
-                obj.getId()
-
-    return n
-
-class SearchJob:
-
-    def __init__(self, terms='', number=10):
-
-        if terms:
-            terms = terms.split()
-            self.__name__ = "search_" + '_'.join(terms)
-            self.terms = terms
-        else:
-            self.__name__ = 'search'
-            self.terms = words
-
-        number = min(int(number), len(self.terms))
-        self.number = range(number)
-
-    def create(self):
-        return search, (self.terms, self.number)
-
-
-words=['banishment', 'indirectly', 'imprecise', 'peeks',
-'opportunely', 'bribe', 'sufficiently', 'Occidentalized', 'elapsing',
-'fermenting', 'listen', 'orphanage', 'younger', 'draperies', 'Ida',
-'cuttlefish', 'mastermind', 'Michaels', 'populations', 'lent',
-'cater', 'attentional', 'hastiness', 'dragnet', 'mangling',
-'scabbards', 'princely', 'star', 'repeat', 'deviation', 'agers',
-'fix', 'digital', 'ambitious', 'transit', 'jeeps', 'lighted',
-'Prussianizations', 'Kickapoo', 'virtual', 'Andrew', 'generally',
-'boatsman', 'amounts', 'promulgation', 'Malay', 'savaging',
-'courtesan', 'nursed', 'hungered', 'shiningly', 'ship', 'presides',
-'Parke', 'moderns', 'Jonas', 'unenlightening', 'dearth', 'deer',
-'domesticates', 'recognize', 'gong', 'penetrating', 'dependents',
-'unusually', 'complications', 'Dennis', 'imbalances', 'nightgown',
-'attached', 'testaments', 'congresswoman', 'circuits', 'bumpers',
-'braver', 'Boreas', 'hauled', 'Howe', 'seethed', 'cult', 'numismatic',
-'vitality', 'differences', 'collapsed', 'Sandburg', 'inches', 'head',
-'rhythmic', 'opponent', 'blanketer', 'attorneys', 'hen', 'spies',
-'indispensably', 'clinical', 'redirection', 'submit', 'catalysts',
-'councilwoman', 'kills', 'topologies', 'noxious', 'exactions',
-'dashers', 'balanced', 'slider', 'cancerous', 'bathtubs', 'legged',
-'respectably', 'crochets', 'absenteeism', 'arcsine', 'facility',
-'cleaners', 'bobwhite', 'Hawkins', 'stockade', 'provisional',
-'tenants', 'forearms', 'Knowlton', 'commit', 'scornful',
-'pediatrician', 'greets', 'clenches', 'trowels', 'accepts',
-'Carboloy', 'Glenn', 'Leigh', 'enroll', 'Madison', 'Macon', 'oiling',
-'entertainingly', 'super', 'propositional', 'pliers', 'beneficiary',
-'hospitable', 'emigration', 'sift', 'sensor', 'reserved',
-'colonization', 'shrilled', 'momentously', 'stevedore', 'Shanghaiing',
-'schoolmasters', 'shaken', 'biology', 'inclination', 'immoderate',
-'stem', 'allegory', 'economical', 'daytime', 'Newell', 'Moscow',
-'archeology', 'ported', 'scandals', 'Blackfoot', 'leery', 'kilobit',
-'empire', 'obliviousness', 'productions', 'sacrificed', 'ideals',
-'enrolling', 'certainties', 'Capsicum', 'Brookdale', 'Markism',
-'unkind', 'dyers', 'legislates', 'grotesquely', 'megawords',
-'arbitrary', 'laughing', 'wildcats', 'thrower', 'sex', 'devils',
-'Wehr', 'ablates', 'consume', 'gossips', 'doorways', 'Shari',
-'advanced', 'enumerable', 'existentially', 'stunt', 'auctioneers',
-'scheduler', 'blanching', 'petulance', 'perceptibly', 'vapors',
-'progressed', 'rains', 'intercom', 'emergency', 'increased',
-'fluctuating', 'Krishna', 'silken', 'reformed', 'transformation',
-'easter', 'fares', 'comprehensible', 'trespasses', 'hallmark',
-'tormenter', 'breastworks', 'brassiere', 'bladders', 'civet', 'death',
-'transformer', 'tolerably', 'bugle', 'clergy', 'mantels', 'satin',
-'Boswellizes', 'Bloomington', 'notifier', 'Filippo', 'circling',
-'unassigned', 'dumbness', 'sentries', 'representativeness', 'souped',
-'Klux', 'Kingstown', 'gerund', 'Russell', 'splices', 'bellow',
-'bandies', 'beefers', 'cameramen', 'appalled', 'Ionian', 'butterball',
-'Portland', 'pleaded', 'admiringly', 'pricks', 'hearty', 'corer',
-'deliverable', 'accountably', 'mentors', 'accorded',
-'acknowledgement', 'Lawrenceville', 'morphology', 'eucalyptus',
-'Rena', 'enchanting', 'tighter', 'scholars', 'graduations', 'edges',
-'Latinization', 'proficiency', 'monolithic', 'parenthesizing', 'defy',
-'shames', 'enjoyment', 'Purdue', 'disagrees', 'barefoot', 'maims',
-'flabbergast', 'dishonorable', 'interpolation', 'fanatics', 'dickens',
-'abysses', 'adverse', 'components', 'bowl', 'belong', 'Pipestone',
-'trainees', 'paw', 'pigtail', 'feed', 'whore', 'conditioner',
-'Volstead', 'voices', 'strain', 'inhabits', 'Edwin', 'discourses',
-'deigns', 'cruiser', 'biconvex', 'biking', 'depreciation', 'Harrison',
-'Persian', 'stunning', 'agar', 'rope', 'wagoner', 'elections',
-'reticulately', 'Cruz', 'pulpits', 'wilt', 'peels', 'plants',
-'administerings', 'deepen', 'rubs', 'hence', 'dissension', 'implored',
-'bereavement', 'abyss', 'Pennsylvania', 'benevolent', 'corresponding',
-'Poseidon', 'inactive', 'butchers', 'Mach', 'woke', 'loading',
-'utilizing', 'Hoosier', 'undo', 'Semitization', 'trigger', 'Mouthe',
-'mark', 'disgracefully', 'copier', 'futility', 'gondola', 'algebraic',
-'lecturers', 'sponged', 'instigators', 'looted', 'ether', 'trust',
-'feeblest', 'sequencer', 'disjointness', 'congresses', 'Vicksburg',
-'incompatibilities', 'commend', 'Luxembourg', 'reticulation',
-'instructively', 'reconstructs', 'bricks', 'attache', 'Englishman',
-'provocation', 'roughen', 'cynic', 'plugged', 'scrawls', 'antipode',
-'injected', 'Daedalus', 'Burnsides', 'asker', 'confronter',
-'merriment', 'disdain', 'thicket', 'stinker', 'great', 'tiers',
-'oust', 'antipodes', 'Macintosh', 'tented', 'packages',
-'Mediterraneanize', 'hurts', 'orthodontist', 'seeder', 'readying',
-'babying', 'Florida', 'Sri', 'buckets', 'complementary',
-'cartographer', 'chateaus', 'shaves', 'thinkable', 'Tehran',
-'Gordian', 'Angles', 'arguable', 'bureau', 'smallest', 'fans',
-'navigated', 'dipole', 'bootleg', 'distinctive', 'minimization',
-'absorbed', 'surmised', 'Malawi', 'absorbent', 'close', 'conciseness',
-'hopefully', 'declares', 'descent', 'trick', 'portend', 'unable',
-'mildly', 'Morse', 'reference', 'scours', 'Caribbean', 'battlers',
-'astringency', 'likelier', 'Byronizes', 'econometric', 'grad',
-'steak', 'Austrian', 'ban', 'voting', 'Darlington', 'bison', 'Cetus',
-'proclaim', 'Gilbertson', 'evictions', 'submittal', 'bearings',
-'Gothicizer', 'settings', 'McMahon', 'densities', 'determinants',
-'period', 'DeKastere', 'swindle', 'promptness', 'enablers', 'wordy',
-'during', 'tables', 'responder', 'baffle', 'phosgene', 'muttering',
-'limiters', 'custodian', 'prevented', 'Stouffer', 'waltz', 'Videotex',
-'brainstorms', 'alcoholism', 'jab', 'shouldering', 'screening',
-'explicitly', 'earner', 'commandment', 'French', 'scrutinizing',
-'Gemma', 'capacitive', 'sheriff', 'herbivore', 'Betsey', 'Formosa',
-'scorcher', 'font', 'damming', 'soldiers', 'flack', 'Marks',
-'unlinking', 'serenely', 'rotating', 'converge', 'celebrities',
-'unassailable', 'bawling', 'wording', 'silencing', 'scotch',
-'coincided', 'masochists', 'graphs', 'pernicious', 'disease',
-'depreciates', 'later', 'torus', 'interject', 'mutated', 'causer',
-'messy', 'Bechtel', 'redundantly', 'profoundest', 'autopsy',
-'philosophic', 'iterate', 'Poisson', 'horridly', 'silversmith',
-'millennium', 'plunder', 'salmon', 'missioner', 'advances', 'provers',
-'earthliness', 'manor', 'resurrectors', 'Dahl', 'canto', 'gangrene',
-'gabler', 'ashore', 'frictionless', 'expansionism', 'emphasis',
-'preservations', 'Duane', 'descend', 'isolated', 'firmware',
-'dynamites', 'scrawled', 'cavemen', 'ponder', 'prosperity', 'squaw',
-'vulnerable', 'opthalmic', 'Simms', 'unite', 'totallers', 'Waring',
-'enforced', 'bridge', 'collecting', 'sublime', 'Moore', 'gobble',
-'criticizes', 'daydreams', 'sedate', 'apples', 'Concordia',
-'subsequence', 'distill', 'Allan', 'seizure', 'Isadore', 'Lancashire',
-'spacings', 'corresponded', 'hobble', 'Boonton', 'genuineness',
-'artifact', 'gratuities', 'interviewee', 'Vladimir', 'mailable',
-'Bini', 'Kowalewski', 'interprets', 'bereave', 'evacuated', 'friend',
-'tourists', 'crunched', 'soothsayer', 'fleetly', 'Romanizations',
-'Medicaid', 'persevering', 'flimsy', 'doomsday', 'trillion',
-'carcasses', 'guess', 'seersucker', 'ripping', 'affliction',
-'wildest', 'spokes', 'sheaths', 'procreate', 'rusticates', 'Schapiro',
-'thereafter', 'mistakenly', 'shelf', 'ruination', 'bushel',
-'assuredly', 'corrupting', 'federation', 'portmanteau', 'wading',
-'incendiary', 'thing', 'wanderers', 'messages', 'Paso', 'reexamined',
-'freeings', 'denture', 'potting', 'disturber', 'laborer', 'comrade',
-'intercommunicating', 'Pelham', 'reproach', 'Fenton', 'Alva', 'oasis',
-'attending', 'cockpit', 'scout', 'Jude', 'gagging', 'jailed',
-'crustaceans', 'dirt', 'exquisitely', 'Internet', 'blocker', 'smock',
-'Troutman', 'neighboring', 'surprise', 'midscale', 'impart',
-'badgering', 'fountain', 'Essen', 'societies', 'redresses',
-'afterwards', 'puckering', 'silks', 'Blakey', 'sequel', 'greet',
-'basements', 'Aubrey', 'helmsman', 'album', 'wheelers', 'easternmost',
-'flock', 'ambassadors', 'astatine', 'supplant', 'gird', 'clockwork',
-'foxes', 'rerouting', 'divisional', 'bends', 'spacer',
-'physiologically', 'exquisite', 'concerts', 'unbridled', 'crossing',
-'rock', 'leatherneck', 'Fortescue', 'reloading', 'Laramie', 'Tim',
-'forlorn', 'revert', 'scarcer', 'spigot', 'equality', 'paranormal',
-'aggrieves', 'pegs', 'committeewomen', 'documented', 'interrupt',
-'emerald', 'Battelle', 'reconverted', 'anticipated', 'prejudices',
-'drowsiness', 'trivialities', 'food', 'blackberries', 'Cyclades',
-'tourist', 'branching', 'nugget', 'Asilomar', 'repairmen', 'Cowan',
-'receptacles', 'nobler', 'Nebraskan', 'territorial', 'chickadee',
-'bedbug', 'darted', 'vigilance', 'Octavia', 'summands', 'policemen',
-'twirls', 'style', 'outlawing', 'specifiable', 'pang', 'Orpheus',
-'epigram', 'Babel', 'butyrate', 'wishing', 'fiendish', 'accentuate',
-'much', 'pulsed', 'adorned', 'arbiters', 'counted', 'Afrikaner',
-'parameterizes', 'agenda', 'Americanism', 'referenda', 'derived',
-'liquidity', 'trembling', 'lordly', 'Agway', 'Dillon', 'propellers',
-'statement', 'stickiest', 'thankfully', 'autograph', 'parallel',
-'impulse', 'Hamey', 'stylistic', 'disproved', 'inquirer', 'hoisting',
-'residues', 'variant', 'colonials', 'dequeued', 'especial', 'Samoa',
-'Polaris', 'dismisses', 'surpasses', 'prognosis', 'urinates',
-'leaguers', 'ostriches', 'calculative', 'digested', 'divided',
-'reconfigurer', 'Lakewood', 'illegalities', 'redundancy',
-'approachability', 'masterly', 'cookery', 'crystallized', 'Dunham',
-'exclaims', 'mainline', 'Australianizes', 'nationhood', 'pusher',
-'ushers', 'paranoia', 'workstations', 'radiance', 'impedes',
-'Minotaur', 'cataloging', 'bites', 'fashioning', 'Alsop', 'servants',
-'Onondaga', 'paragraph', 'leadings', 'clients', 'Latrobe',
-'Cornwallis', 'excitingly', 'calorimetric', 'savior', 'tandem',
-'antibiotics', 'excuse', 'brushy', 'selfish', 'naive', 'becomes',
-'towers', 'popularizes', 'engender', 'introducing', 'possession',
-'slaughtered', 'marginally', 'Packards', 'parabola', 'utopia',
-'automata', 'deterrent', 'chocolates', 'objectives', 'clannish',
-'aspirin', 'ferociousness', 'primarily', 'armpit', 'handfuls',
-'dangle', 'Manila', 'enlivened', 'decrease', 'phylum', 'hardy',
-'objectively', 'baskets', 'chaired', 'Sepoy', 'deputy', 'blizzard',
-'shootings', 'breathtaking', 'sticking', 'initials', 'epitomized',
-'Forrest', 'cellular', 'amatory', 'radioed', 'horrified', 'Neva',
-'simultaneous', 'delimiter', 'expulsion', 'Himmler', 'contradiction',
-'Remus', 'Franklinizations', 'luggage', 'moisture', 'Jews',
-'comptroller', 'brevity', 'contradictions', 'Ohio', 'active',
-'babysit', 'China', 'youngest', 'superstition', 'clawing', 'raccoons',
-'chose', 'shoreline', 'helmets', 'Jeffersonian', 'papered',
-'kindergarten', 'reply', 'succinct', 'split', 'wriggle', 'suitcases',
-'nonce', 'grinders', 'anthem', 'showcase', 'maimed', 'blue', 'obeys',
-'unreported', 'perusing', 'recalculate', 'rancher', 'demonic',
-'Lilliputianize', 'approximation', 'repents', 'yellowness',
-'irritates', 'Ferber', 'flashlights', 'booty', 'Neanderthal',
-'someday', 'foregoes', 'lingering', 'cloudiness', 'guy', 'consumer',
-'Berkowitz', 'relics', 'interpolating', 'reappearing', 'advisements',
-'Nolan', 'turrets', 'skeletal', 'skills', 'mammas', 'Winsett',
-'wheelings', 'stiffen', 'monkeys', 'plainness', 'braziers', 'Leary',
-'advisee', 'jack', 'verb', 'reinterpret', 'geometrical', 'trolleys',
-'arboreal', 'overpowered', 'Cuzco', 'poetical', 'admirations',
-'Hobbes', 'phonemes', 'Newsweek', 'agitator', 'finally', 'prophets',
-'environment', 'easterners', 'precomputed', 'faults', 'rankly',
-'swallowing', 'crawl', 'trolley', 'spreading', 'resourceful', 'go',
-'demandingly', 'broader', 'spiders', 'Marsha', 'debris', 'operates',
-'Dundee', 'alleles', 'crunchier', 'quizzical', 'hanging', 'Fisk']
-
-wordsd = {}
-for word in words:
-    wordsd[word] = 1
-
-
-def collect_options(args, jobs, options):
-
-    while args:
-        arg = args.pop(0)
-        if arg.startswith('-'):
-            name = arg[1:]
-            if name == 'options':
-                fname = args.pop(0)
-                d = {}
-                execfile(fname, d)
-                collect_options(list(d['options']), jobs, options)
-            elif options.has_key(name):
-                v = args.pop(0)
-                if options[name] is not None:
-                    raise ValueError(
-                        "Duplicate values for %s, %s and %s"
-                        % (name, v, options[name])
-                        )
-                options[name] = v
-            elif name == 'setup':
-                options['setup'] = 1
-            elif globals().has_key(name.capitalize()+'Job'):
-                job = name
-                kw = {}
-                while args and args[0].find("=") > 0:
-                    arg = args.pop(0).split('=')
-                    name, v = arg[0], '='.join(arg[1:])
-                    if kw.has_key(name):
-                        raise ValueError(
-                            "Duplicate parameter %s for job %s"
-                            % (name, job)
-                            )
-                    kw[name]=v
-                if kw.has_key('frequency'):
-                    frequency = kw['frequency']
-                    del kw['frequency']
-                else:
-                    frequency = 1
-
-                if kw.has_key('sleep'):
-                    sleep = float(kw['sleep'])
-                    del kw['sleep']
-                else:
-                    sleep = 0.0001
-
-                if kw.has_key('repeat'):
-                    repeatp = float(kw['repeat'])
-                    del kw['repeat']
-                else:
-                    repeatp = 0
-
-                jobs.append((job, kw, frequency, sleep, repeatp))
-            else:
-                raise ValueError("not an option or job", name)
-        else:
-            raise ValueError("Expected an option", arg)
-
-
-def find_lib_python():
-    for b in os.getcwd(), os.path.split(sys.argv[0])[0]:
-        for i in range(6):
-            d = ['..']*i + ['lib', 'python']
-            p = os.path.join(b, *d)
-            if os.path.isdir(p):
-                return p
-    raise ValueError("Couldn't find lib/python")
-
-def main(args=None):
-    lib_python = find_lib_python()
-    sys.path.insert(0, lib_python)
-
-    if args is None:
-        args = sys.argv[1:]
-    if not args:
-        print __doc__
-        sys.exit(0)
-
-    print args
-    random.seed(hash(tuple(args))) # always use the same for the given args
-
-    options = {"mbox": None, "threads": None}
-    jobdefs = []
-    collect_options(args, jobdefs, options)
-
-    mboxes = {}
-    if options["mbox"]:
-        mboxes[options["mbox"]] = MBox(options["mbox"])
-
-    # Perform a ZConfig-based Zope initialization:
-    zetup(os.path.join(lib_python, '..', '..', 'etc', 'zope.conf'))
-
-    if options.has_key('setup'):
-        setup(lib_python)
-    else:
-        import Zope2
-        Zope2.startup()
-
-    #from ThreadedAsync.LoopCallback import loop
-    #threading.Thread(target=loop, args=(), name='asyncore').start()
-
-    jobs = JobProducer()
-    for job, kw, frequency, sleep, repeatp in jobdefs:
-        Job = globals()[job.capitalize()+'Job']
-        if getattr(Job, 'needs_mbox', 0):
-            if not kw.has_key("mbox"):
-                if not options["mbox"]:
-                    raise ValueError(
-                        "no mailbox (mbox option) file  specified")
-                kw['mbox'] = mboxes[options["mbox"]]
-            else:
-                if not mboxes.has_key(kw["mbox"]):
-                    mboxes[kw['mbox']] = MBox(kw['mbox'])
-                kw["mbox"] = mboxes[kw['mbox']]
-        jobs.add(Job(**kw), frequency, sleep, repeatp)
-
-    if not jobs:
-        print "No jobs to execute"
-        return
-
-    threads = int(options['threads'] or '0')
-    if threads > 1:
-        threads = [threading.Thread(target=run, args=(jobs, i), name=str(i))
-                   for i in range(threads)]
-        for thread in threads:
-            thread.start()
-        for thread in threads:
-            thread.join()
-    else:
-        run(jobs)
-
-
-def zetup(configfile_name):
-    from Zope.Startup.options import ZopeOptions
-    from Zope.Startup import handlers as h
-    from App import config
-    opts = ZopeOptions()
-    opts.configfile = configfile_name
-    opts.realize(args=[])
-    h.handleConfig(opts.configroot, opts.confighandlers)
-    config.setConfiguration(opts.configroot)
-    from Zope.Startup import dropPrivileges
-    dropPrivileges(opts.configroot)
-
-
-
-if __name__ == '__main__':
-    main()
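
JobProducer.add() above implements frequency weighting by inserting each job
`frequency` times and shuffling, so random.choice() picks jobs with
probability proportional to their frequency.  A tiny self-contained
illustration of that scheme (job names and counts are made up):

    import random

    jobs = []
    for name, frequency in [('index', 3), ('search', 1)]:
        jobs.extend([name] * frequency)
    random.shuffle(jobs)

    counts = {}
    for i in range(10000):
        job = random.choice(jobs)
        counts[job] = counts.get(job, 0) + 1
    print counts    # roughly 3:1 in favor of 'index'
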
diff --git a/branches/bug1734/src/transaction/DEPENDENCIES.cfg b/branches/bug1734/src/transaction/DEPENDENCIES.cfg
deleted file mode 100644
index 4247b32c..00000000
--- a/branches/bug1734/src/transaction/DEPENDENCIES.cfg
+++ /dev/null
@@ -1 +0,0 @@
-ZODB
diff --git a/branches/bug1734/src/transaction/README.txt b/branches/bug1734/src/transaction/README.txt
deleted file mode 100644
index 66dad1fe..00000000
--- a/branches/bug1734/src/transaction/README.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-This package is currently a facade of the ZODB.Transaction module.
-
-It exists to support:
-
-- Application code that uses the ZODB 4 transaction API
-
-- ZODB4-style data managers (transaction.interfaces.IDataManager)
-
-Note that the data manager API, transaction.interfaces.IDataManager,
-is syntactically simple, but semantically complex.  The semantics
-were not easy to express in the interface. This could probably use
-more work.  The semantics are presented in detail through examples of
-a sample data manager in transaction.tests.test_SampleDataManager.
-
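
For orientation, the one thing the code in this package keys on to recognize
a ZODB4-style data manager is a prepare() method: Transaction.join() (in
_transaction.py below) wraps any such object in a DataManagerAdapter.  A
skeletal do-nothing manager; the method set is illustrative of the two-phase
shape, not a complete rendering of transaction.interfaces.IDataManager:

    class NullDataManager:
        """Data manager that joins a transaction but changes nothing."""

        def prepare(self, txn):
            # First phase: get ready to commit and vote on the outcome.
            return True

        def commit(self, txn):
            # Second phase: make the changes permanent.
            pass

        def abort(self, txn):
            # Discard any pending changes.
            pass
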
diff --git a/branches/bug1734/src/transaction/__init__.py b/branches/bug1734/src/transaction/__init__.py
deleted file mode 100644
index e486028a..00000000
--- a/branches/bug1734/src/transaction/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-############################################################################
-#
-# Copyright (c) 2001, 2002, 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################
-"""Exported transaction functions.
-
-$Id$
-"""
-
-from transaction._transaction import Transaction
-from transaction._manager import TransactionManager, ThreadTransactionManager
-
-manager = ThreadTransactionManager()
-
-def get():
-    return manager.get()
-
-def begin():
-    return manager.begin()
-
-def commit(sub=False):
-    manager.get().commit(sub)
-
-def abort(sub=False):
-    manager.get().abort(sub)
-
-def get_transaction():
-    from ZODB.utils import deprecated36
-    deprecated36("   use transaction.get() instead of get_transaction()")
-    return get()
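
A sketch of how application code drives the module-level functions defined
above; the persistent-object work in the middle is elided, and every call
delegates to the shared ThreadTransactionManager:

    import transaction

    transaction.begin()      # start fresh, aborting any pending txn
    # ... modify persistent objects here ...
    transaction.commit()     # commit this thread's current transaction
    # on error, roll back instead:
    # transaction.abort()
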
diff --git a/branches/bug1734/src/transaction/_manager.py b/branches/bug1734/src/transaction/_manager.py
deleted file mode 100644
index 893107d6..00000000
--- a/branches/bug1734/src/transaction/_manager.py
+++ /dev/null
@@ -1,112 +0,0 @@
-############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################
-"""A TransactionManager controls transaction boundaries.
-
-It coordinates application code and resource managers, so that they
-are associated with the right transaction.
-"""
-
-import thread
-
-from transaction._transaction import Transaction
-
-# We have to remember sets of synch objects, especially Connections.
-# But we don't want mere registration with a transaction manager to
-# keep a synch object alive forever; in particular, it's common
-# practice not to explicitly close Connection objects, and keeping
-# a Connection alive keeps a potentially huge number of other objects
-# alive (e.g., the cache, and everything reachable from it too).
-# Therefore we use "weak sets" internally.
-#
-# Obscure:  because of the __init__.py maze, we can't import WeakSet
-# at top level here.
-
-class TransactionManager(object):
-
-    def __init__(self):
-        from ZODB.utils import WeakSet
-
-        self._txn = None
-        self._synchs = WeakSet()
-
-    def begin(self):
-        if self._txn is not None:
-            self._txn.abort()
-        self._txn = Transaction(self._synchs, self)
-        return self._txn
-
-    def get(self):
-        if self._txn is None:
-            self._txn = Transaction(self._synchs, self)
-        return self._txn
-
-    def free(self, txn):
-        assert txn is self._txn
-        self._txn = None
-
-    def registerSynch(self, synch):
-        self._synchs.add(synch)
-
-    def unregisterSynch(self, synch):
-        self._synchs.remove(synch)
-
-class ThreadTransactionManager(object):
-    """Thread-aware transaction manager.
-
-    Each thread is associated with a unique transaction.
-    """
-
-    def __init__(self):
-        # _txns maps thread ids to transactions.
-        self._txns = {}
-        # _synchs maps a thread id to a WeakSet of registered synchronizers.
-        # The WeakSet is passed to the Transaction constructor, because the
-        # latter needs to call the synchronizers when it commits.
-        self._synchs = {}
-
-    def begin(self):
-        tid = thread.get_ident()
-        txn = self._txns.get(tid)
-        if txn is not None:
-            txn.abort()
-        synchs = self._synchs.get(tid)
-        txn = self._txns[tid] = Transaction(synchs, self)
-        return txn
-
-    def get(self):
-        tid = thread.get_ident()
-        txn = self._txns.get(tid)
-        if txn is None:
-            synchs = self._synchs.get(tid)
-            txn = self._txns[tid] = Transaction(synchs, self)
-        return txn
-
-    def free(self, txn):
-        tid = thread.get_ident()
-        assert txn is self._txns.get(tid)
-        del self._txns[tid]
-
-    def registerSynch(self, synch):
-        from ZODB.utils import WeakSet
-
-        tid = thread.get_ident()
-        ws = self._synchs.get(tid)
-        if ws is None:
-            ws = self._synchs[tid] = WeakSet()
-        ws.add(synch)
-
-    def unregisterSynch(self, synch):
-        tid = thread.get_ident()
-        ws = self._synchs[tid]
-        ws.remove(synch)
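
ThreadTransactionManager keys _txns and _synchs by thread.get_ident(), so
each thread sees its own current transaction.  A small sketch of that
isolation, assuming ZODB is importable (Transaction pulls WeakSet from
ZODB.utils):

    import thread, threading
    from transaction._manager import ThreadTransactionManager

    tm = ThreadTransactionManager()
    seen = {}

    def worker():
        # get() lazily creates a Transaction keyed by thread id
        seen[thread.get_ident()] = tm.get()

    workers = [threading.Thread(target=worker) for i in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()

    ids = map(id, seen.values())
    assert ids[0] != ids[1]    # each thread got its own Transaction
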
diff --git a/branches/bug1734/src/transaction/_transaction.py b/branches/bug1734/src/transaction/_transaction.py
deleted file mode 100644
index 3f470116..00000000
--- a/branches/bug1734/src/transaction/_transaction.py
+++ /dev/null
@@ -1,632 +0,0 @@
-############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################
-"""Transaction objects manage resources for an individual activity.
-
-Compatibility issues
---------------------
-
-The implementation of Transaction objects involves two layers of
-backwards compatibility, because this version of transaction supports
-both ZODB 3 and ZODB 4.  Zope is evolving towards the ZODB4
-interfaces.
-
-Transaction has two methods for a resource manager to call to
-participate in a transaction -- register() and join().  join() takes a
-resource manager and adds it to the list of resources.  register() is
-for backwards compatibility.  It takes a persistent object and
-registers its _p_jar attribute.  TODO: explain adapter
-
-Subtransactions
----------------
-
-A subtransaction applies the transaction notion recursively.  It
-allows a set of modifications within a transaction to be committed or
-aborted as a group.  A subtransaction is a strictly local activity;
-its changes are not visible to any other database connection until the
-top-level transaction commits.  In addition to its use to organize a
-large transaction, subtransactions can be used to optimize memory use.
-ZODB must keep modified objects in memory until a transaction commits
-and it can write the changes to the storage.  A subtransaction uses a
-temporary disk storage for its commits, allowing modified objects to
-be flushed from memory when the subtransaction commits.
-
-The commit() and abort() methods take an optional subtransaction
-argument that defaults to false.  If it is true, the operation is
-performed on a subtransaction.
-
-Subtransactions add a lot of complexity to the transaction
-implementation.  Some resource managers support subtransactions, but
-they are not required to.  (ZODB Connection is the only standard
-resource manager that supports subtransactions.)  Resource managers
-that do support subtransactions implement abort_sub() and commit_sub()
-methods and support a second argument to tpc_begin().
-
-The second argument to tpc_begin() indicates that a subtransaction
-commit is beginning (if it is true).  In a subtransaction, there is no
-tpc_vote() call (I don't know why not).  The tpc_finish()
-or tpc_abort() call applies just to that subtransaction.
-
-Once a resource manager is involved in a subtransaction, all
-subsequent transactions will be treated as subtransactions until
-abort_sub() or commit_sub() is called.  abort_sub() will undo all the
-changes of the subtransactions.  commit_sub() will begin a top-level
-transaction and store all the changes from subtransactions.  After
-commit_sub(), the transaction must still call tpc_vote() and
-tpc_finish().
-
-If the resource manager does not support subtransactions, nothing
-happens when the subtransaction commits.  Instead, the resource
-manager is put on a list of managers to commit when the actual
-top-level transaction commits.  If this happens, it will not be
-possible to abort subtransactions.
-
-Two-phase commit
-----------------
-
-A transaction commit involves an interaction between the transaction
-object and one or more resource managers.  The transaction manager
-calls the following four methods on each resource manager; it calls
-tpc_begin() on each resource manager before calling commit() on any of
-them.
-
-    1. tpc_begin(txn, subtransaction=False)
-    2. commit(txn)
-    3. tpc_vote(txn)
-    4. tpc_finish(txn)
-
-Subtransaction commit
----------------------
-
-When a subtransaction commits, the protocol is different.
-
-1. tpc_begin() is passed a second argument, which indicates that a
-   subtransaction is being committed.
-2. tpc_vote() is not called.
-
-Once a subtransaction has been committed, the top-level transaction
-commit will start with a commit_sub() call instead of a tpc_begin()
-call.
-
-Error handling
---------------
-
-When errors occur during two-phase commit, the transaction manager
-aborts all the resource managers.  The specific methods it calls
-depend on whether the error occurs before or after the call to
-tpc_vote() on that transaction manager.
-
-If the resource manager has not voted, then the resource manager will
-have one or more uncommitted objects.  There are two cases that lead
-to this state; either the transaction manager has not called commit()
-for any objects on this resource manager or the call that failed was a
-commit() for one of the objects of this resource manager.  For each
-uncommitted object, including the object that failed in its commit(),
-call abort().
-
-Once uncommitted objects are aborted, tpc_abort() or abort_sub() is
-called on each resource manager.  abort_sub() is called if the
-resource manager was involved in a subtransaction.
-
-Synchronization
----------------
-
-You can register synchronization objects (synchronizers) with the
-transaction manager.  The synchronizer must implement
-beforeCompletion() and afterCompletion() methods.  The transaction
-manager calls beforeCompletion() when it starts a top-level two-phase
-commit.  It calls afterCompletion() when a top-level transaction is
-committed or aborted.  The methods are passed the current Transaction
-as their only argument.
-"""
-
-import logging
-import sys
-import thread
-import warnings
-import traceback
-from cStringIO import StringIO
-
-# Sigh.  In the maze of __init__.py's, ZODB.__init__.py takes 'get'
-# out of transaction.__init__.py, in order to stuff the 'get_transaction'
-# alias in __builtin__.  So here in _transaction.py, we can't import
-# exceptions from ZODB.POSException at top level (we're imported by
-# our __init__.py, which is imported by ZODB's __init__, so the ZODB
-# package isn't well-formed when we're first imported).
-# from ZODB.POSException import TransactionError, TransactionFailedError
-
-_marker = object()
-
-# The point of this is to avoid hiding exceptions (which the builtin
-# hasattr() does).
-def myhasattr(obj, attr):
-    return getattr(obj, attr, _marker) is not _marker
-
-class Status:
-    # ACTIVE is the initial state.
-    ACTIVE       = "Active"
-
-    COMMITTING   = "Committing"
-    COMMITTED    = "Committed"
-
-    # commit() or commit(True) raised an exception.  All further attempts
-    # to commit or join this transaction will raise TransactionFailedError.
-    COMMITFAILED = "Commit failed"
-
-class Transaction(object):
-
-    def __init__(self, synchronizers=None, manager=None):
-        self.status = Status.ACTIVE
-        # List of resource managers, e.g. MultiObjectResourceAdapters.
-        self._resources = []
-
-        # Weak set of synchronizer objects to call.
-        if synchronizers is None:
-            from ZODB.utils import WeakSet
-            synchronizers = WeakSet()
-        self._synchronizers = synchronizers
-
-        self._manager = manager
-
-        # _adapters: Connection/_p_jar -> MultiObjectResourceAdapter[Sub]
-        self._adapters = {}
-        self._voted = {} # id(Connection) -> boolean, True if voted
-        # _voted and other dictionaries use the id() of the resource
-        # manager as a key, because we can't guess whether the actual
-        # resource managers will be safe to use as dict keys.
-
-        # The user, description, and _extension attributes are accessed
-        # directly by storages, leading underscore notwithstanding.
-        self.user = ""
-        self.description = ""
-        self._extension = {}
-
-        self.log = logging.getLogger("txn.%d" % thread.get_ident())
-        self.log.debug("new transaction")
-
-        # _sub contains all of the resource managers involved in
-        # subtransactions.  It maps id(a resource manager) to the resource
-        # manager.
-        self._sub = {}
-        # _nonsub contains all the resource managers that do not support
-        # subtransactions that were involved in subtransaction commits.
-        self._nonsub = {}
-
-        # If a commit fails, the traceback is saved in _failure_traceback.
-        # If another attempt is made to commit, TransactionFailedError is
-        # raised, incorporating this traceback.
-        self._failure_traceback = None
-
-    # Raise TransactionFailedError, due to commit()/join()/register()
-    # getting called when the current transaction has already suffered
-    # a commit failure.
-    def _prior_commit_failed(self):
-        from ZODB.POSException import TransactionFailedError
-        assert self._failure_traceback is not None
-        raise TransactionFailedError("commit() previously failed, "
-                "with this traceback:\n\n%s" %
-                self._failure_traceback.getvalue())
-
-    def join(self, resource):
-        if self.status is Status.COMMITFAILED:
-            self._prior_commit_failed() # doesn't return
-
-        if self.status is not Status.ACTIVE:
-            # TODO: Should it be possible to join a committing transaction?
-            # I think some users want it.
-            raise ValueError("expected txn status %r, but it's %r" % (
-                             Status.ACTIVE, self.status))
-        # TODO: the prepare check is a bit of a hack, perhaps it would
-        # be better to use interfaces.  If this is a ZODB4-style
-        # resource manager, it needs to be adapted, too.
-        if myhasattr(resource, "prepare"):
-            resource = DataManagerAdapter(resource)
-        self._resources.append(resource)
-
-    def register(self, obj):
-        # The old way of registering transaction participants.
-        #
-        # register() is passed either a persistent object or a
-        # resource manager like the ones defined in ZODB.DB.
-        # If it is passed a persistent object, that object should
-        # be stored when the transaction commits.  Otherwise, the
-        # object itself is expected to implement the standard
-        # two-phase commit protocol.
-
-        manager = getattr(obj, "_p_jar", obj)
-        adapter = self._adapters.get(manager)
-        if adapter is None:
-            if myhasattr(manager, "commit_sub"):
-                adapter = MultiObjectResourceAdapterSub(manager)
-            else:
-                adapter = MultiObjectResourceAdapter(manager)
-            adapter.objects.append(obj)
-            self._adapters[manager] = adapter
-            self.join(adapter)
-        else:
-            # TODO: comment out this expensive assert later
-            # Use id() to guard against proxies.
-            assert id(obj) not in map(id, adapter.objects)
-            adapter.objects.append(obj)
-
-            # In the presence of subtransactions, an existing adapter
-            # might be in _adapters but not in _resources.
-            if adapter not in self._resources:
-                self._resources.append(adapter)
-
-    def begin(self):
-        from ZODB.utils import deprecated36
-
-        deprecated36("Transaction.begin() should no longer be used; use "
-                      "the begin() method of a transaction manager.")
-        if (self._resources or
-              self._sub or
-              self._nonsub or
-              self._synchronizers):
-            self.abort()
-        # Otherwise there is nothing to abort; the only effect abort()
-        # would have is to uselessly free() this transaction when
-        # _manager is non-None.
-
-    def commit(self, subtransaction=False):
-        if self.status is Status.COMMITFAILED:
-            self._prior_commit_failed() # doesn't return
-
-        if not subtransaction and self._sub and self._resources:
-            # This commit is for a top-level transaction that has
-            # previously committed subtransactions.  Do one last
-            # subtransaction commit to clear out the current objects,
-            # then commit all the subjars.
-            self.commit(True)
-
-        if not subtransaction:
-            self._synchronizers.map(lambda s: s.beforeCompletion(self))
-            self.status = Status.COMMITTING
-
-        try:
-            self._commitResources(subtransaction)
-        except:
-            self.status = Status.COMMITFAILED
-            # Save the traceback for TransactionFailedError.
-            ft = self._failure_traceback = StringIO()
-            t, v, tb = sys.exc_info()
-            # Record how we got into commit().
-            traceback.print_stack(sys._getframe(1), None, ft)
-            # Append the stack entries from here down to the exception.
-            traceback.print_tb(tb, None, ft)
-            # Append the exception type and value.
-            ft.writelines(traceback.format_exception_only(t, v))
-            raise t, v, tb
-
-        if subtransaction:
-            self._resources = []
-        else:
-            self.status = Status.COMMITTED
-            if self._manager:
-                self._manager.free(self)
-            self._synchronizers.map(lambda s: s.afterCompletion(self))
-            self.log.debug("commit")
-
-    def _commitResources(self, subtransaction):
-        # Execute the two-phase commit protocol.
-
-        L = self._getResourceManagers(subtransaction)
-        try:
-            for rm in L:
-                # If you pass subtransaction=True to tpc_begin(), it
-                # will create a temporary storage for the duration of
-                # the transaction.  To signal that the top-level
-                # transaction is committing, you must then call
-                # commit_sub().
-                if not subtransaction and id(rm) in self._sub:
-                    del self._sub[id(rm)]
-                    rm.commit_sub(self)
-                else:
-                    rm.tpc_begin(self, subtransaction)
-            for rm in L:
-                rm.commit(self)
-                self.log.debug("commit %r" % rm)
-            if not subtransaction:
-                # Not sure why, but it is intentional that you do not
-                # call tpc_vote() for subtransaction commits.
-                for rm in L:
-                    rm.tpc_vote(self)
-                    self._voted[id(rm)] = True
-
-            try:
-                for rm in L:
-                    rm.tpc_finish(self)
-            except:
-                # TODO: do we need to make this warning stronger?
-                # TODO: It would be nice if the system could be configured
-                # to stop committing transactions at this point.
-                self.log.critical("A storage error occurred during the second "
-                                  "phase of the two-phase commit.  Resources "
-                                  "may be in an inconsistent state.")
-                raise
-        except:
-            # If an error occurs committing a transaction, we try
-            # to revert the changes in each of the resource managers.
-            t, v, tb = sys.exc_info()
-            try:
-                self._cleanup(L)
-            finally:
-                if not subtransaction:
-                    self._synchronizers.map(lambda s: s.afterCompletion(self))
-            raise t, v, tb
-
-    def _cleanup(self, L):
-        # Called when an exception occurs during tpc_vote or tpc_finish.
-        for rm in L:
-            if id(rm) not in self._voted:
-                try:
-                    rm.abort(self)
-                except Exception:
-                    self.log.error("Error in abort() on manager %s",
-                                   rm, exc_info=sys.exc_info())
-        for rm in L:
-            if id(rm) in self._sub:
-                try:
-                    rm.abort_sub(self)
-                except Exception:
-                    self.log.error("Error in abort_sub() on manager %s",
-                                   rm, exc_info=sys.exc_info())
-            else:
-                try:
-                    rm.tpc_abort(self)
-                except Exception:
-                    self.log.error("Error in tpc_abort() on manager %s",
-                                   rm, exc_info=sys.exc_info())
-
-    def _getResourceManagers(self, subtransaction):
-        L = []
-        if subtransaction:
-            # If we are in a subtransaction, make sure all resource
-            # managers are placed in either _sub or _nonsub.  When
-            # the top-level transaction commits, we need to merge
-            # these back into the resource set.
-
-            # If a data manager doesn't support sub-transactions, we
-            # don't do anything with it now.  (That's somewhat okay,
-            # because subtransactions are mostly just an
-            # optimization.)  Save it until the top-level transaction
-            # commits.
-
-            for rm in self._resources:
-                if myhasattr(rm, "commit_sub"):
-                    self._sub[id(rm)] = rm
-                    L.append(rm)
-                else:
-                    self._nonsub[id(rm)] = rm
-        else:
-            if self._sub or self._nonsub:
-                # Merge all of _sub, _nonsub, and _resources.
-                d = dict(self._sub)
-                d.update(self._nonsub)
-                # TODO: I think _sub and _nonsub are disjoint, and that
-                #       _resources is empty.  If so, we can simplify this code.
-                assert len(d) == len(self._sub) + len(self._nonsub)
-                assert not self._resources
-                for rm in self._resources:
-                    d[id(rm)] = rm
-                L = d.values()
-            else:
-                L = list(self._resources)
-
-        L.sort(rm_cmp)
-        return L
-
-    def abort(self, subtransaction=False):
-        if not subtransaction:
-            self._synchronizers.map(lambda s: s.beforeCompletion(self))
-
-        if subtransaction and self._nonsub:
-            from ZODB.POSException import TransactionError
-            raise TransactionError("Resource manager does not support "
-                                   "subtransaction abort")
-
-        tb = None
-        for rm in self._resources + self._nonsub.values():
-            try:
-                rm.abort(self)
-            except:
-                if tb is None:
-                    t, v, tb = sys.exc_info()
-                self.log.error("Failed to abort resource manager: %s",
-                               rm, exc_info=sys.exc_info())
-
-        if not subtransaction:
-            for rm in self._sub.values():
-                try:
-                    rm.abort_sub(self)
-                except:
-                    if tb is None:
-                        t, v, tb = sys.exc_info()
-                    self.log.error("Failed to abort_sub resource manager: %s",
-                                   rm, exc_info=sys.exc_info())
-
-        if not subtransaction:
-            if self._manager:
-                self._manager.free(self)
-            self._synchronizers.map(lambda s: s.afterCompletion(self))
-            self.log.debug("abort")
-
-        if tb is not None:
-            raise t, v, tb
-
-    def note(self, text):
-        text = text.strip()
-        if self.description:
-            self.description += "\n\n" + text
-        else:
-            self.description = text
-
-    def setUser(self, user_name, path="/"):
-        self.user = "%s %s" % (path, user_name)
-
-    def setExtendedInfo(self, name, value):
-        self._extension[name] = value
-
-# TODO: We need a better name for the adapters.
-
-class MultiObjectResourceAdapter(object):
-    """Adapt the old-style register() call to the new-style join().
-
-    With join(), a resource manager like a Connection registers with
-    the transaction manager.  With register(), an individual object
-    is passed instead, and an adapter like this one joins on its
-    manager's behalf.
-    """
-
-    def __init__(self, jar):
-        self.manager = jar
-        self.objects = []
-        self.ncommitted = 0
-
-    def __repr__(self):
-        return "<%s for %s at %s>" % (self.__class__.__name__,
-                                      self.manager, id(self))
-
-    def sortKey(self):
-        return self.manager.sortKey()
-
-    def tpc_begin(self, txn, sub=False):
-        self.manager.tpc_begin(txn, sub)
-
-    def tpc_finish(self, txn):
-        self.manager.tpc_finish(txn)
-
-    def tpc_abort(self, txn):
-        self.manager.tpc_abort(txn)
-
-    def commit(self, txn):
-        for o in self.objects:
-            self.manager.commit(o, txn)
-            self.ncommitted += 1
-
-    def tpc_vote(self, txn):
-        self.manager.tpc_vote(txn)
-
-    def abort(self, txn):
-        tb = None
-        for o in self.objects:
-            try:
-                self.manager.abort(o, txn)
-            except:
-                # Capture the first exception and re-raise it after
-                # aborting all the other objects.
-                if tb is None:
-                    t, v, tb = sys.exc_info()
-                txn.log.error("Failed to abort object: %s",
-                              object_hint(o), exc_info=sys.exc_info())
-        if tb is not None:
-            raise t, v, tb
-
-class MultiObjectResourceAdapterSub(MultiObjectResourceAdapter):
-    """Adapt resource managers that participate in subtransactions."""
-
-    def commit_sub(self, txn):
-        self.manager.commit_sub(txn)
-
-    def abort_sub(self, txn):
-        self.manager.abort_sub(txn)
-
-    def tpc_begin(self, txn, sub=False):
-        self.manager.tpc_begin(txn, sub)
-        self.sub = sub
-
-    def tpc_finish(self, txn):
-        self.manager.tpc_finish(txn)
-        if self.sub:
-            self.objects = []
-
-
-def rm_cmp(rm1, rm2):
-    return cmp(rm1.sortKey(), rm2.sortKey())
-
-def object_hint(o):
-    """Return a string describing the object.
-
-    This function does not raise an exception.
-    """
-
-    from ZODB.utils import oid_repr
-
-    # We should always be able to get __class__.
-    klass = o.__class__.__name__
-    # oid would be great, but maybe this isn't a persistent object.
-    oid = getattr(o, "_p_oid", _marker)
-    if oid is not _marker:
-        oid = oid_repr(oid)
-    return "%s oid=%s" % (klass, oid)
-
-class DataManagerAdapter(object):
-    """Adapt zodb 4-style data managers to zodb3 style
-
-    Adapt transaction.interfaces.IDataManager to
-    ZODB.interfaces.IPureDatamanager
-    """
-
-    # Note that it is pretty important that this does not have a _p_jar
-    # attribute. This object will be registered with a zodb3 TM, which
-    # will then try to get a _p_jar from it, using it as the default.
-    # (Objects without a _p_jar are their own data managers.)
-
-    def __init__(self, datamanager):
-        self._datamanager = datamanager
-        self._rollback = None
-
-    # TODO: I'm not sure why commit() doesn't do anything
-
-    def commit(self, transaction):
-        pass
-
-    def abort(self, transaction):
-
-        # We need to discard any changes made since the last savepoint,
-        # or all changes if no savepoint has been set.
-
-        if self._rollback is None:
-            # No previous savepoint, so just abort
-            self._datamanager.abort(transaction)
-        else:
-            self._rollback()
-
-    def abort_sub(self, transaction):
-        self._datamanager.abort(transaction)
-
-    def commit_sub(self, transaction):
-        # Nothing to do with respect to the data, but we begin two-phase
-        # commit for the top-level transaction.
-        self._sub = False
-
-    def tpc_begin(self, transaction, subtransaction=False):
-        self._sub = subtransaction
-
-    def tpc_abort(self, transaction):
-        if self._sub:
-            self.abort(transaction)
-        else:
-            self._datamanager.abort(transaction)
-
-    def tpc_finish(self, transaction):
-        if self._sub:
-            self._rollback = self._datamanager.savepoint(transaction).rollback
-        else:
-            self._datamanager.commit(transaction)
-
-    def tpc_vote(self, transaction):
-        if not self._sub:
-            self._datamanager.prepare(transaction)
-
-    def sortKey(self):
-        return self._datamanager.sortKey()
diff --git a/branches/bug1734/src/transaction/interfaces.py b/branches/bug1734/src/transaction/interfaces.py
deleted file mode 100644
index be335d9a..00000000
--- a/branches/bug1734/src/transaction/interfaces.py
+++ /dev/null
@@ -1,263 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Transaction Interfaces
-
-$Id$
-"""
-
-import zope.interface
-
-class IDataManager(zope.interface.Interface):
-    """Objects that manage transactional storage.
-
-    These objects may manage data for other objects, or they may manage
-    non-object storages, such as relational databases.
-
-    This is the interface currently provided by ZODB database
-    connections, but the intent is to move to a newer, simpler
-    data manager interface.
-    """
-
-    def abort_sub(transaction):
-        """Discard all subtransaction data.
-
-        See subtransaction.txt
-
-        This is called when top-level transactions are aborted.
-
-        No further subtransactions can be started once abort_sub()
-        has been called; this is only used when the transaction is
-        being aborted.
-
-        abort_sub also implies the abort of a 2-phase commit.
-
-        This should never fail.
-        """
-
-    def commit_sub(transaction):
-        """Commit all changes made in subtransactions and begin 2-phase commit
-
-        Data are saved *as if* they are part of the current transaction.
-        That is, they will not be persistent unless the current transaction
-        is committed.
-
-        This is called when the current top-level transaction is committed.
-
-        No further subtransactions can be started once commit_sub()
-        has been called; this is only used when the transaction is
-        being committed.
-
-        This call also implies the beginning of 2-phase commit.
-        """
-
-    # Two-phase commit protocol.  These methods are called by the
-    # ITransaction object associated with the transaction being
-    # committed.
-
-    def tpc_begin(transaction, subtransaction=False):
-        """Begin commit of a transaction, starting the two-phase commit.
-
-        transaction is the ITransaction instance associated with the
-        transaction being committed.
-
-        subtransaction is a Boolean flag indicating whether the
-        two-phase commit is being invoked for a subtransaction.
-
-        Important note: Subtransactions are modal in the sense that
-        when you commit a subtransaction, subsequent commits should be
-        for subtransactions as well.  That is, there must be a
-        commit_sub() call between a tpc_begin() call with the
-        subtransaction flag set to true and a tpc_begin() with the
-        flag set to false.
-
-        """
-
-    def tpc_abort(transaction):
-        """Abort a transaction.
-
-        This is called by a transaction manager to end a two-phase commit on
-        the data manager.
-
-        This is always called after a tpc_begin call.
-
-        transaction is the ITransaction instance associated with the
-        transaction being committed.
-
-        This should never fail.
-        """
-
-    def tpc_finish(transaction):
-        """Indicate confirmation that the transaction is done.
-
-        transaction is the ITransaction instance associated with the
-        transaction being committed.
-
-        This should never fail. If this raises an exception, the
-        database is not expected to maintain consistency; it's a
-        serious error.
-
-        It's important that the storage finish the transaction while
-        it still holds its lock.  We don't want another thread
-        to be able to read any updated data until we've had a chance
-        to send an invalidation message to all of the other
-        connections!
-        """
-
-    def tpc_vote(transaction):
-        """Verify that a data manager can commit the transaction
-
-        This is the last chance for a data manager to vote 'no'.  A
-        data manager votes 'no' by raising an exception.
-
-        transaction is the ITransaction instance associated with the
-        transaction being committed.
-        """
-
-    def commit(transaction):
-        """Commit modifications to registered objects.
-
-        Save the object as part of the data to be made persistent if
-        the transaction commits.
-
-        This includes conflict detection and handling. If no conflicts or
-        errors occur it saves the objects in the storage. 
-        """
-
-    def abort(transaction):
-        """Abort a transaction and forget all changes.
-
-        Abort must be called outside of a two-phase commit.
-
-        Abort is called by the transaction manager to abort transactions 
-        that are not yet in a two-phase commit. 
-        """
-
-    def sortKey():
-        """Return a key to use for ordering registered DataManagers
-
-        ZODB uses a global sort order to prevent deadlock when it commits
-        transactions involving multiple resource managers.  The resource
-        manager must define a sortKey() method that provides a global ordering
-        for resource managers.
-        """
-        # Alternate version:
-        #"""Return a consistent sort key for this connection.
-        #
-        #This allows ordering multiple connections that use the same storage in
-        #a consistent manner. This is unique for the lifetime of a connection,
-        #which is good enough to avoid ZEO deadlocks.
-        #"""
-
-    def beforeCompletion(transaction):
-        """Hook that is called by the transaction before completing a commit"""
-
-    def afterCompletion(transaction):
-        """Hook that is called by the transaction after completing a commit"""
-
-class ITransaction(zope.interface.Interface):
-    """Object representing a running transaction.
-
-    Objects with this interface may represent different transactions
-    during their lifetime (.begin() can be called to start a new
-    transaction using the same instance).
-    """
-
-    user = zope.interface.Attribute(
-        "user",
-        "The name of the user on whose behalf the transaction is being\n"
-        "performed.  The format of the user name is defined by the\n"
-        "application.")
-    # Unsure: required to be a string?
-
-    description = zope.interface.Attribute(
-        "description",
-        "Textual description of the transaction.")
-
-    def begin(info=None, subtransaction=None):
-        """Begin a new transaction.
-
-        If the transaction is in progress, it is aborted and a new
-        transaction is started using the same transaction object.
-        """
-
-    def commit(subtransaction=None):
-        """Finalize the transaction.
-
-        This executes the two-phase commit algorithm for all
-        IDataManager objects associated with the transaction.
-        """
-
-    def abort(subtransaction=0, freeme=1):
-        """Abort the transaction.
-
-        This is called from the application.  This can only be called
-        before the two-phase commit protocol has been started.
-        """
-
-    def join(datamanager):
-        """Add a datamanager to the transaction.
-
-        The datamanager must implement the
-        transaction.interfaces.IDataManager interface, and be
-        adaptable to ZODB.interfaces.IDataManager.
-        """
-
-    def register(object):
-        """Register the given object for transaction control."""
-
-    def note(text):
-        """Add text to the transaction description.
-
-        If a description has already been set, text is added to the
-        end of the description following two newline characters.
-        Surrounding whitespace is stripped from text.
-        """
-        # Unsure:  does impl do the right thing with ''?  Not clear what
-        # the "right thing" is.
-
-    def setUser(user_name, path="/"):
-        """Set the user name.
-
-        path should be provided if needed to further qualify the
-        identified user.
-        """
-
-    def setExtendedInfo(name, value):
-        """Add extension data to the transaction.
-
-        name is the name of the extension property to set; value must
-        be a picklable value.
-
-        Storage implementations may limit the amount of extension data
-        which can be stored.
-        """
-        # Unsure:  is this allowed to cause an exception here, during
-        # the two-phase commit, or can it toss data silently?
-
-
-class IRollback(zope.interface.Interface):
-
-    def rollback():
-        """Rollback changes since savepoint.
-
-        IOW, rollback to the last savepoint.
-
-        It is an error to rollback to a savepoint if:
-
-        - An earlier savepoint within the same transaction has been
-          rolled back to, or
-
-        - The transaction has ended.
-        """
-
diff --git a/branches/bug1734/src/transaction/notes.txt b/branches/bug1734/src/transaction/notes.txt
deleted file mode 100644
index f7e090af..00000000
--- a/branches/bug1734/src/transaction/notes.txt
+++ /dev/null
@@ -1,269 +0,0 @@
-[more info may (or may not) be added to
-
-    http://zope.org/Wikis/ZODB/ReviseTransactionAPI
-]
-
-Notes on a future transaction API
-=================================
-
-I did a brief review of the current transaction APIs from ZODB 3 and
-ZODB 4, considering some of the issues that have come up since last
-winter when most of the initial design and implementation of ZODB 4's
-transaction API was done.
-
-Participants
-------------
-
-There are four participants in the transaction APIs.
-
-1. Application -- Some application code is ultimately in charge of the
-transaction process.  It uses transactional resources, decides the
-scope of individual transactions, and commits or aborts transactions.
-
-2. Resource Manager -- Typically library or framework code that provides
-transactional access to some resource -- a ZODB database, a relational
-database, or some other resource.  It provides an API for application
-code that isn't defined by the transaction framework.  It collaborates
-with the transaction manager to find the current transaction.  It
-collaborates with the transaction for registration, notification, and
-for committing changes.
-
-The ZODB Connection is a resource manager.  In ZODB 4, it is called a
-data manager.  In ZODB 3, it is called a jar.  In other literature,
-resource manager seems to be common.
-
-3. Transaction -- coordinates the actions of application and resource
-managers for a particular activity.  The transaction usually has a
-short lifetime.  The application begins it, resources register with it
-as the application runs, then it finishes with a commit or abort.
-
-4. Transaction Manager -- coordinates the use of transactions.  The
-transaction manager provides policies for associating resource
-managers with specific transactions.  The question "What is the
-current transaction?" is answered by the transaction manager.
-
-I'm taking as a starting point the transaction API that was defined
-for ZODB 4.  I reviewed it again after a lot of time away, and I still
-think it's on the right track.
-
-Current transaction
--------------------
-
-The first question is "What is the current transaction?"  This
-question is decided by the transaction manager.  An application could
-choose the transaction manager that suits its needs best.
-
-In the current ZODB, the transaction manager is essentially the
-implementation of ZODB.Transaction.get_transaction() and the
-associated thread id -> txn dict.  I think we can encapsulate this
-policy in a first-class object and allow applications to decide which
-one they want to use.  By default, a thread-based txn manager would be
-provided.
-
-The other responsibility of the transaction manager is to decide when
-to start a new transaction.  The current ZODB transaction manager
-starts one whenever a client calls get() and there is no current
-transaction.  I think there could be some benefit to an explicit new()
-operation that will always create a new transaction.  A particular
-manager could implement the policy that get() called before new()
-returns None or raises an exception.
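-
-A thread-based manager along those lines might be sketched like this
-(illustrative only; none of these names are from the current code):
-
-    import thread
-
-    class ThreadTransactionManager:
-
-        def __init__(self):
-            self._txns = {}  # thread id -> Transaction
-
-        def get(self):
-            tid = thread.get_ident()
-            if tid not in self._txns:
-                self._txns[tid] = Transaction(manager=self)
-            return self._txns[tid]
-
-        def new(self):
-            self._txns[thread.get_ident()] = txn = Transaction(manager=self)
-            return txn
-
-        def free(self, txn):
-            tid = thread.get_ident()
-            if self._txns.get(tid) is txn:
-                del self._txns[tid]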
-
-Basic transaction API
----------------------
-
-A transaction module or package can export a very simple API for
-interacting with transactions.  It hides most of the complexity from
-applications that want to use the standard Zope policies.  Here's a
-sketch of an implementation:
-
-_mgr = TransactionManager()
-
-def get():
-    """Return the current transaction."""
-    return _mgr.get()
-
-def new():
-    """Return a new transaction."""
-    return _mgr.new()
-
-def commit():
-    """Commit the current transaction."""
-    _mgr.get().commit()
-
-def abort():
-    """Abort the current transaction."""
-    _mgr.get().abort()
-
-Application code can just import the transaction module to use the
-get(), new(), abort(), and commit() methods.
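-
-Intended usage is then simply (a sketch based on the functions above):
-
-    import transaction
-
-    # ... modify transactional resources ...
-    transaction.commit()   # or transaction.abort()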
-
-The individual transaction objects should have a register() method
-that is used by a resource manager to register that it has
-modifications for this transaction.  It's part of the basic API, but
-not the basic user API.
-
-Extended transaction API
-------------------------
-
-There are a few other methods that might make sense on a transaction:
-
-status() -- return a code or string indicating what state the
-transaction is in -- begin, aborted, committed, etc.
-
-note() -- add metadata to txn
-
-The transaction module should have a mechanism for installing a new
-transaction manager.
-
-Suspend and resume
-------------------
-
-If the transaction manager's job is to decide what the current
-transaction is, then it would make sense to have suspend() and
-resume() APIs that allow the current activity to be stopped for a
-time.  The goal of these APIs is to allow more control over
-coordination.  
-
-It seems like user code would call suspend() and resume() on
-individual transaction objects, which would interact with the
-transaction manager.
-
-If suspend() and resume() are supported, then we need to think about
-whether those events need to be communicated to the resource
-managers. 
-
-This is a new feature that isn't needed for ZODB 3.3.
-
-Registration and notification
------------------------------
-
-The transaction object coordinates the activities of resource
-managers.  When a managed resource is modified, its manager must
-register with the current transaction.  (It's an error to modify an
-object when there is no transaction?)
-
-When the transaction commits or aborts, the transaction calls back to
-each registered resource manager.  The callbacks form the two-phase
-commit protocol.  I like the ZODB 4 names and approach: prepare()
-does tpc_begin through tpc_vote on the storage.
-
-A resource manager does not register with a transaction if none of its
-resources are modified.  Some resource managers would like to know
-about transaction boundaries anyway.  A ZODB Connection would like to
-process invalidations at every commit, even if none of its objects
-were modified.
-
-It's not clear what the notification interface should look like or
-what events are of interest.  In theory, transaction begin, abort, and
-commit are all interesting; perhaps a combined abort-or-commit event
-would be useful.  The ZODB use case only needs one event.
-
-The Java transaction API has beforeCompletion and afterCompletion,
-where after gets passed a status code to indicate abort or commit.
-I think these should be sufficient.
-
-Nested transactions / savepoints
---------------------------------
-
-ZODB 3 and ZODB 4 each have a limited form of nested transactions.
-They are called subtransactions in ZODB 3 and savepoints in ZODB 4.
-The essential mechanism is the same:  At the time a subtransaction is
-committed, all the modifications up to that time are written out to a
-temporary file.  The application can later revert to that saved state
-or commit the main transaction, which copies modifications from the
-temporary file to the real storage.
-
-The savepoint mechanism can be used to implement the subtransaction
-model, by creating a savepoint every time a subtransaction starts or
-ends.
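-
-Using the ZODB 4-style API, where savepoint() returns an object with
-a rollback() method, the equivalence is roughly (a sketch):
-
-    rollback = dm.savepoint(txn)   # "commit" a subtransaction
-    # ... further changes ...
-    rollback.rollback()            # "abort" back to that point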
-
-If a resource manager joins a transaction after a savepoint, we need
-to create an initial savepoint for the new resource manager that will
-rollback all its changes.  If the new resource manager doesn't support
-savepoints, we probably need to mark earlier savepoints as invalid.
-There are some edge cases to work out here.
-
-It's not clear how nested transactions affect the transaction manager
-API.  If we just use savepoint(), then there's no issue to sort out.
-A nested transaction API may be more convenient.  One possibility is
-to pass a transaction object to new() indicating that the new
-transaction is a child of the current transaction.  Example:
-
-    transaction.new(transaction.get())
-
-That seems rather wordy.  Perhaps:
-
-    transaction.child()
-
-where this creates a new nested transaction that is a child of the
-current one, raising an exception if there is no current transaction.
-
-This illustrates that a subtransaction feature could create new
-requirements for the transaction manager API.  
-
-The current ZODB 3 API is that calling commit(1) or commit(True) means
-"commit a subtransaction."  abort() has the same API.  We need to
-support this API for backwards compatibility.  A new API would be a
-new feature that isn't necessary for ZODB 3.3.
-
-ZODB Connection and Transactions
---------------------------------
-
-The Connection has three interactions with a transaction manager.
-First, it registers itself with the transaction manager for
-synchronization messages.  Second, it registers with the current
-transaction the first time an object is modified in that transaction.
-Third, there is an option to explicitly pass a transaction manager to
-the connection constructor via DB.open(); the connection always uses
-this transaction manager, regardless of the default manager.
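-
-For example (a sketch; the keyword argument name is an assumption,
-not taken from the current code):
-
-    tm = TransactionManager()
-    conn = db.open(txn_mgr=tm)   # conn always uses tm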
-
-Deadlock and recovery
----------------------
-
-ZODB uses a global sort order to prevent deadlock when it commits
-transactions involving multiple resource managers.  The resource
-manager must define a sortKey() method that provides a global ordering
-for resource managers.  The sort code doesn't exist in ZODB 4, but
-could be added fairly easily.
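-
-A sortKey() implementation just has to return a stable, globally
-comparable value, e.g. (a sketch; the attribute names are
-illustrative):
-
-    def sortKey(self):
-        # storage name plus an instance marker gives a global order
-        return "%s:%s" % (self._storage.sortKey(), id(self))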
-
-The transaction managers don't support recovery, where recovery means
-restoring a system to a consistent state after a failure during the
-second phase of two-phase commit.  When a failure occurs in the second
-phase, some transaction participants may not know the outcome of the
-transaction.  (It would be cool to support recovery, but that's not
-being discussed now.)
-
-The absence of a real recovery manager means that our transaction
-commit implementation needs to play many tricks to avoid the need for
-recovery (pseudo-recovery).  For example, if the first resource
-manager fails in the second phase, we attempt to abort all the other
-resource managers.  (This isn't strictly correct, because we don't know the
-status of the first resource manager if it fails.)  If we used
-something more like the ZODB 4 implementation, we'd need to make sure
-all the pseudo-recovery work is done in the new implementation.
-
-Closing resource managers
--------------------------
-
-The ZODB Connection is explicitly opened and closed by the
-application; other resource managers probably get closed too.  The
-relationship between transactions and closed resource managers is
-undefined in the current API.  A transaction will probably fail if the
-Connection is closed, or succeed by accident if the Connection is
-re-opened. 
-
-The resource manager - transaction API should include some means for
-dealing with close.  The likely approach is to raise an error if you
-close a resource manager that is currently registered with a
-transaction. 
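-
-That likely approach could be sketched as (illustrative bookkeeping
-names, not from the current code):
-
-    def close(self):
-        if self._registered_txn is not None:
-            raise RuntimeError("resource manager is still registered "
-                               "with a transaction")
-        # ... release resources ...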
-
-First steps
------------
-
-I would definitely like to see some things in ZODB 3.3:
-
-    - simplified module-level transaction calls
-    - notifications for abort-commit event
-    - restructured Connection to track modified objects itself
-    - explicit transaction manager object
-
diff --git a/branches/bug1734/src/transaction/tests/__init__.py b/branches/bug1734/src/transaction/tests/__init__.py
deleted file mode 100644
index 792d6005..00000000
--- a/branches/bug1734/src/transaction/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#
diff --git a/branches/bug1734/src/transaction/tests/abstestIDataManager.py b/branches/bug1734/src/transaction/tests/abstestIDataManager.py
deleted file mode 100644
index b746399e..00000000
--- a/branches/bug1734/src/transaction/tests/abstestIDataManager.py
+++ /dev/null
@@ -1,63 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test cases for objects implementing IDataManager.
-
-This is a combo test between Connection and DB, since the two are
-rather incestuous and the DB interface is not defined anywhere that
-I was able to find.
-
-To do a full test suite one would probably want to write a dummy
-storage that will raise errors as needed for testing.
-
-I started this test suite to reproduce a very simple error (tpc_abort
-had an error and wouldn't even run if called).  So it is *very*
-incomplete, and even the tests that exist do not make sure that
-the data actually gets written/not written to the storage.
-
-Obviously this test suite should be expanded.
-
-$Id$
-"""
-
-from unittest import TestCase
-from transaction.interfaces import IRollback
-
-class IDataManagerTests(TestCase, object):
-
-    def setUp(self):
-        self.datamgr = None # subclass should override
-        self.obj = None # subclass should define Persistent object
-        self.txn_factory = None
-
-    def get_transaction(self):
-        return self.txn_factory()
-
-    ################################
-    # IDataManager interface tests #
-    ################################
-
-    def testCommitObj(self):
-        tran = self.get_transaction()
-        self.datamgr.prepare(tran)
-        self.datamgr.commit(tran)
-
-    def testAbortTran(self):
-        tran = self.get_transaction()
-        self.datamgr.prepare(tran)
-        self.datamgr.abort(tran)
-
-    def testRollback(self):
-        tran = self.get_transaction()
-        rb = self.datamgr.savepoint(tran)
-        self.assert_(IRollback.providedBy(rb))
diff --git a/branches/bug1734/src/transaction/tests/test_SampleDataManager.py b/branches/bug1734/src/transaction/tests/test_SampleDataManager.py
deleted file mode 100644
index af874cb7..00000000
--- a/branches/bug1734/src/transaction/tests/test_SampleDataManager.py
+++ /dev/null
@@ -1,412 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Sample objects for use in tests
-
-$Id$
-"""
-
-class DataManager(object):
-    """Sample data manager
-
-       This class provides a trivial data-manager implementation and doc
-       strings to illustrate the protocol and to provide a tool for
-       writing tests.
-
-       Our sample data manager has state that is updated through an inc
-       method and through transaction operations.
-
-       When we create a sample data manager:
-
-       >>> dm = DataManager()
-
-       It has two bits of state, state:
-
-       >>> dm.state
-       0
-
-       and delta:
-
-       >>> dm.delta
-       0
-
-       Both are initialized to 0.  state is meant to model
-       committed state, while delta represents tentative changes within a
-       transaction.  We change the state by calling inc:
-
-       >>> dm.inc()
-
-       which updates delta:
-
-       >>> dm.delta
-       1
-
-       but state isn't changed until we commit the transaction:
-
-       >>> dm.state
-       0
-
-       To commit the changes, we use 2-phase commit. We execute the first
-       stage by calling prepare.  We need to pass a transaction.  Our
-       sample data managers don't really use the transactions for much,
-       so we'll be lazy and use strings for transactions:
-
-       >>> t1 = '1'
-       >>> dm.prepare(t1)
-
-       The sample data manager updates the state when we call prepare:
-
-       >>> dm.state
-       1
-       >>> dm.delta
-       1
-
-       This is mainly so we can detect some effect of calling the methods.
-
-       Now if we call commit:
-
-       >>> dm.commit(t1)
-
-       Our changes are"permanent".  The state reflects the changes and the
-       delta has been reset to 0.
-
-       >>> dm.state
-       1
-       >>> dm.delta
-       0
-       """
-
-    def __init__(self):
-        self.state = 0
-        self.sp = 0
-        self.transaction = None
-        self.delta = 0
-        self.prepared = False
-
-    def inc(self, n=1):
-        self.delta += n
-
-    def prepare(self, transaction):
-        """Prepare to commit data
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> t1 = '1'
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-        >>> dm.state
-        1
-        >>> dm.inc()
-        >>> t2 = '2'
-        >>> dm.prepare(t2)
-        >>> dm.abort(t2)
-        >>> dm.state
-        1
-
-        It is an error to call prepare more than once without an intervening
-        commit or abort:
-
-        >>> dm.prepare(t1)
-
-        >>> dm.prepare(t1)
-        Traceback (most recent call last):
-        ...
-        TypeError: Already prepared
-
-        >>> dm.prepare(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: Already prepared
-
-        >>> dm.abort(t1)
-
-        If there was a preceding savepoint, the transaction must match:
-
-        >>> rollback = dm.savepoint(t1)
-        >>> dm.prepare(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.prepare(t1)
-
-        """
-        if self.prepared:
-            raise TypeError('Already prepared')
-        self._checkTransaction(transaction)
-        self.prepared = True
-        self.transaction = transaction
-        self.state += self.delta
-
-    def _checkTransaction(self, transaction):
-        if (transaction is not self.transaction
-            and self.transaction is not None):
-            raise TypeError("Transaction mismatch",
-                            transaction, self.transaction)
-
-    def abort(self, transaction):
-        """Abort a transaction
-
-        The abort method can be called before two-phase commit to
-        throw away work done in the transaction:
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> t1 = '1'
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        The abort method also throws away work done in savepoints:
-
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (0, 2)
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        If savepoints are used, abort must be passed the same
-        transaction:
-
-        >>> dm.inc()
-        >>> r = dm.savepoint(t1)
-        >>> t2 = '2'
-        >>> dm.abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.abort(t1)
-
-        The abort method is also used to abort a two-phase commit:
-
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.prepare(t1)
-        >>> dm.state, dm.delta
-        (1, 1)
-        >>> dm.abort(t1)
-        >>> dm.state, dm.delta
-        (0, 0)
-
-        Of course, the transactions passed to prepare and abort must
-        match:
-
-        >>> dm.prepare(t1)
-        >>> dm.abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> dm.abort(t1)
-
-
-        """
-        self._checkTransaction(transaction)
-        if self.transaction is not None:
-            self.transaction = None
-
-        if self.prepared:
-            self.state -= self.delta
-            self.prepared = False
-
-        self.delta = 0
-
-    def commit(self, transaction):
-        """Complete two-phase commit
-
-        >>> dm = DataManager()
-        >>> dm.state
-        0
-        >>> dm.inc()
-
-        We start two-phase commit by calling prepare:
-
-        >>> t1 = '1'
-        >>> dm.prepare(t1)
-
-        We complete it by calling commit:
-
-        >>> dm.commit(t1)
-        >>> dm.state
-        1
-
-        It is an error to call commit without calling prepare first:
-
-        >>> dm.inc()
-        >>> t2 = '2'
-        >>> dm.commit(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: Not prepared to commit
-
-        >>> dm.prepare(t2)
-        >>> dm.commit(t2)
-
-        Of course, the transactions given to prepare and commit must
-        be the same:
-
-        >>> dm.inc()
-        >>> t3 = '3'
-        >>> dm.prepare(t3)
-        >>> dm.commit(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '3')
-
-        """
-        if not self.prepared:
-            raise TypeError('Not prepared to commit')
-        self._checkTransaction(transaction)
-        self.delta = 0
-        self.transaction = None
-        self.prepared = False
-
-    def savepoint(self, transaction):
-        """Provide the ability to rollback transaction state
-
-        Savepoints provide a way to:
-
-        - Save partial transaction work. For some data managers, this
-          could allow resources to be used more efficiently.
-
-        - Provide the ability to revert state to a point in a
-          transaction without aborting the entire transaction.  In
-          other words, savepoints support partial aborts.
-
-        Savepoints don't use two-phase commit. If there are errors in
-        setting or rolling back to savepoints, the application should
-        abort the containing transaction.  This is *not* the
-        responsibility of the data manager.
-
-        Savepoints are always associated with a transaction. Any work
-        done in a savepoint's transaction is tentative until the
-        transaction is committed using two-phase commit.
-
-        >>> dm = DataManager()
-        >>> dm.inc()
-        >>> t1 = '1'
-        >>> r = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (0, 2)
-        >>> r.rollback()
-        >>> dm.state, dm.delta
-        (0, 1)
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        Savepoints must have the same transaction:
-
-        >>> r1 = dm.savepoint(t1)
-        >>> dm.state, dm.delta
-        (1, 0)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 1)
-        >>> t2 = '2'
-        >>> r2 = dm.savepoint(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> r2 = dm.savepoint(t1)
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 2)
-
-        If we rollback to an earlier savepoint, we discard all work
-        done later:
-
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        and we can no longer rollback to the later savepoint:
-
-        >>> r2.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Attempt to roll back to invalid save point', 3, 2)
-
-        We can roll back to a savepoint as often as we like:
-
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        >>> dm.inc()
-        >>> dm.inc()
-        >>> dm.inc()
-        >>> dm.state, dm.delta
-        (1, 3)
-        >>> r1.rollback()
-        >>> dm.state, dm.delta
-        (1, 0)
-
-        But we can't rollback to a savepoint after it has been
-        committed:
-
-        >>> dm.prepare(t1)
-        >>> dm.commit(t1)
-
-        >>> r1.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: Attempt to rollback stale rollback
-
-        """
-        if self.prepared:
-            raise TypeError("Can't get savepoint during two-phase commit")
-        self._checkTransaction(transaction)
-        self.transaction = transaction
-        self.sp += 1
-        return Rollback(self)
-
-class Rollback(object):
-
-    def __init__(self, dm):
-        self.dm = dm
-        self.sp = dm.sp
-        self.delta = dm.delta
-        self.transaction = dm.transaction
-
-    def rollback(self):
-        if self.transaction is not self.dm.transaction:
-            raise TypeError("Attempt to rollback stale rollback")
-        if self.dm.sp < self.sp:
-            raise TypeError("Attempt to roll back to invalid save point",
-                            self.sp, self.dm.sp)
-        self.dm.sp = self.sp
-        self.dm.delta = self.delta
-
-
-def test_suite():
-    from doctest import DocTestSuite
-    return DocTestSuite()
-
-if __name__ == '__main__':
-    import unittest
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/transaction/tests/test_SampleResourceManager.py b/branches/bug1734/src/transaction/tests/test_SampleResourceManager.py
deleted file mode 100644
index 36461d6d..00000000
--- a/branches/bug1734/src/transaction/tests/test_SampleResourceManager.py
+++ /dev/null
@@ -1,435 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Sample objects for use in tests
-
-$Id$
-"""
-
-class ResourceManager(object):
-    """Sample resource manager.
-
-       This class provides a trivial resource-manager implementation and doc
-       strings to illustrate the protocol and to provide a tool for writing
-       tests.
-
-       Our sample resource manager has state that is updated through an inc
-       method and through transaction operations.
-
-       When we create a sample resource manager:
-
-       >>> rm = ResourceManager()
-
-       It has two pieces of state, state and delta, both initialized to 0:
-
-       >>> rm.state
-       0
-       >>> rm.delta
-       0
-
-       state is meant to model committed state, while delta represents
-       tentative changes within a transaction.  We change the state by
-       calling inc:
-
-       >>> rm.inc()
-
-       which updates delta:
-
-       >>> rm.delta
-       1
-
-       but state isn't changed until we commit the transaction:
-
-       >>> rm.state
-       0
-
-       To commit the changes, we use 2-phase commit.  We execute the first
-       stage by calling tpc_begin.  We need to pass a transaction.  Our
-       sample resource managers don't really use the transactions for much,
-       so we'll be lazy and use strings for transactions.  The sample
-       resource manager updates the state when we call tpc_vote:
-
-       >>> t1 = '1'
-       >>> rm.tpc_begin(t1)
-       >>> rm.state, rm.delta
-       (0, 1)
-
-       >>> rm.tpc_vote(t1)
-       >>> rm.state, rm.delta
-       (1, 1)
-
-       Now if we call tpc_finish:
-
-       >>> rm.tpc_finish(t1)
-
-       Our changes are "permanent".  The state reflects the changes and the
-       delta has been reset to 0.
-
-       >>> rm.state, rm.delta
-       (1, 0)
-       """
-
-    def __init__(self):
-        self.state = 0
-        self.sp = 0
-        self.transaction = None
-        self.delta = 0
-        self.txn_state = None
-
-    def _check_state(self, *ok_states):
-        if self.txn_state not in ok_states:
-            raise ValueError("txn in state %r but expected one of %r" %
-                             (self.txn_state, ok_states))
-
-    def _checkTransaction(self, transaction):
-        if (transaction is not self.transaction
-            and self.transaction is not None):
-            raise TypeError("Transaction mismatch",
-                            transaction, self.transaction)
-
-    def inc(self, n=1):
-        self.delta += n
-
-    def tpc_begin(self, transaction):
-        """Prepare to commit data.
-
-        >>> rm = ResourceManager()
-        >>> rm.inc()
-        >>> t1 = '1'
-        >>> rm.tpc_begin(t1)
-        >>> rm.tpc_vote(t1)
-        >>> rm.tpc_finish(t1)
-        >>> rm.state
-        1
-        >>> rm.inc()
-        >>> t2 = '2'
-        >>> rm.tpc_begin(t2)
-        >>> rm.tpc_vote(t2)
-        >>> rm.tpc_abort(t2)
-        >>> rm.state
-        1
-
-        It is an error to call tpc_begin more than once without completing
-        two-phase commit:
-
-        >>> rm.tpc_begin(t1)
-
-        >>> rm.tpc_begin(t1)
-        Traceback (most recent call last):
-        ...
-        ValueError: txn in state 'tpc_begin' but expected one of (None,)
-        >>> rm.tpc_abort(t1)
-
-        If there was a preceding savepoint, the transaction must match:
-
-        >>> rollback = rm.savepoint(t1)
-        >>> rm.tpc_begin(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> rm.tpc_begin(t1)
-
-        """
-        self._checkTransaction(transaction)
-        self._check_state(None)
-        self.transaction = transaction
-        self.txn_state = 'tpc_begin'
-
-    def tpc_vote(self, transaction):
-        """Verify that a data manager can commit the transaction.
-
-        This is the last chance for a data manager to vote 'no'.  A
-        data manager votes 'no' by raising an exception.
-
-        transaction is the ITransaction instance associated with the
-        transaction being committed.
-        """
-        self._checkTransaction(transaction)
-        self._check_state('tpc_begin')
-        self.state += self.delta
-        self.txn_state = 'tpc_vote'
-
-    def tpc_finish(self, transaction):
-        """Complete two-phase commit
-
-        >>> rm = ResourceManager()
-        >>> rm.state
-        0
-        >>> rm.inc()
-
-        We start two-phase commit by calling tpc_begin:
-
-        >>> t1 = '1'
-        >>> rm.tpc_begin(t1)
-        >>> rm.tpc_vote(t1)
-
-        We complete it by calling tpc_finish:
-
-        >>> rm.tpc_finish(t1)
-        >>> rm.state
-        1
-
-        It is an error to call tpc_finish without calling tpc_vote:
-
-        >>> rm.inc()
-        >>> t2 = '2'
-        >>> rm.tpc_begin(t2)
-        >>> rm.tpc_finish(t2)
-        Traceback (most recent call last):
-        ...
-        ValueError: txn in state 'tpc_begin' but expected one of ('tpc_vote',)
-
-        >>> rm.tpc_abort(t2)  # clean slate
-
-        >>> rm.tpc_begin(t2)
-        >>> rm.tpc_vote(t2)
-        >>> rm.tpc_finish(t2)
-
-        Of course, the transactions given to tpc_begin and tpc_finish must
-        be the same:
-
-        >>> rm.inc()
-        >>> t3 = '3'
-        >>> rm.tpc_begin(t3)
-        >>> rm.tpc_vote(t3)
-        >>> rm.tpc_finish(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '3')
-        """
-        self._checkTransaction(transaction)
-        self._check_state('tpc_vote')
-        self.delta = 0
-        self.transaction = None
-        self.prepared = False
-        self.txn_state = None
-
-    def tpc_abort(self, transaction):
-        """Abort a transaction
-
-        The abort method can be called before two-phase commit to
-        throw away work done in the transaction:
-
-        >>> rm = ResourceManager()
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (0, 1)
-        >>> t1 = '1'
-        >>> rm.tpc_abort(t1)
-        >>> rm.state, rm.delta
-        (0, 0)
-
-        The abort method also throws away work done in savepoints:
-
-        >>> rm.inc()
-        >>> r = rm.savepoint(t1)
-        >>> rm.inc()
-        >>> r = rm.savepoint(t1)
-        >>> rm.state, rm.delta
-        (0, 2)
-        >>> rm.tpc_abort(t1)
-        >>> rm.state, rm.delta
-        (0, 0)
-
-        If savepoints are used, abort must be passed the same
-        transaction:
-
-        >>> rm.inc()
-        >>> r = rm.savepoint(t1)
-        >>> t2 = '2'
-        >>> rm.tpc_abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> rm.tpc_abort(t1)
-
-        The abort method is also used to abort a two-phase commit:
-
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (0, 1)
-        >>> rm.tpc_begin(t1)
-        >>> rm.state, rm.delta
-        (0, 1)
-        >>> rm.tpc_vote(t1)
-        >>> rm.state, rm.delta
-        (1, 1)
-        >>> rm.tpc_abort(t1)
-        >>> rm.state, rm.delta
-        (0, 0)
-
-        Of course, the transactions passed to tpc_begin and tpc_abort must
-        match:
-
-        >>> rm.tpc_begin(t1)
-        >>> rm.tpc_abort(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> rm.tpc_abort(t1)
-
-        This should never fail.
-        """
-
-        self._checkTransaction(transaction)
-        self.transaction = None
-
-        if self.txn_state == 'tpc_vote':
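-            # tpc_vote already folded delta into state, so undo that.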
-            self.state -= self.delta
-
-        self.txn_state = None
-        self.delta = 0
-
-    def savepoint(self, transaction):
-        """Provide the ability to rollback transaction state
-
-        Savepoints provide a way to:
-
-        - Save partial transaction work. For some resource managers, this
-          could allow resources to be used more efficiently.
-
-        - Provide the ability to revert state to a point in a
-          transaction without aborting the entire transaction.  In
-          other words, savepoints support partial aborts.
-
-        Savepoints don't use two-phase commit. If there are errors in
-        setting or rolling back to savepoints, the application should
-        abort the containing transaction.  This is *not* the
-        responsibility of the resource manager.
-
-        Savepoints are always associated with a transaction. Any work
-        done in a savepoint's transaction is tentative until the
-        transaction is committed using two-phase commit.
-
-        >>> rm = ResourceManager()
-        >>> rm.inc()
-        >>> t1 = '1'
-        >>> r = rm.savepoint(t1)
-        >>> rm.state, rm.delta
-        (0, 1)
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (0, 2)
-        >>> r.rollback()
-        >>> rm.state, rm.delta
-        (0, 1)
-        >>> rm.tpc_begin(t1)
-        >>> rm.tpc_vote(t1)
-        >>> rm.tpc_finish(t1)
-        >>> rm.state, rm.delta
-        (1, 0)
-
-        Savepoints must have the same transaction:
-
-        >>> r1 = rm.savepoint(t1)
-        >>> rm.state, rm.delta
-        (1, 0)
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (1, 1)
-        >>> t2 = '2'
-        >>> r2 = rm.savepoint(t2)
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Transaction mismatch', '2', '1')
-
-        >>> r2 = rm.savepoint(t1)
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (1, 2)
-
-        If we roll back to an earlier savepoint, we discard all work
-        done later:
-
-        >>> r1.rollback()
-        >>> rm.state, rm.delta
-        (1, 0)
-
-        and we can no longer roll back to the later savepoint:
-
-        >>> r2.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: ('Attempt to roll back to invalid save point', 3, 2)
-
-        We can roll back to a savepoint as often as we like:
-
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> r1.rollback()
-        >>> rm.state, rm.delta
-        (1, 0)
-
-        >>> rm.inc()
-        >>> rm.inc()
-        >>> rm.inc()
-        >>> rm.state, rm.delta
-        (1, 3)
-        >>> r1.rollback()
-        >>> rm.state, rm.delta
-        (1, 0)
-
-        But we can't roll back to a savepoint after it has been
-        committed:
-
-        >>> rm.tpc_begin(t1)
-        >>> rm.tpc_vote(t1)
-        >>> rm.tpc_finish(t1)
-
-        >>> r1.rollback()
-        Traceback (most recent call last):
-        ...
-        TypeError: Attempt to roll back a stale savepoint
-
-        """
-        if self.txn_state is not None:
-            raise TypeError("Can't get savepoint during two-phase commit")
-        self._checkTransaction(transaction)
-        self.transaction = transaction
-        self.sp += 1
-        return SavePoint(self)
-
-    def discard(self, transaction):
-        pass
-
-class SavePoint(object):
-
-    def __init__(self, rm):
-        self.rm = rm
-        self.sp = rm.sp
-        self.delta = rm.delta
-        self.transaction = rm.transaction
-
-    def rollback(self):
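-        # The savepoint is stale once the resource manager has moved on
-        # to a different transaction (or to none), e.g. after tpc_finish.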
-        if self.transaction is not self.rm.transaction:
-            raise TypeError("Attempt to rollback stale rollback")
-        if self.rm.sp < self.sp:
-            raise TypeError("Attempt to roll back to invalid save point",
-                            self.sp, self.rm.sp)
-        self.rm.sp = self.sp
-        self.rm.delta = self.delta
-
-    def discard(self):
-        pass
-
-def test_suite():
-    from doctest import DocTestSuite
-    return DocTestSuite()
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/transaction/tests/test_register_compat.py b/branches/bug1734/src/transaction/tests/test_register_compat.py
deleted file mode 100644
index ad8701e2..00000000
--- a/branches/bug1734/src/transaction/tests/test_register_compat.py
+++ /dev/null
@@ -1,154 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test backwards compatibility for resource managers using register().
-
-The transaction package supports several different APIs for resource
-managers.  The original ZODB3 API was implemented by ZODB.Connection.
-The Connection passed persistent objects to a Transaction's register()
-method.  It's possible that third-party code also used this API; hence
-these tests check that the code adapting the old interface to the
-current API works.
-
-These tests use a TestConnection object that implements the old API.
-They check that the right methods are called and in roughly the right
-order.
-
-Common cases
-------------
-
-First, check that a basic transaction commit works.
-
->>> cn = TestConnection()
->>> cn.register(Object())
->>> cn.register(Object())
->>> cn.register(Object())
->>> transaction.commit()
->>> len(cn.committed)
-3
->>> len(cn.aborted)
-0
->>> cn.calls
-['begin', 'vote', 'finish']
-
-Second, check that a basic transaction abort works.  If the
-application calls abort(), then the transaction never gets into the
-two-phase commit.  It just aborts each object.
-
->>> cn = TestConnection()
->>> cn.register(Object())
->>> cn.register(Object())
->>> cn.register(Object())
->>> transaction.abort()
->>> len(cn.committed)
-0
->>> len(cn.aborted)
-3
->>> cn.calls
-[]
-
-Error handling
---------------
-
-The tricky part of the implementation is recovering from an error that
-occurs during the two-phase commit.  We override the commit() and
-abort() methods of Object to cause errors during commit.
-
-Note that the implementation uses lists internally, so that objects
-are committed in the order they are registered.  (In the presence of
-multiple resource managers, objects from a single resource manager are
-committed in order.  I'm not sure if this is an accident of the
-implementation or a feature that should be supported by any
-implementation.)
-
-The order of resource managers depends on sortKey().
-
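-(For example, TestConnection.sortKey() below returns str(id(self)),
-so the relative order of multiple connections is effectively
-arbitrary.)
-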
->>> cn = TestConnection()
->>> cn.register(Object())
->>> cn.register(CommitError())
->>> cn.register(Object())
->>> transaction.commit()
-Traceback (most recent call last):
- ...
-RuntimeError: commit
->>> len(cn.committed)
-1
->>> len(cn.aborted)
-3
-
-Clean up:
-
->>> transaction.abort()
-"""
-
-import transaction
-
-class Object(object):
-
-    def commit(self):
-        pass
-
-    def abort(self):
-        pass
-
-class CommitError(Object):
-
-    def commit(self):
-        raise RuntimeError("commit")
-
-class AbortError(Object):
-
-    def abort(self):
-        raise RuntimeError("abort")
-
-class BothError(CommitError, AbortError):
-    pass
-
-class TestConnection:
-
-    def __init__(self):
-        self.committed = []
-        self.aborted = []
-        self.calls = []
-
-    def register(self, obj):
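-        # Mimic the old ZODB3 Connection: mark the object as managed by
-        # this jar, then register it with the current transaction.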
-        obj._p_jar = self
-        transaction.get().register(obj)
-
-    def sortKey(self):
-        return str(id(self))
-
-    def tpc_begin(self, txn, sub):
-        self.calls.append("begin")
-
-    def tpc_vote(self, txn):
-        self.calls.append("vote")
-
-    def tpc_finish(self, txn):
-        self.calls.append("finish")
-
-    def tpc_abort(self, txn):
-        self.calls.append("abort")
-
-    def commit(self, obj, txn):
-        obj.commit()
-        self.committed.append(obj)
-
-    def abort(self, obj, txn):
-        obj.abort()
-        self.aborted.append(obj)
-
-import doctest
-
-def test_suite():
-    return doctest.DocTestSuite()
diff --git a/branches/bug1734/src/transaction/tests/test_transaction.py b/branches/bug1734/src/transaction/tests/test_transaction.py
deleted file mode 100644
index 2c39b8ed..00000000
--- a/branches/bug1734/src/transaction/tests/test_transaction.py
+++ /dev/null
@@ -1,646 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Test tranasction behavior for variety of cases.
-
-I wrote these unittests to investigate some odd transaction
-behavior when doing unittests of integrating non sub transaction
-aware objects, and to insure proper txn behavior. these
-tests test the transaction system independent of the rest of the
-zodb.
-
-you can see the method calls to a jar by passing the
-keyword arg tracing to the modify method of a dataobject.
-the value of the arg is a prefix used for tracing print calls
-to that objects jar.
-
-the number of times a jar method was called can be inspected
-by looking at an attribute of the jar that is the method
-name prefixed with a c (count/check).
-
-i've included some tracing examples for tests that i thought
-were illuminating as doc strings below.
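-
-For example, after transaction.commit() a test can check how many
-times tpc_finish was called on a jar:
-
-    assert self.sub1._p_jar.ctpc_finish == 1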
-
-TODO
-
-    add in tests for objects which are modified multiple times,
-    for example an object that gets modified in multiple sub txns.
-
-$Id$
-"""
-
-import unittest
-import transaction
-from ZODB.utils import positive_id
-
-class TransactionTests(unittest.TestCase):
-
-    def setUp(self):
-        self.orig_tm = transaction.manager
-        transaction.manager = transaction.TransactionManager()
-        self.sub1 = DataObject()
-        self.sub2 = DataObject()
-        self.sub3 = DataObject()
-        self.nosub1 = DataObject(nost=1)
-
-    def tearDown(self):
-        transaction.manager = self.orig_tm
-
-    # basic tests with two sub trans jars
-    # really we only need one, so tests for
-    # sub1 should be identical to tests for sub2
-    def testTransactionCommit(self):
-
-        self.sub1.modify()
-        self.sub2.modify()
-
-        transaction.commit()
-
-        assert self.sub1._p_jar.ccommit_sub == 0
-        assert self.sub1._p_jar.ctpc_finish == 1
-
-    def testTransactionAbort(self):
-
-        self.sub1.modify()
-        self.sub2.modify()
-
-        transaction.abort()
-
-        assert self.sub2._p_jar.cabort == 1
-
-    def testTransactionNote(self):
-
-        t = transaction.get()
-
-        t.note('This is a note.')
-        self.assertEqual(t.description, 'This is a note.')
-        t.note('Another.')
-        self.assertEqual(t.description, 'This is a note.\n\nAnother.')
-
-        t.abort()
-
-    def testSubTransactionCommitCommit(self):
-
-        self.sub1.modify()
-        self.sub2.modify()
-
-        transaction.commit(1)
-
-        assert self.sub1._p_jar.ctpc_vote == 0
-        assert self.sub1._p_jar.ctpc_finish == 1
-
-        transaction.commit()
-
-        assert self.sub1._p_jar.ccommit_sub == 1
-        assert self.sub1._p_jar.ctpc_vote == 1
-
-    def testSubTransactionCommitAbort(self):
-
-        self.sub1.modify()
-        self.sub2.modify()
-
-        transaction.commit(1)
-        transaction.abort()
-
-        assert self.sub1._p_jar.ctpc_vote == 0
-        assert self.sub1._p_jar.cabort == 0
-        assert self.sub1._p_jar.cabort_sub == 1
-
-    def testMultipleSubTransactionCommitCommit(self):
-        self.sub1.modify()
-        transaction.commit(1)
-
-        self.sub2.modify()
-        # reset a flag on the original to test it again
-        self.sub1.ctpc_finish = 0
-        transaction.commit(1)
-
-        # This is interesting: we go through every subtransaction
-        # commit with all subtransaction-capable objects.  I don't
-        # like this, but it's an implementation artifact.
-
-        assert self.sub1._p_jar.ctpc_vote == 0
-        assert self.sub1._p_jar.ctpc_finish > 0
-
-        # add another before we do the entire txn commit
-        self.sub3.modify()
-
-        transaction.commit()
-
-        # We did an implicit sub commit; is this an implementation artifact?
-        assert self.sub3._p_jar.ccommit_sub == 1
-        assert self.sub1._p_jar.ctpc_finish > 1
-
-
-    def testMultipleSubTransactionCommitAbortSub(self):
-        """
-        sub1 calling method commit
-        sub1 calling method tpc_finish
-        sub2 calling method tpc_begin
-        sub2 calling method commit
-        sub2 calling method tpc_finish
-        sub3 calling method abort
-        sub1 calling method commit_sub
-        sub2 calling method commit_sub
-        sub2 calling method tpc_vote
-        sub1 calling method tpc_vote
-        sub1 calling method tpc_finish
-        sub2 calling method tpc_finish
-        """
-
-        # add it
-        self.sub1.modify()
-
-        transaction.commit(1)
-
-        # add another
-        self.sub2.modify()
-
-        transaction.commit(1)
-
-        assert self.sub1._p_jar.ctpc_vote == 0
-        assert self.sub1._p_jar.ctpc_finish > 0
-
-        # add another before we do the entire txn commit
-        self.sub3.modify()
-
-        # abort the sub transaction
-        transaction.abort(1)
-
-        # commit the container transaction
-        transaction.commit()
-
-        assert self.sub3._p_jar.cabort == 1
-        assert self.sub1._p_jar.ccommit_sub == 1
-        assert self.sub1._p_jar.ctpc_finish > 1
-
-    # repeat adding in a nonsub trans jars
-
-    def testNSJTransactionCommit(self):
-
-        self.nosub1.modify()
-
-        transaction.commit()
-
-        assert self.nosub1._p_jar.ctpc_finish == 1
-
-    def testNSJTransactionAbort(self):
-
-        self.nosub1.modify()
-
-        transaction.abort()
-
-        assert self.nosub1._p_jar.ctpc_finish == 0
-        assert self.nosub1._p_jar.cabort == 1
-
-    def BUGtestNSJSubTransactionCommitAbort(self):
-        """
-        This reveals a bug in transaction.py: the nosub jar
-        should not have tpc_finish called on it until the
-        containing txn ends.
-
-        sub calling method commit
-        nosub calling method tpc_begin
-        sub calling method tpc_finish
-        nosub calling method tpc_finish
-        nosub calling method abort
-        sub calling method abort_sub
-        """
-
-        self.sub1.modify(tracing='sub')
-        self.nosub1.modify(tracing='nosub')
-
-        transaction.commit(1)
-
-        assert self.sub1._p_jar.ctpc_finish == 1
-
-        # bug, non sub trans jars are getting finished
-        # in a subtrans
-        assert self.nosub1._p_jar.ctpc_finish == 0
-
-        transaction.abort()
-
-        assert self.nosub1._p_jar.cabort == 1
-        assert self.sub1._p_jar.cabort_sub == 1
-
-    def testNSJSubTransactionCommitCommit(self):
-
-        self.sub1.modify()
-        self.nosub1.modify()
-
-        transaction.commit(1)
-
-        assert self.nosub1._p_jar.ctpc_vote == 0
-
-        transaction.commit()
-
-        #assert self.nosub1._p_jar.ccommit_sub == 0
-        assert self.nosub1._p_jar.ctpc_vote == 1
-        assert self.sub1._p_jar.ccommit_sub == 1
-        assert self.sub1._p_jar.ctpc_vote == 1
-
-
-    def testNSJMultipleSubTransactionCommitCommit(self):
-        """
-        sub1 calling method tpc_begin
-        sub1 calling method commit
-        sub1 calling method tpc_finish
-        nosub calling method tpc_begin
-        nosub calling method tpc_finish
-        sub2 calling method tpc_begin
-        sub2 calling method commit
-        sub2 calling method tpc_finish
-        nosub calling method tpc_begin
-        nosub calling method commit
-        sub1 calling method commit_sub
-        sub2 calling method commit_sub
-        sub1 calling method tpc_vote
-        nosub calling method tpc_vote
-        sub2 calling method tpc_vote
-        sub2 calling method tpc_finish
-        nosub calling method tpc_finish
-        sub1 calling method tpc_finish
-        """
-
-        # add it
-        self.sub1.modify()
-
-        transaction.commit(1)
-
-        # add another
-        self.nosub1.modify()
-
-        transaction.commit(1)
-
-        assert self.sub1._p_jar.ctpc_vote == 0
-        assert self.nosub1._p_jar.ctpc_vote == 0
-        assert self.sub1._p_jar.ctpc_finish > 0
-
-        # add another before we do the entire txn commit
-        self.sub2.modify()
-
-        # commit the container transaction
-        transaction.commit()
-
-        # we did an implicit sub commit
-        assert self.sub2._p_jar.ccommit_sub == 1
-        assert self.sub1._p_jar.ctpc_finish > 1
-
-    ### Failure Mode Tests
-    #
-    # Now we do some more interesting tests that check the
-    # implementation's error handling by throwing errors from
-    # various jar methods.
-    ###
-
-    # first the recoverable errors
-
-    def testExceptionInAbort(self):
-
-        self.sub1._p_jar = SubTransactionJar(errors='abort')
-
-        self.nosub1.modify()
-        self.sub1.modify(nojar=1)
-        self.sub2.modify()
-
-        try:
-            transaction.abort()
-        except TestTxnException: pass
-
-        assert self.nosub1._p_jar.cabort == 1
-        assert self.sub2._p_jar.cabort == 1
-
-    def testExceptionInCommit(self):
-
-        self.sub1._p_jar = SubTransactionJar(errors='commit')
-
-        self.nosub1.modify()
-        self.sub1.modify(nojar=1)
-
-        try:
-            transaction.commit()
-        except TestTxnException: pass
-
-        assert self.nosub1._p_jar.ctpc_finish == 0
-        assert self.nosub1._p_jar.ccommit == 1
-        assert self.nosub1._p_jar.ctpc_abort == 1
-
-    def testExceptionInTpcVote(self):
-
-        self.sub1._p_jar = SubTransactionJar(errors='tpc_vote')
-
-        self.nosub1.modify()
-        self.sub1.modify(nojar=1)
-
-        try:
-            transaction.commit()
-        except TestTxnException: pass
-
-        assert self.nosub1._p_jar.ctpc_finish == 0
-        assert self.nosub1._p_jar.ccommit == 1
-        assert self.nosub1._p_jar.ctpc_abort == 1
-        assert self.sub1._p_jar.ctpc_abort == 1
-
-    def testExceptionInTpcBegin(self):
-        """
-        This test reveals a bug in the transaction manager,
-        as the nosub tpc_abort there is ignored.
-
-        nosub calling method tpc_begin
-        nosub calling method commit
-        sub calling method tpc_begin
-        sub calling method abort
-        sub calling method tpc_abort
-        nosub calling method tpc_abort
-        """
-        self.sub1._p_jar = SubTransactionJar(errors='tpc_begin')
-
-        self.nosub1.modify()
-        self.sub1.modify(nojar=1)
-
-        try:
-            transaction.commit()
-        except TestTxnException: pass
-
-        assert self.nosub1._p_jar.ctpc_abort == 1
-        assert self.sub1._p_jar.ctpc_abort == 1
-
-    def testExceptionInTpcAbort(self):
-        self.sub1._p_jar = SubTransactionJar(
-                                errors=('tpc_abort', 'tpc_vote'))
-
-        self.nosub1.modify()
-        self.sub1.modify(nojar=1)
-
-        try:
-            transaction.commit()
-        except TestTxnException:
-            pass
-
-        assert self.nosub1._p_jar.ctpc_abort == 1
-
-    ### More Failure modes...
-    # now we mix in some sub transactions
-    ###
-
-    def testExceptionInSubCommitSub(self):
-        # It's harder than normal to verify test results, because
-        # the subtransaction jars are stored in a dictionary.  The
-        # order in which jars are processed depends on the order
-        # they come out of the dictionary.
-
-        self.sub1.modify()
-        transaction.commit(1)
-
-        self.nosub1.modify()
-
-        self.sub2._p_jar = SubTransactionJar(errors='commit_sub')
-        self.sub2.modify(nojar=1)
-
-        transaction.commit(1)
-
-        self.sub3.modify()
-
-        try:
-            transaction.commit()
-        except TestTxnException:
-            pass
-
-        if self.sub1._p_jar.ccommit_sub:
-            self.assertEqual(self.sub1._p_jar.ctpc_abort, 1)
-        else:
-            self.assertEqual(self.sub1._p_jar.cabort_sub, 1)
-
-        self.assertEqual(self.sub2._p_jar.ctpc_abort, 1)
-        self.assertEqual(self.nosub1._p_jar.ctpc_abort, 1)
-
-        if self.sub3._p_jar.ccommit_sub:
-            self.assertEqual(self.sub3._p_jar.ctpc_abort, 1)
-        else:
-            self.assertEqual(self.sub3._p_jar.cabort_sub, 1)
-
-    def testExceptionInSubAbortSub(self):
-        # This test has two errors.  When commit_sub() is called on
-        # sub1, it will fail.  If sub1 is handled first, it will raise
-        # an exception and abort_sub() will be called on sub2.  If sub2
-        # is handled first, then commit_sub() will fail after sub2 has
-        # already begun its top-level transaction and tpc_abort() will
-        # be called.
-
-        self.sub1._p_jar = SubTransactionJar(errors='commit_sub')
-        self.sub1.modify(nojar=1)
-        transaction.commit(1)
-
-        self.nosub1.modify()
-        self.sub2._p_jar = SubTransactionJar(errors='abort_sub')
-        self.sub2.modify(nojar=1)
-        transaction.commit(1)
-
-        self.sub3.modify()
-
-        try:
-            transaction.commit()
-        except TestTxnException, err:
-            pass
-        else:
-            self.fail("expected transaction to fail")
-
-        # The last commit failed.  If the commit_sub() method was
-        # called, then tpc_abort() should be called to abort the
-        # actual transaction.  If not, then calling abort_sub() is
-        # sufficient.
-        if self.sub3._p_jar.ccommit_sub:
-            self.assertEqual(self.sub3._p_jar.ctpc_abort, 1)
-        else:
-            self.assertEqual(self.sub3._p_jar.cabort_sub, 1)
-
-    # last test, check the hosing mechanism
-
-##    def testHoserStoppage(self):
-##        # It's hard to test the "hosed" state of the database, where
-##        # hosed means that a failure occurred in the second phase of
-##        # the two phase commit.  It's hard because the database can
-##        # recover from such an error if it occurs during the very first
-##        # tpc_finish() call of the second phase.
-
-##        for obj in self.sub1, self.sub2:
-##            j = HoserJar(errors='tpc_finish')
-##            j.reset()
-##            obj._p_jar = j
-##            obj.modify(nojar=1)
-
-##        try:
-##            transaction.commit()
-##        except TestTxnException:
-##            pass
-
-##        self.assert_(Transaction.hosed)
-
-##        self.sub2.modify()
-
-##        try:
-##            transaction.commit()
-##        except Transaction.POSException.TransactionError:
-##            pass
-##        else:
-##            self.fail("Hosed Application didn't stop commits")
-
-
-class DataObject:
-
-    def __init__(self, nost=0):
-        self.nost = nost
-        self._p_jar = None
-
-    def modify(self, nojar=0, tracing=0):
-        if not nojar:
-            if self.nost:
-                self._p_jar = NoSubTransactionJar(tracing=tracing)
-            else:
-                self._p_jar = SubTransactionJar(tracing=tracing)
-        transaction.get().register(self)
-
-class TestTxnException(Exception):
-    pass
-
-class BasicJar:
-
-    def __init__(self, errors=(), tracing=0):
-        if not isinstance(errors, tuple):
-            errors = errors,
-        self.errors = errors
-        self.tracing = tracing
-        self.cabort = 0
-        self.ccommit = 0
-        self.ctpc_begin = 0
-        self.ctpc_abort = 0
-        self.ctpc_vote = 0
-        self.ctpc_finish = 0
-        self.cabort_sub = 0
-        self.ccommit_sub = 0
-
-    def __repr__(self):
-        return "<%s %X %s>" % (self.__class__.__name__,
-                               positive_id(self),
-                               self.errors)
-
-    def sortKey(self):
-        # All these jars use the same sort key, and Python's list.sort()
-        # is stable.  These two facts mean the jars are committed in
-        # the order they were registered.
-        return self.__class__.__name__
-
-    def check(self, method):
-        if self.tracing:
-            print '%s calling method %s' % (str(self.tracing), method)
-
-        if method in self.errors:
-            raise TestTxnException("error %s" % method)
-
-    ## basic jar txn interface
-
-    def abort(self, *args):
-        self.check('abort')
-        self.cabort += 1
-
-    def commit(self, *args):
-        self.check('commit')
-        self.ccommit += 1
-
-    def tpc_begin(self, txn, sub=0):
-        self.check('tpc_begin')
-        self.ctpc_begin += 1
-
-    def tpc_vote(self, *args):
-        self.check('tpc_vote')
-        self.ctpc_vote += 1
-
-    def tpc_abort(self, *args):
-        self.check('tpc_abort')
-        self.ctpc_abort += 1
-
-    def tpc_finish(self, *args):
-        self.check('tpc_finish')
-        self.ctpc_finish += 1
-
-class SubTransactionJar(BasicJar):
-
-    def abort_sub(self, txn):
-        self.check('abort_sub')
-        self.cabort_sub = 1
-
-    def commit_sub(self, txn):
-        self.check('commit_sub')
-        self.ccommit_sub = 1
-
-class NoSubTransactionJar(BasicJar):
-    pass
-
-class HoserJar(BasicJar):
-
-    # The HoserJars coordinate their actions via the class variable
-    # committed.  The check() method will only raise its exception
-    # if committed > 0.
-
-    committed = 0
-
-    def reset(self):
-        # Calling reset() on any instance will reset the class variable.
-        HoserJar.committed = 0
-
-    def check(self, method):
-        if HoserJar.committed > 0:
-            BasicJar.check(self, method)
-
-    def tpc_finish(self, *args):
-        self.check('tpc_finish')
-        self.ctpc_finish += 1
-        HoserJar.committed += 1
-
-
-def test_join():
-    """White-box test of the join method
-
-    The join method is provided for "backward-compatability" with ZODB 4
-    data managers.
-
-    The argument to join must be a ZODB 4 data manager, i.e. an
-    object providing transaction.interfaces.IDataManager.
-
-    >>> from ZODB.tests.sampledm import DataManager
-    >>> from transaction._transaction import DataManagerAdapter
-    >>> t = transaction.Transaction()
-    >>> dm = DataManager()
-    >>> t.join(dm)
-
-    The end result is that a data manager adapter is one of the
-    transaction's objects:
-
-    >>> isinstance(t._resources[0], DataManagerAdapter)
-    True
-    >>> t._resources[0]._datamanager is dm
-    True
-
-    """
-
-def test_suite():
-    from doctest import DocTestSuite
-    return unittest.TestSuite((
-        DocTestSuite(),
-        unittest.makeSuite(TransactionTests),
-        ))
-
-
-if __name__ == '__main__':
-    unittest.TextTestRunner().run(test_suite())
diff --git a/branches/bug1734/src/transaction/tests/test_util.py b/branches/bug1734/src/transaction/tests/test_util.py
deleted file mode 100644
index 232103a6..00000000
--- a/branches/bug1734/src/transaction/tests/test_util.py
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test transaction utilities
-
-$Id$
-"""
-import unittest
-from doctest import DocTestSuite
-
-def test_suite():
-    return DocTestSuite('transaction.util')
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/transaction/util.py b/branches/bug1734/src/transaction/util.py
deleted file mode 100644
index 23689477..00000000
--- a/branches/bug1734/src/transaction/util.py
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Utility classes or functions
-
-$Id$
-"""
-
-from transaction.interfaces import IRollback
-
-try:
-    from zope.interface import implements
-except ImportError:
-    def implements(*args):
-        pass
-
-class NoSavepointSupportRollback:
-    """Rollback for data managers that don't support savepoints
-
-    >>> class DataManager:
-    ...     def savepoint(self, txn):
-    ...         return NoSavepointSupportRollback(self)
-    >>> rb = DataManager().savepoint('some transaction')
-    >>> rb.rollback()
-    Traceback (most recent call last):
-    ...
-    NotImplementedError: """ \
-           """DataManager data managers do not support """ \
-           """savepoints (aka subtransactions
-
-    """
-
-    implements(IRollback)
-
-    def __init__(self, dm):
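-        # Store only the class name; it is interpolated into the
-        # NotImplementedError message raised by rollback().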
-        self.dm = dm.__class__.__name__
-
-    def rollback(self):
-        raise NotImplementedError(
-            "%s data managers do not support savepoints (aka subtransactions"
-            % self.dm)
diff --git a/branches/bug1734/src/zdaemon/DEPENDENCIES.cfg b/branches/bug1734/src/zdaemon/DEPENDENCIES.cfg
deleted file mode 100644
index cf349852..00000000
--- a/branches/bug1734/src/zdaemon/DEPENDENCIES.cfg
+++ /dev/null
@@ -1 +0,0 @@
-ZConfig
diff --git a/branches/bug1734/src/zdaemon/SETUP.cfg b/branches/bug1734/src/zdaemon/SETUP.cfg
deleted file mode 100644
index 7de7fea4..00000000
--- a/branches/bug1734/src/zdaemon/SETUP.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-script  zdctl.py
-script  zdrun.py
diff --git a/branches/bug1734/src/zdaemon/__init__.py b/branches/bug1734/src/zdaemon/__init__.py
deleted file mode 100644
index 50f67a45..00000000
--- a/branches/bug1734/src/zdaemon/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""zdaemon -- a package to manage a daemon application."""
diff --git a/branches/bug1734/src/zdaemon/component.xml b/branches/bug1734/src/zdaemon/component.xml
deleted file mode 100644
index fe78453d..00000000
--- a/branches/bug1734/src/zdaemon/component.xml
+++ /dev/null
@@ -1,275 +0,0 @@
-<component>
-
-  <!-- Note on logging configuration:
-
-       This schema component expects to use a section type named
-       "eventlog"; this type needs to be provided by some other
-       component that the top-level schema needs to import.
-
-       The import is not performed here to allow applications to
-       load the type from different components.
-    -->
-
-  <sectiontype name="runner">
-
-    <description>
-      This section describes the options for zdctl.py and zdrun.py.
-      The only required option is "program".  Many other options have
-      no default value specified in the schema; in some cases, the
-      program calculates a dynamic default, in others, the feature
-      associated with the option is disabled.
-
-      For those options that also have corresponding command-line
-      options, the command line option (short and long form) are given
-      here too.
-    </description>
-
-    <section name="*" type="ZConfig.logger.log"
-             attribute="eventlog"
-             required="no">
-      <description>
-        Log configuration for zdctl.py and zdrun.py.  These
-        applications will normally use the eventlog section at the top
-        level of the configuration, but will use this eventlog section
-        if it exists.
-
-        (This is done so that the combined schema for the runner and
-        the controlled application will write to the same logs by
-        default, but a separation of logs can be achieved if desired.)
-      </description>
-    </section>
-
-    <key name="program" datatype="string-list"
-         required="yes">
-      <description>
-        Command-line option: -p or --program (zdctl.py only).
-
-        This option gives the command used to start the subprocess
-        managed by zdrun.py.  This is currently a simple list of
-        whitespace-delimited words. The first word is the program
-        file, subsequent words are its command line arguments.  If the
-        program file contains no slashes, it is searched using $PATH.
-        (XXX There is no way to include whitespace in the program
-        file or an argument, and under certain circumstances other
-        shell metacharacters are also a problem, e.g. the "foreground"
-        command of zdctl.py.)
-
-        NOTE: zdrun.py doesn't use this option; it uses its positional
-        arguments.  Rather, zdctl.py uses this option to determine the
-        positional argument with which to invoke zdrun.py.  (XXX This
-        could be better.)
-      </description>
-    </key>
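-
-    <!-- Example (see sample.conf):  program sleep 100  -->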
-
-    <key name="python" datatype="existing-path"
-         required="no">
-      <description>
-        Path to the Python interpreter.  Used by zdctl.py to start the
-        zdrun.py process.  Defaults to sys.executable.
-      </description>
-    </key>
-
-    <key name="zdrun" datatype="existing-path"
-         required="no">
-      <description>
-        Path to the zdrun.py script.  Used by zdctl.py to start the
-        zdrun.py process.  Defaults to a file named "zdrun.py" in the
-        same directory as zdctl.py.
-      </description>
-    </key>
-
-    <key name="socket-name" datatype="existing-dirpath"
-         required="no"
-         default="zdsock">
-      <description>
-        Command-line option: -s or --socket-name.
-
-        The pathname of the Unix domain socket used for communication
-        between zdctl.py and zdrun.py.  The default is relative to the
-        current directory in which zdctl.py and zdrun.py are started.
-        You want to specify an absolute pathname here.
-      </description>
-    </key>
-
-    <key name="daemon" datatype="boolean"
-         required="no"
-         default="false">
-      <description>
-        Command-line option: -d or --daemon.
-
-        If this option is true, zdrun.py runs in the background as a
-        true daemon.  It forks a child process which becomes the
-        subprocess manager, while the parent exits (making the shell
-        that started it believe it is done).  The child process also
-        does the following:
-
-        - if the directory option is set, change into that directory
-
-        - redirect stdin, stdout and stderr to /dev/null
-
-        - call setsid() so it becomes a session leader
-
-        - call umask() with specified value
-      </description>
-    </key>
-
-    <key name="directory" datatype="existing-directory"
-         required="no">
-      <description>
-        Command-line option: -z or --directory.
-
-        If the daemon option is true, this option can specify a
-        directory into which zdrun.py changes as part of the
-        "daemonizing".  If the daemon option is false, this option is
-        ignored.
-      </description>
-    </key>
-
-    <key name="backoff-limit" datatype="integer"
-         required="no"
-         default="10">
-      <description>
-        Command-line option: -b or --backoff-limit.
-
-        When the subprocess crashes, zdrun.py inserts a one-second
-        delay before it restarts it.  When the subprocess crashes
-        again right away, the delay is incremented by one second, and
-        so on.  What happens when the delay has reached the value of
-        backoff-limit (in seconds), depends on the value of the
-        forever option.  If forever is false, zdrun.py gives up at
-        this point, and exits.  An always-crashing subprocess will
-        have been restarted exactly backoff-limit times in this case.
-        If forever is true, zdrun.py continues to attempt to restart
-        the process, keeping the delay at backoff-limit seconds.
-
-        If the subprocess stays up for more than backoff-limit
-        seconds, the delay is reset to 1 second.
-      </description>
-    </key>
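-
-    <!-- Example: with backoff-limit 3 and forever false, an
-         always-crashing program is restarted after delays of 1, 2
-         and 3 seconds, then zdrun.py gives up (3 restarts total). -->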
-
-    <key name="forever" datatype="boolean"
-         required="no"
-         default="false">
-      <description>
-        Command-line option: -f or --forever.
-
-        If this option is true, zdrun.py will keep restarting a
-        crashing subprocess forever.  If it is false, it will give up
-        after backoff-limit crashes in a row.  See the description of
-        backoff-limit for details.
-      </description>
-    </key>
-
-    <key name="exit-codes" datatype="zdaemon.zdoptions.list_of_ints"
-         required="no"
-         default="0,2">
-      <description>
-        Command-line option: -x or --exit-codes.
-
-        If the subprocess exits with an exit status that is equal to
-        one of the integers in this list, zdrun.py will not restart
-        it.  The default list requires some explanation.  Exit status
-        0 is considered a willful successful exit; the ZEO and Zope
-        server processes use this exit status when they want to stop
-        without being restarted.  (Including in response to a
-        SIGTERM.)  Exit status 2 is typically issued for command line
-        syntax errors; in this case, restarting the program will not
-        help!
-
-        NOTE: this mechanism overrides the backoff-limit and forever
-        options; i.e. even if forever is true, a subprocess exit
-        status code in this list makes zdrun.py give up.  To disable
-        this, change the value to an empty list.
-      </description>
-    </key>
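-
-    <!-- Example: with the default "0,2", a clean exit (status 0) or a
-         command-line usage error (status 2) is final; set this to an
-         empty list to have zdrun.py always restart the program. -->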
-
-    <key name="user" datatype="string"
-         required="no">
-      <description>
-        Command-line option: -u or --user.
-
-        When zdrun.py is started by root, this option specifies the
-        user as whom the zdrun.py process (and hence the daemon
-        subprocess) will run.  This can be a user name or a numeric
-        user id.  Both the user and the group are set from the
-        corresponding password entry, using setuid() and setgid().
-        This is done before zdrun.py does anything else besides
-        parsing its command line arguments.
-
-        NOTE: when zdrun.py is not started by root, specifying this
-        option is an error.  (XXX This may be a mistake.)
-
-        XXX The zdrun.py event log file may be opened *before*
-        setuid() is called.  Is this good or bad?
-      </description>
-    </key>
-
-    <key name="umask" datatype="zdaemon.zdoptions.octal_type"
-         required="no"
-         default="022">
-      <description>
-        Command-line option: -m or --umask.
-
-        When daemon mode is used, this option specifies the octal umask
-        of the subprocess.
-      </description>
-    </key>
-
-    <key name="hang-around" datatype="boolean"
-         required="no"
-         default="false">
-      <description>
-        If this option is true, the zdrun.py process will remain even
-        when the daemon subprocess is stopped.  In this case, zdctl.py
-        will restart zdrun.py as necessary.  If this option is false,
-        zdrun.py will exit when the daemon subprocess is stopped
-        (unless zdrun.py intends to restart it).
-      </description>
-    </key>
-
-    <key name="default-to-interactive" datatype="boolean"
-         required="no"
-         default="true">
-      <description>
-        If this option is true, zdctl.py enters interactive mode
-        when it is invoked without a positional command argument.  If
-        it is false, you must use the -i or --interactive command line
-        option to zdctl.py to enter interactive mode.
-      </description>
-    </key>
-
-    <key name="logfile" datatype="existing-dirpath"
-         required="no">
-      <description>
-        This option specifies a log file that is the default target of
-        the "logtail" zdctl.py command.
-
-        NOTE: This is NOT the log file to which zdrun.py writes its
-        logging messages!  That log file is specified by the
-        &lt;eventlog&gt; section.
-      </description>
-    </key>
-
-    <key name="transcript" datatype="existing-dirpath"
-         required="no">
-      <description>
-        The name of a file in which a transcript of all output from
-        the command being run will be written when daemonized.
-
-        If not specified, output from the command will be discarded.
-
-        This only takes effect when the "daemon" option is enabled.
-      </description>
-    </key>
-
-    <key name="prompt" datatype="string"
-         required="no">
-       <description>
-         The prompt shown by the controller program.  The default must
-         be provided by the application.
-       </description>
-    </key>
-
-  </sectiontype>
-
-</component>
diff --git a/branches/bug1734/src/zdaemon/sample.conf b/branches/bug1734/src/zdaemon/sample.conf
deleted file mode 100644
index d76cef73..00000000
--- a/branches/bug1734/src/zdaemon/sample.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-# Sample config file for zdctl.py and zdrun.py (which share a schema).
-
-<runner>
-  # Harmless example
-  program       sleep 100
-  # Repeat the defaults
-  backoff-limit 10
-  daemon	True
-  forever	True
-  socket-name	zdsock
-  exit-codes	0,2
-  # user has no default
-  umask		022
-  directory	.
-  default-to-interactive True
-  hang-around   False
-</runner>
-
-<eventlog>
-  level info
-  <logfile>
-    path /tmp/zdrun.log
-  </logfile>
-</eventlog>
diff --git a/branches/bug1734/src/zdaemon/schema.xml b/branches/bug1734/src/zdaemon/schema.xml
deleted file mode 100644
index ffffeb01..00000000
--- a/branches/bug1734/src/zdaemon/schema.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<schema>
-
-  <description>
-    This schema describes various options that control zdctl.py and
-    zdrun.py.  zdrun.py is the "daemon process manager"; it runs a
-    subprocess in the background and restarts it when it crashes.
-    zdctl.py is the user interface to zdrun.py; it can tell zdrun.py
-    to start, stop or restart the subprocess, send it a signal, etc.
-
-    There are two sections: &lt;runner&gt; defines options unique to
-    zdctl.py and zdrun.py, and &lt;eventlog&gt; defines a standard
-    event logging section used by zdrun.py.
-
-    More information about zdctl.py and zdrun.py can be found in the
-    file Doc/zdctl.txt.  All of this is specific to Unix/Linux.
-  </description>
-
-  <import package="ZConfig.components.logger"/>
-
-  <import package="zdaemon"/>
-
-  <section name="*" type="runner" attribute="runner" required="yes" />
-
-  <section name="*" type="eventlog" attribute="eventlog" required="no" />
-
-</schema>
diff --git a/branches/bug1734/src/zdaemon/tests/__init__.py b/branches/bug1734/src/zdaemon/tests/__init__.py
deleted file mode 100644
index 46b66bba..00000000
--- a/branches/bug1734/src/zdaemon/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is needed to make this a package.
diff --git a/branches/bug1734/src/zdaemon/tests/donothing.sh b/branches/bug1734/src/zdaemon/tests/donothing.sh
deleted file mode 100755
index a56d7f2c..00000000
--- a/branches/bug1734/src/zdaemon/tests/donothing.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-while [ "1" -ne "2" ]; do
-   sleep 10
-done
-
-    
diff --git a/branches/bug1734/src/zdaemon/tests/nokill.py b/branches/bug1734/src/zdaemon/tests/nokill.py
deleted file mode 100755
index a50976f7..00000000
--- a/branches/bug1734/src/zdaemon/tests/nokill.py
+++ /dev/null
@@ -1,8 +0,0 @@
-#! /usr/bin/env python
-
-import signal
-
-signal.signal(signal.SIGTERM, signal.SIG_IGN)
-
-while 1:
-    signal.pause()
diff --git a/branches/bug1734/src/zdaemon/tests/parent.py b/branches/bug1734/src/zdaemon/tests/parent.py
deleted file mode 100644
index 859838ba..00000000
--- a/branches/bug1734/src/zdaemon/tests/parent.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import time
-import os
-import sys
-
-def main():
-    # dummy zdctl startup of zdrun
-    shutup()
-    file = os.path.normpath(os.path.abspath(sys.argv[0]))
-    dir = os.path.dirname(file)
-    zctldir = os.path.dirname(dir)
-    zdrun = os.path.join(zctldir, 'zdrun.py')
-    args = [sys.executable, zdrun]
-    args += ['-d', '-b', '10', '-s', os.path.join(dir, 'testsock'),
-             '-x', '0,2', '-z', dir, os.path.join(dir, 'donothing.sh')]
-    flag = os.P_NOWAIT
-    #cmd = ' '.join([sys.executable] + args)
-    #print cmd
-    os.spawnvp(flag, args[0], args)
-    while 1:
-        # wait to be signaled
-        time.sleep(1)
-
-def shutup():
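-    # Close the real stdio descriptors and rebind Python's streams to
-    # /dev/null so this dummy zdctl produces no terminal output.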
-    os.close(0)
-    sys.stdin = sys.__stdin__ = open("/dev/null")
-    os.close(1)
-    sys.stdout = sys.__stdout__ = open("/dev/null", "w")
-    os.close(2)
-    sys.stderr = sys.__stderr__ = open("/dev/null", "w")
-
-if __name__ == '__main__':
-    main()
diff --git a/branches/bug1734/src/zdaemon/tests/testzdoptions.py b/branches/bug1734/src/zdaemon/tests/testzdoptions.py
deleted file mode 100644
index ac8d3b67..00000000
--- a/branches/bug1734/src/zdaemon/tests/testzdoptions.py
+++ /dev/null
@@ -1,294 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-
-"""Test suite for zdaemon.zdoptions."""
-
-import os
-import sys
-import tempfile
-import unittest
-from StringIO import StringIO
-
-import ZConfig
-import zdaemon
-from zdaemon.zdoptions import ZDOptions
-
-class ZDOptionsTestBase(unittest.TestCase):
-
-    OptionsClass = ZDOptions
-
-    def save_streams(self):
-        self.save_stdout = sys.stdout
-        self.save_stderr = sys.stderr
-        sys.stdout = self.stdout = StringIO()
-        sys.stderr = self.stderr = StringIO()
-
-    def restore_streams(self):
-        sys.stdout = self.save_stdout
-        sys.stderr = self.save_stderr
-
-    def check_exit_code(self, options, args):
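-        # Realizing these args should fail with sys.exit(2), the
-        # conventional exit status for command-line usage errors.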
-        save_sys_stderr = sys.stderr
-        try:
-            sys.stderr = StringIO()
-            try:
-                options.realize(args)
-            except SystemExit, err:
-                self.assertEqual(err.code, 2)
-            else:
-                self.fail("SystemExit expected")
-        finally:
-            sys.stderr = save_sys_stderr
-
-
-class TestZDOptions(ZDOptionsTestBase):
-
-    input_args = ["arg1", "arg2"]
-    output_opts = []
-    output_args = ["arg1", "arg2"]
-
-    def test_basic(self):
-        progname = "progname"
-        doc = "doc"
-        options = self.OptionsClass()
-        options.positional_args_allowed = 1
-        options.schemadir = os.path.dirname(zdaemon.__file__)
-        options.realize(self.input_args, progname, doc)
-        self.assertEqual(options.progname, "progname")
-        self.assertEqual(options.doc, "doc")
-        self.assertEqual(options.options, self.output_opts)
-        self.assertEqual(options.args, self.output_args)
-
-    def test_configure(self):
-        configfile = os.path.join(os.path.dirname(zdaemon.__file__),
-                                  "sample.conf")
-        for arg in "-C", "--c", "--configure":
-            options = self.OptionsClass()
-            options.realize([arg, configfile])
-            self.assertEqual(options.configfile, configfile)
-
-    def test_help(self):
-        for arg in "-h", "--h", "--help":
-            options = self.OptionsClass()
-            try:
-                self.save_streams()
-                try:
-                    options.realize([arg])
-                finally:
-                    self.restore_streams()
-            except SystemExit, err:
-                self.assertEqual(err.code, 0)
-            else:
-                self.fail("%s didn't call sys.exit()" % repr(arg))
-
-    def test_unrecognized(self):
-        # Check that we get an error for an unrecognized option
-        self.check_exit_code(self.OptionsClass(), ["-/"])
-
-
-class TestBasicFunctionality(TestZDOptions):
-
-    def test_no_positional_args(self):
-        # Check that we get an error for positional args when they
-        # haven't been enabled.
-        self.check_exit_code(self.OptionsClass(), ["A"])
-
-    def test_positional_args(self):
-        options = self.OptionsClass()
-        options.positional_args_allowed = 1
-        options.realize(["A", "B"])
-        self.assertEqual(options.args, ["A", "B"])
-
-    def test_positional_args_empty(self):
-        options = self.OptionsClass()
-        options.positional_args_allowed = 1
-        options.realize([])
-        self.assertEqual(options.args, [])
-
-    def test_positional_args_unknown_option(self):
-        # Make sure an unknown option doesn't become a positional arg.
-        options = self.OptionsClass()
-        options.positional_args_allowed = 1
-        self.check_exit_code(options, ["-o", "A", "B"])
-
-    def test_conflicting_flags(self):
-        # Check that we get an error for flags which compete over the
-        # same option setting.
-        options = self.OptionsClass()
-        options.add("setting", None, "a", flag=1)
-        options.add("setting", None, "b", flag=2)
-        self.check_exit_code(options, ["-a", "-b"])
-
-    def test_handler_simple(self):
-        # Test that a handler is called; use one that doesn't return None.
-        options = self.OptionsClass()
-        options.add("setting", None, "a:", handler=int)
-        options.realize(["-a2"])
-        self.assertEqual(options.setting, 2)
-
-    def test_handler_side_effect(self):
-        # Test that a handler is called and conflicts are not
-        # signalled when it returns None.
-        options = self.OptionsClass()
-        L = []
-        options.add("setting", None, "a:", "append=", handler=L.append)
-        options.realize(["-a2", "--append", "3"])
-        self.assert_(options.setting is None)
-        self.assertEqual(L, ["2", "3"])
-
-    def test_handler_with_bad_value(self):
-        options = self.OptionsClass()
-        options.add("setting", None, "a:", handler=int)
-        self.check_exit_code(options, ["-afoo"])
-
-    def test_raise_getopt_errors(self):
-        options = self.OptionsClass()
-        # note that we do not add "a" to the list of options;
-        # if raise_getopt_errors was true, this test would error
-        options.realize(["-afoo"], raise_getopt_errs=False)
-        # check_exit_code realizes the options with raise_getopt_errs=True
-        self.check_exit_code(options, ['-afoo'])
-
-
-class EnvironmentOptions(ZDOptionsTestBase):
-
-    saved_schema = None
-
-    class OptionsClass(ZDOptions):
-        def __init__(self):
-            ZDOptions.__init__(self)
-            self.add("opt", "opt", "o:", "opt=",
-                     default=42, handler=int, env="OPT")
-
-        def load_schema(self):
-            # Doing this here avoids needing a separate file for the schema:
-            if self.schema is None:
-                if EnvironmentOptions.saved_schema is None:
-                    schema = ZConfig.loadSchemaFile(StringIO("""\
-                        <schema>
-                          <key name='opt' datatype='integer' default='12'/>
-                        </schema>
-                        """))
-                    EnvironmentOptions.saved_schema = schema
-                self.schema = EnvironmentOptions.saved_schema
-
-        def load_configfile(self):
-            if getattr(self, "configtext", None):
-                self.configfile = tempfile.mktemp()
-                f = open(self.configfile, 'w')
-                f.write(self.configtext)
-                f.close()
-                try:
-                    ZDOptions.load_configfile(self)
-                finally:
-                    os.unlink(self.configfile)
-            else:
-                ZDOptions.load_configfile(self)
-
-    # Save and restore the environment around each test:
-
-    def setUp(self):
-        self._oldenv = os.environ
-        env = {}
-        for k, v in os.environ.items():
-            env[k] = v
-        os.environ = env
-
-    def tearDown(self):
-        os.environ = self._oldenv
-
-    def create_with_config(self, text):
-        options = self.OptionsClass()
-        zdpkgdir = os.path.dirname(os.path.abspath(zdaemon.__file__))
-        options.schemadir = os.path.join(zdpkgdir, 'tests')
-        options.schemafile = "envtest.xml"
-        # configfile must be set for ZDOptions to use ZConfig:
-        if text:
-            options.configfile = "not used"
-            options.configtext = text
-        return options
-
-
-class TestZDOptionsEnvironment(EnvironmentOptions):
-
-    def test_with_environment(self):
-        os.environ["OPT"] = "2"
-        self.check_from_command_line()
-        options = self.OptionsClass()
-        options.realize([])
-        self.assertEqual(options.opt, 2)
-
-    def test_without_environment(self):
-        self.check_from_command_line()
-        options = self.OptionsClass()
-        options.realize([])
-        self.assertEqual(options.opt, 42)
-
-    def check_from_command_line(self):
-        for args in (["-o1"], ["--opt", "1"]):
-            options = self.OptionsClass()
-            options.realize(args)
-            self.assertEqual(options.opt, 1)
-
-    def test_with_bad_environment(self):
-        os.environ["OPT"] = "Spooge!"
-        # make sure the bad value is ignored if the command-line is used:
-        self.check_from_command_line()
-        options = self.OptionsClass()
-        try:
-            self.save_streams()
-            try:
-                options.realize([])
-            finally:
-                self.restore_streams()
-        except SystemExit, e:
-            self.assertEqual(e.code, 2)
-        else:
-            self.fail("expected SystemExit")
-
-    def test_environment_overrides_configfile(self):
-        options = self.create_with_config("opt 3")
-        options.realize([])
-        self.assertEqual(options.opt, 3)
-
-        os.environ["OPT"] = "2"
-        options = self.create_with_config("opt 3")
-        options.realize([])
-        self.assertEqual(options.opt, 2)
-
-
-class TestCommandLineOverrides(EnvironmentOptions):
-
-    def test_simple_override(self):
-        options = self.create_with_config("# empty config")
-        options.realize(["-X", "opt=-2"])
-        self.assertEqual(options.opt, -2)
-
-    def test_error_propagation(self):
-        self.check_exit_code(self.create_with_config("# empty"),
-                             ["-Xopt=1", "-Xopt=2"])
-        self.check_exit_code(self.create_with_config("# empty"),
-                             ["-Xunknown=foo"])
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    for cls in [TestBasicFunctionality,
-                TestZDOptionsEnvironment,
-                TestCommandLineOverrides]:
-        suite.addTest(unittest.makeSuite(cls))
-    return suite
-
-if __name__ == "__main__":
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zdaemon/tests/testzdrun.py b/branches/bug1734/src/zdaemon/tests/testzdrun.py
deleted file mode 100644
index 5485da4f..00000000
--- a/branches/bug1734/src/zdaemon/tests/testzdrun.py
+++ /dev/null
@@ -1,299 +0,0 @@
-"""Test suite for zdrun.py."""
-
-import os
-import sys
-import time
-import signal
-import tempfile
-import unittest
-import socket
-
-from StringIO import StringIO
-
-import ZConfig
-
-from zdaemon import zdrun, zdctl
-
-
-class ConfiguredOptions:
-    """Options class that loads configuration from a specified string.
-
-    This always loads from the string, regardless of any -C option
-    that may be given.
-    """
-
-    def set_configuration(self, configuration):
-        self.__configuration = configuration
-        self.configfile = "<preloaded string>"
-
-    def load_configfile(self):
-        sio = StringIO(self.__configuration)
-        cfg = ZConfig.loadConfigFile(self.schema, sio, self.zconfig_options)
-        self.configroot, self.confighandlers = cfg
-
-
-class ConfiguredZDRunOptions(ConfiguredOptions, zdrun.ZDRunOptions):
-
-    def __init__(self, configuration):
-        zdrun.ZDRunOptions.__init__(self)
-        self.set_configuration(configuration)
-
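-# A sketch of how the tests below use this class: the configuration is
-# supplied inline instead of via a -C file (the <runner> text here is
-# illustrative).
-#
-#   options = ConfiguredZDRunOptions("<runner>\n  program /bin/true\n</runner>")
-#   options.realize(["/bin/true"])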
-
-class ZDaemonTests(unittest.TestCase):
-
-    python = os.path.abspath(sys.executable)
-    assert os.path.exists(python)
-    here = os.path.abspath(os.path.dirname(__file__))
-    assert os.path.isdir(here)
-    nokill = os.path.join(here, "nokill.py")
-    assert os.path.exists(nokill)
-    parent = os.path.dirname(here)
-    zdrun = os.path.join(parent, "zdrun.py")
-    assert os.path.exists(zdrun)
-
-    ppath = os.pathsep.join(sys.path)
-
-    def setUp(self):
-        self.zdsock = tempfile.mktemp()
-        self.new_stdout = StringIO()
-        self.save_stdout = sys.stdout
-        sys.stdout = self.new_stdout
-        self.expect = ""
-
-    def tearDown(self):
-        sys.stdout = self.save_stdout
-        for sig in (signal.SIGTERM,
-                    signal.SIGHUP,
-                    signal.SIGINT,
-                    signal.SIGCHLD):
-            signal.signal(sig, signal.SIG_DFL)
-        try:
-            os.unlink(self.zdsock)
-        except os.error:
-            pass
-        output = self.new_stdout.getvalue()
-        self.assertEqual(self.expect, output)
-
-    def quoteargs(self, args):
-        for i in range(len(args)):
-            if " " in args[i]:
-                args[i] = '"%s"' % args[i]
-        return " ".join(args)
-
-    def rundaemon(self, args):
-        # Add quotes, in case some pathname contains spaces (e.g. Mac OS X)
-        args = self.quoteargs(args)
-        cmd = ('PYTHONPATH="%s" "%s" "%s" -d -s "%s" %s' %
-               (self.ppath, self.python, self.zdrun, self.zdsock, args))
-        os.system(cmd)
-        # When the daemon crashes, the following may help debug it:
-        ##os.system("PYTHONPATH=%s %s %s -s %s %s &" %
-        ##    (self.ppath, self.python, self.zdrun, self.zdsock, args))
-
-    def _run(self, args, cmdclass=None):
-        if type(args) is type(""):
-            args = args.split()
-        try:
-            zdctl.main(["-s", self.zdsock] + args, cmdclass=cmdclass)
-        except SystemExit:
-            pass
-
-    def testCmdclassOverride(self):
-        class MyCmd(zdctl.ZDCmd):
-            def do_sproing(self, rest):
-                print rest
-        self._run("-p echo sproing expected", cmdclass=MyCmd)
-        self.expect = "expected\n"
-
-    def testSystem(self):
-        self.rundaemon(["echo", "-n"])
-        self.expect = ""
-
-##     def testInvoke(self):
-##         self._run("echo -n")
-##         self.expect = ""
-
-##     def testControl(self):
-##         self.rundaemon(["sleep", "1000"])
-##         time.sleep(1)
-##         self._run("stop")
-##         time.sleep(1)
-##         self._run("exit")
-##         self.expect = "Sent SIGTERM\nExiting now\n"
-
-##     def testStop(self):
-##         self.rundaemon([self.python, self.nokill])
-##         time.sleep(1)
-##         self._run("stop")
-##         time.sleep(1)
-##         self._run("exit")
-##         self.expect = "Sent SIGTERM\nSent SIGTERM; will exit later\n"
-
-    def testHelp(self):
-        self._run("-h")
-        import __main__
-        self.expect = __main__.__doc__
-
-    def testOptionsSysArgv(self):
-        # Check that options are parsed from sys.argv by default
-        options = zdrun.ZDRunOptions()
-        save_sys_argv = sys.argv
-        try:
-            sys.argv = ["A", "B", "C"]
-            options.realize()
-        finally:
-            sys.argv = save_sys_argv
-        self.assertEqual(options.options, [])
-        self.assertEqual(options.args, ["B", "C"])
-
-    def testOptionsBasic(self):
-        # Check basic option parsing
-        options = zdrun.ZDRunOptions()
-        options.realize(["B", "C"], "foo")
-        self.assertEqual(options.options, [])
-        self.assertEqual(options.args, ["B", "C"])
-        self.assertEqual(options.progname, "foo")
-
-    def testOptionsHelp(self):
-        # Check that -h behaves properly
-        options = zdrun.ZDRunOptions()
-        try:
-            options.realize(["-h"], doc=zdrun.__doc__)
-        except SystemExit, err:
-            self.failIf(err.code)
-        else:
-            self.fail("SystemExit expected")
-        self.expect = zdrun.__doc__
-
-    def testSubprocessBasic(self):
-        # Check basic subprocess management: spawn, kill, wait
-        options = zdrun.ZDRunOptions()
-        options.realize(["sleep", "100"])
-        proc = zdrun.Subprocess(options)
-        self.assertEqual(proc.pid, 0)
-        pid = proc.spawn()
-        self.assertEqual(proc.pid, pid)
-        msg = proc.kill(signal.SIGTERM)
-        self.assertEqual(msg, None)
-        wpid, wsts = os.waitpid(pid, 0)
-        self.assertEqual(wpid, pid)
-        self.assertEqual(os.WIFSIGNALED(wsts), 1)
-        self.assertEqual(os.WTERMSIG(wsts), signal.SIGTERM)
-        proc.setstatus(wsts)
-        self.assertEqual(proc.pid, 0)
-
-    def testEventlogOverride(self):
-        # Make sure runner.eventlog is used if it exists
-        options = ConfiguredZDRunOptions("""\
-            <runner>
-              program /bin/true
-              <eventlog>
-                level 42
-              </eventlog>
-            </runner>
-
-            <eventlog>
-              level 35
-            </eventlog>
-            """)
-        options.realize(["/bin/true"])
-        self.assertEqual(options.config_logger.level, 42)
-
-    def testEventlogWithoutOverride(self):
-        # Make sure eventlog is used if runner.eventlog doesn't exist
-        options = ConfiguredZDRunOptions("""\
-            <runner>
-              program /bin/true
-            </runner>
-
-            <eventlog>
-              level 35
-            </eventlog>
-            """)
-        options.realize(["/bin/true"])
-        self.assertEqual(options.config_logger.level, 35)
-
-    def testRunIgnoresParentSignals(self):
-        # Spawn a process which will in turn spawn a zdrun process.
-        # We make sure that the zdrun process is still running even if
-        # its parent process receives an interrupt signal (it should
-        # not be passed to zdrun).
-        zdrun_socket = os.path.join(self.here, 'testsock')
-        zdctlpid = os.spawnvp(
-            os.P_NOWAIT,
-            sys.executable,
-            [sys.executable, os.path.join(self.here, 'parent.py')]
-            )
-        # Wait for it to start, but no longer than a minute.
-        deadline = time.time() + 60
-        is_started = False
-        while time.time() < deadline:
-            response = send_action('status\n', zdrun_socket)
-            if response is None:
-                time.sleep(0.05)
-            else:
-                is_started = True
-                break
-        self.assert_(is_started, "spawned process failed to start in a minute")
-        # Kill it, and wait a little to ensure it's dead.
-        os.kill(zdctlpid, signal.SIGINT)
-        time.sleep(0.25)
-        # Make sure the child is still responsive.
-        response = send_action('status\n', zdrun_socket)
-        self.assert_(response is not None and '\n' in response)
-        # Kill the process.
-        send_action('exit\n', zdrun_socket)
-
-    def testUmask(self):
-        path = tempfile.mktemp()
-        # With umask 666, we should create a file that we aren't able
-        # to write.  If access says no, assume that umask works.
-        try:
-            touch_cmd = "/bin/touch"
-            if not os.path.exists(touch_cmd):
-                touch_cmd = "/usr/bin/touch" # Mac OS X
-            self.rundaemon(["-m", "666", touch_cmd, path])
-            for i in range(5):
-                if not os.path.exists(path):
-                    time.sleep(0.1)
-            self.assert_(os.path.exists(path))
-            self.assert_(not os.access(path, os.W_OK))
-        finally:
-            if os.path.exists(path):
-                os.remove(path)
-
-def send_action(action, sockname):
-    """Send an action to the zdrun server and return the response.
-
-    Return None if the server is not up or any other error happened.
-    """
-    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-    try:
-        sock.connect(sockname)
-        sock.send(action + "\n")
-        sock.shutdown(1) # We're not writing any more
-        response = ""
-        while 1:
-            data = sock.recv(1000)
-            if not data:
-                break
-            response += data
-        sock.close()
-        return response
-    except socket.error, msg:
-        if str(msg) == 'AF_UNIX path too long':
-            # Mac OS X apparently has a small limit on the length of a
-            # UNIX domain socket path; re-raise so Mac OS X users see
-            # the actual problem rather than a generic failure.
-            raise
-        return None
-
-def test_suite():
-    suite = unittest.TestSuite()
-    if os.name == "posix":
-        suite.addTest(unittest.makeSuite(ZDaemonTests))
-    return suite
-
-if __name__ == '__main__':
-    __file__ = sys.argv[0]
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zdaemon/zdctl.py b/branches/bug1734/src/zdaemon/zdctl.py
deleted file mode 100755
index 7800d9a3..00000000
--- a/branches/bug1734/src/zdaemon/zdctl.py
+++ /dev/null
@@ -1,584 +0,0 @@
-#!python
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""zdctl -- control an application run by zdaemon.
-
-Usage: python zdctl.py [-C URL] [-S schema.xml] [-h] [-p PROGRAM]
-       [zdrun-options] [action [arguments]]
-
-Options:
--C/--configure URL -- configuration file or URL
--S/--schema XML Schema -- XML schema for configuration file
--h/--help -- print usage message and exit
--b/--backoff-limit SECONDS -- set backoff limit to SECONDS (default 10)
--d/--daemon -- run as a proper daemon; fork a subprocess, close files etc.
--f/--forever -- run forever (by default, exit when backoff limit is exceeded)
--i/--interactive -- start an interactive shell after executing commands
--l/--logfile -- log file to be read by logtail command
--p/--program PROGRAM -- the program to run
--s/--socket-name SOCKET -- Unix socket name for client (default "zdsock")
--u/--user USER -- run as this user (or numeric uid)
--m/--umask UMASK -- use this umask for daemon subprocess (default is 022)
--x/--exit-codes LIST -- list of fatal exit codes (default "0,2")
--z/--directory DIRECTORY -- directory to chdir to when using -d (default off)
-action [arguments] -- see below
-
-Actions are commands like "start", "stop" and "status".  If -i is
-specified or no action is specified on the command line, a "shell"
-interpreting actions typed interactively is started (unless the
-configuration option default_to_interactive is set to false).  Use the
-action "help" to find out about available actions.
-"""
-
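-# Typical invocations (a sketch; program and file names are illustrative):
-#
-#   python zdctl.py -p /usr/bin/myserver start
-#   python zdctl.py -p /usr/bin/myserver status
-#   python zdctl.py -C zdctl.conf   # no action: typically starts the shell
-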
-import os
-import re
-import cmd
-import sys
-import time
-import signal
-import socket
-import stat
-
-if __name__ == "__main__":
-    # Add the parent of the script directory to the module search path
-    # (but only when the script is run from inside the zdaemon package)
-    from os.path import dirname, basename, abspath, normpath
-    scriptdir = dirname(normpath(abspath(sys.argv[0])))
-    if basename(scriptdir).lower() == "zdaemon":
-        sys.path.append(dirname(scriptdir))
-
-from zdaemon.zdoptions import RunnerOptions
-
-
-def string_list(arg):
-    return arg.split()
-
-
-class ZDCtlOptions(RunnerOptions):
-
-    positional_args_allowed = 1
-
-    def __init__(self):
-        RunnerOptions.__init__(self)
-        self.add("schemafile", short="S:", long="schema=",
-                 default="schema.xml",
-                 handler=self.set_schemafile)
-        self.add("interactive", None, "i", "interactive", flag=1)
-        self.add("default_to_interactive", "runner.default_to_interactive",
-                 default=1)
-        self.add("program", "runner.program", "p:", "program=",
-                 handler=string_list,
-                 required="no program specified; use -p or -C")
-        self.add("logfile", "runner.logfile", "l:", "logfile=")
-        self.add("python", "runner.python")
-        self.add("zdrun", "runner.zdrun")
-        programname = os.path.basename(sys.argv[0])
-        base, ext = os.path.splitext(programname)
-        if ext == ".py":
-            programname = base
-        self.add("prompt", "runner.prompt", default=(programname + ">"))
-
-    def realize(self, *args, **kwds):
-        RunnerOptions.realize(self, *args, **kwds)
-
-        # Maybe the config file requires -i or positional args
-        if not self.args and not self.interactive:
-            if not self.default_to_interactive:
-                self.usage("either -i or an action argument is required")
-            self.interactive = 1
-
-        # Where's python?
-        if not self.python:
-            self.python = sys.executable
-
-        # Where's zdrun?
-        if not self.zdrun:
-            if __name__ == "__main__":
-                file = sys.argv[0]
-            else:
-                file = __file__
-            file = os.path.normpath(os.path.abspath(file))
-            dir = os.path.dirname(file)
-            self.zdrun = os.path.join(dir, "zdrun.py")
-
-    def set_schemafile(self, file):
-        self.schemafile = file
-
-
-
-class ZDCmd(cmd.Cmd):
-
-    def __init__(self, options):
-        self.options = options
-        self.prompt = self.options.prompt + ' '
-        cmd.Cmd.__init__(self)
-        self.get_status()
-        if self.zd_status:
-            m = re.search("(?m)^args=(.*)$", self.zd_status)
-            if m:
-                s = m.group(1)
-                args = eval(s, {"__builtins__": {}})
-                if args != self.options.program:
-                    print "WARNING! zdrun is managing a different program!"
-                    print "our program   =", self.options.program
-                    print "daemon's args =", args
-
-    def emptyline(self):
-        # We don't want a blank line to repeat the last command.
-        # Showing status is a nice alternative.
-        self.do_status()
-
-    def send_action(self, action):
-        """Send an action to the zdrun server and return the response.
-
-        Return None if the server is not up or any other error happened.
-        """
-        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        try:
-            sock.connect(self.options.sockname)
-            sock.send(action + "\n")
-            sock.shutdown(1) # We're not writing any more
-            response = ""
-            while 1:
-                data = sock.recv(1000)
-                if not data:
-                    break
-                response += data
-            sock.close()
-            return response
-        except socket.error, msg:
-            return None
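-
-    # A typical exchange (a sketch): the client sends a one-line action
-    # such as "status\n"; the server replies with key=value lines,
-    # including an "application=<pid>" line that get_status() extracts.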
-
-    def get_status(self):
-        self.zd_up = 0
-        self.zd_pid = 0
-        self.zd_status = None
-        resp = self.send_action("status")
-        if not resp:
-            return
-        m = re.search("(?m)^application=(\d+)$", resp)
-        if not m:
-            return
-        self.zd_up = 1
-        self.zd_pid = int(m.group(1))
-        self.zd_status = resp
-
-    def awhile(self, cond, msg):
-        try:
-            self.get_status()
-            while not cond():
-                sys.stdout.write(". ")
-                sys.stdout.flush()
-                time.sleep(1)
-                self.get_status()
-        except KeyboardInterrupt:
-            print "^C"
-        else:
-            print msg % self.__dict__
-
-    def help_help(self):
-        print "help          -- Print a list of available actions."
-        print "help <action> -- Print help for <action>."
-
-    def do_EOF(self, arg):
-        print
-        return 1
-
-    def help_EOF(self):
-        print "To quit, type ^D or use the quit command."
-
-    def do_start(self, arg):
-        self.get_status()
-        if not self.zd_up:
-            args = [
-                self.options.python,
-                self.options.zdrun,
-                ]
-            args += self._get_override("-S", "schemafile")
-            args += self._get_override("-C", "configfile")
-            args += self._get_override("-b", "backofflimit")
-            args += self._get_override("-d", "daemon", flag=1)
-            args += self._get_override("-f", "forever", flag=1)
-            args += self._get_override("-s", "sockname")
-            args += self._get_override("-u", "user")
-            args += self._get_override("-m", "umask")
-            args += self._get_override(
-                "-x", "exitcodes", ",".join(map(str, self.options.exitcodes)))
-            args += self._get_override("-z", "directory")
-            args.extend(self.options.program)
-            if self.options.daemon:
-                flag = os.P_NOWAIT
-            else:
-                flag = os.P_WAIT
-            os.spawnvp(flag, args[0], args)
-        elif not self.zd_pid:
-            self.send_action("start")
-        else:
-            print "daemon process already running; pid=%d" % self.zd_pid
-            return
-        self.awhile(lambda: self.zd_pid,
-                    "daemon process started, pid=%(zd_pid)d")
-
-    def _get_override(self, opt, name, svalue=None, flag=0):
-        value = getattr(self.options, name)
-        if value is None:
-            return []
-        configroot = self.options.configroot
-        if configroot is not None:
-            for n, cn in self.options.names_list:
-                if n == name and cn:
-                    v = configroot
-                    for p in cn.split("."):
-                        v = getattr(v, p, None)
-                        if v is None:
-                            break
-                    if v == value: # We didn't override anything
-                        return []
-                    break
-        if flag:
-            if value:
-                args = [opt]
-            else:
-                args = []
-        else:
-            if svalue is None:
-                svalue = str(value)
-            args = [opt, svalue]
-        return args
-
-    def help_start(self):
-        print "start -- Start the daemon process."
-        print "         If it is already running, do nothing."
-
-    def do_stop(self, arg):
-        self.get_status()
-        if not self.zd_up:
-            print "daemon manager not running"
-        elif not self.zd_pid:
-            print "daemon process not running"
-        else:
-            self.send_action("stop")
-            self.awhile(lambda: not self.zd_pid, "daemon process stopped")
-
-    def help_stop(self):
-        print "stop -- Stop the daemon process."
-        print "        If it is not running, do nothing."
-
-    def do_restart(self, arg):
-        self.get_status()
-        pid = self.zd_pid
-        if not pid:
-            self.do_start(arg)
-        else:
-            self.send_action("restart")
-            self.awhile(lambda: self.zd_pid not in (0, pid),
-                        "daemon process restarted, pid=%(zd_pid)d")
-
-    def help_restart(self):
-        print "restart -- Stop and then start the daemon process."
-
-    def do_kill(self, arg):
-        if not arg:
-            sig = signal.SIGTERM
-        else:
-            try:
-                sig = int(arg)
-            except: # int() can raise any number of exceptions
-                print "invalid signal number", `arg`
-                return
-        self.get_status()
-        if not self.zd_pid:
-            print "daemon process not running"
-            return
-        print "kill(%d, %d)" % (self.zd_pid, sig)
-        try:
-            os.kill(self.zd_pid, sig)
-        except os.error, msg:
-            print "Error:", msg
-        else:
-            print "signal %d sent to process %d" % (sig, self.zd_pid)
-
-    def help_kill(self):
-        print "kill [sig] -- Send signal sig to the daemon process."
-        print "              The default signal is SIGTERM."
-
-    def do_wait(self, arg):
-        self.awhile(lambda: not self.zd_pid, "daemon process stopped")
-        self.do_status()
-
-    def help_wait(self):
-        print "wait -- Wait for the daemon process to exit."
-
-    def do_status(self, arg=""):
-        if arg not in ["", "-l"]:
-            print "status argument must be absent or -l"
-            return
-        self.get_status()
-        if not self.zd_up:
-            print "daemon manager not running"
-        elif not self.zd_pid:
-            print "daemon manager running; daemon process not running"
-        else:
-            print "program running; pid=%d" % self.zd_pid
-        if arg == "-l" and self.zd_status:
-            print self.zd_status
-
-    def help_status(self):
-        print "status [-l] -- Print status for the daemon process."
-        print "               With -l, show raw status output as well."
-
-    def do_show(self, arg):
-        if not arg:
-            arg = "options"
-        try:
-            method = getattr(self, "show_" + arg)
-        except AttributeError, err:
-            print err
-            self.help_show()
-            return
-        method()
-
-    def show_options(self):
-        print "zdctl/zdrun options:"
-        print "schemafile:  ", repr(self.options.schemafile)
-        print "configfile:  ", repr(self.options.configfile)
-        print "interactive: ", repr(self.options.interactive)
-        print "default_to_interactive:", repr(self.options.default_to_interactive)
-        print "zdrun:       ", repr(self.options.zdrun)
-        print "python:      ", repr(self.options.python)
-        print "program:     ", repr(self.options.program)
-        print "backofflimit:", repr(self.options.backofflimit)
-        print "daemon:      ", repr(self.options.daemon)
-        print "forever:     ", repr(self.options.forever)
-        print "sockname:    ", repr(self.options.sockname)
-        print "exitcodes:   ", repr(self.options.exitcodes)
-        print "user:        ", repr(self.options.user)
-        print "umask:       ", oct(self.options.umask)
-        print "directory:   ", repr(self.options.directory)
-        print "logfile:     ", repr(self.options.logfile)
-        print "hang_around: ", repr(self.options.hang_around)
-
-    def show_python(self):
-        print "Python info:"
-        version = sys.version.replace("\n", "\n              ")
-        print "Version:     ", version
-        print "Platform:    ", sys.platform
-        print "Executable:  ", repr(sys.executable)
-        print "Arguments:   ", repr(sys.argv)
-        print "Directory:   ", repr(os.getcwd())
-        print "Path:"
-        for dir in sys.path:
-            print "    " + repr(dir)
-
-    def show_all(self):
-        self.show_options()
-        print
-        self.show_python()
-
-    def help_show(self):
-        print "show options -- show zdctl options"
-        print "show python -- show Python version and details"
-        print "show all -- show all of the above"
-
-    def complete_show(self, text, *ignored):
-        options = ["options", "python", "all"]
-        return [x for x in options if x.startswith(text)]
-
-    def do_logreopen(self, arg):
-        self.do_kill(str(signal.SIGUSR2))
-
-    def help_logreopen(self):
-        print "logreopen -- Send a SIGUSR2 signal to the daemon process."
-        print "             This is designed to reopen the log file."
-
-    def do_logtail(self, arg):
-        if not arg:
-            arg = self.options.logfile
-            if not arg:
-                print "No default log file specified; use logtail <logfile>"
-                return
-        try:
-            helper = TailHelper(arg)
-            helper.tailf()
-        except KeyboardInterrupt:
-            print
-        except IOError, msg:
-            print msg
-        except OSError, msg:
-            print msg
-
-    def help_logtail(self):
-        print "logtail [logfile] -- Run tail -f on the given logfile."
-        print "                     A default file may exist."
-        print "                     Hit ^C to exit this mode."
-
-    def do_shell(self, arg):
-        if not arg:
-            arg = os.getenv("SHELL") or "/bin/sh"
-        try:
-            os.system(arg)
-        except KeyboardInterrupt:
-            print
-
-    def help_shell(self):
-        print "shell [command] -- Execute a shell command."
-        print "                   Without a command, start an interactive sh."
-        print "An alias for this command is ! [command]"
-
-    def do_reload(self, arg):
-        if arg:
-            args = arg.split()
-            if self.options.configfile:
-                args = ["-C", self.options.configfile] + args
-        else:
-            args = None
-        options = ZDCtlOptions()
-        options.positional_args_allowed = 0
-        try:
-            options.realize(args)
-        except SystemExit:
-            print "Configuration not reloaded"
-        else:
-            self.options = options
-            if self.options.configfile:
-                print "Configuration reloaded from", self.options.configfile
-            else:
-                print "Configuration reloaded without a config file"
-
-    def help_reload(self):
-        print "reload [options] -- Reload the configuration."
-        print "    Without options, this reparses the command line."
-        print "    With options, this substitutes 'options' for the"
-        print "    command line, except that if no -C option is given,"
-        print "    the last configuration file is used."
-
-    def do_foreground(self, arg):
-        self.get_status()
-        pid = self.zd_pid
-        if pid:
-            print "To run the program in the foreground, please stop it first."
-            return
-        program = " ".join(self.options.program)
-        print program
-        try:
-            os.system(program)
-        except KeyboardInterrupt:
-            print
-
-    def do_fg(self, arg):
-        self.do_foreground(arg)
-
-    def help_foreground(self):
-        print "foreground -- Run the program in the foreground."
-        print "fg -- an alias for foreground."
-
-    def help_fg(self):
-        self.help_foreground()
-
-    def do_quit(self, arg):
-        self.get_status()
-        if not self.zd_up:
-            print "daemon manager not running"
-        elif not self.zd_pid:
-            print "daemon process not running; stopping daemon manager"
-            self.send_action("exit")
-            self.awhile(lambda: not self.zd_up, "daemon manager stopped")
-        else:
-            print "daemon process and daemon manager still running"
-        return 1
-
-    def help_quit(self):
-        print "quit -- Exit the zdctl shell."
-        print "        If the daemon process is not running,"
-        print "        stop the daemon manager."
-
-
-class TailHelper:
-
-    MAX_BUFFSIZE = 1024
-
-    def __init__(self, fname):
-        self.f = open(fname, 'r')
-
-    def tailf(self):
-        sz, lines = self.tail(10)
-        for line in lines:
-            sys.stdout.write(line)
-            sys.stdout.flush()
-        while 1:
-            newsz = self.fsize()
-            bytes_added = newsz - sz
-            if bytes_added < 0:
-                sz = 0
-                print "==> File truncated <=="
-                bytes_added = newsz
-            if bytes_added > 0:
-                self.f.seek(-bytes_added, 2)
-                bytes = self.f.read(bytes_added)
-                sys.stdout.write(bytes)
-                sys.stdout.flush()
-                sz = newsz
-            time.sleep(1)
-
-    def tail(self, max=10):
-        self.f.seek(0, 2)
-        pos = sz = self.f.tell()
-
-        lines = []
-        bytes = []
-        num_bytes = 0
-
-        while 1:
-            if pos == 0:
-                break
-            self.f.seek(pos)
-            byte = self.f.read(1)
-            if byte == '\n':
-                if len(lines) == max:
-                    break
-                bytes.reverse()
-                line = ''.join(bytes)
-                if line:
-                    lines.append(line)
-                bytes = []
-            bytes.append(byte)
-            num_bytes = num_bytes + 1
-            if num_bytes > self.MAX_BUFFSIZE:
-                break
-            pos = pos - 1
-        lines.reverse()
-        return sz, lines
-
-    def fsize(self):
-        return os.fstat(self.f.fileno())[stat.ST_SIZE]
-
-def main(args=None, options=None, cmdclass=ZDCmd):
-    if options is None:
-        options = ZDCtlOptions()
-    options.realize(args)
-    c = cmdclass(options)
-    if options.args:
-        c.onecmd(" ".join(options.args))
-    if options.interactive:
-        try:
-            import readline
-        except ImportError:
-            pass
-        print "program:", " ".join(options.program)
-        c.do_status()
-        c.cmdloop()
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/zdaemon/zdoptions.py b/branches/bug1734/src/zdaemon/zdoptions.py
deleted file mode 100644
index 64b2ca4a..00000000
--- a/branches/bug1734/src/zdaemon/zdoptions.py
+++ /dev/null
@@ -1,411 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
-"""Option processing for zdaemon and related code."""
-
-import os
-import sys
-import getopt
-
-import ZConfig
-
-class ZDOptions:
-
-    doc = None
-    progname = None
-    configfile = None
-    schemadir = None
-    schemafile = "schema.xml"
-    schema = None
-    confighandlers = None
-    configroot = None
-
-    # Class variable to control automatic processing of an <eventlog>
-    # section.  This should be the (possibly dotted) name of something
-    # accessible from configroot, typically "eventlog".
-    logsectionname = None
-    config_logger = None # The configured event logger, if any
-
-    # Class variable deciding whether positional arguments are allowed.
-    # If you want positional arguments, set this to 1 in your subclass.
-    positional_args_allowed = 0
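-
-    # For example (a sketch; cf. ZDRunOptions in zdrun.py), a subclass
-    # that accepts positional arguments and processes a runner.eventlog
-    # section would set:
-    #     positional_args_allowed = 1
-    #     logsectionname = "runner.eventlog"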
-
-    def __init__(self):
-        self.names_list = []
-        self.short_options = []
-        self.long_options = []
-        self.options_map = {}
-        self.default_map = {}
-        self.required_map = {}
-        self.environ_map = {}
-        self.zconfig_options = []
-        self.add(None, None, "h", "help", self.help)
-        self.add("configfile", None, "C:", "configure=")
-        self.add(None, None, "X:", handler=self.zconfig_options.append)
-
-    def help(self, dummy):
-        """Print a long help message (self.doc) to stdout and exit(0).
-
-        Occurrences of "%s" in self.doc are replaced by self.progname.
-        """
-        doc = self.doc
-        if doc.find("%s") >= 0:
-            doc = doc.replace("%s", self.progname)
-        print doc,
-        sys.exit(0)
-
-    def usage(self, msg):
-        """Print a brief error message to stderr and exit(2)."""
-        sys.stderr.write("Error: %s\n" % str(msg))
-        sys.stderr.write("For help, use %s -h\n" % self.progname)
-        sys.exit(2)
-
-    def remove(self,
-               name=None,               # attribute name on self
-               confname=None,           # name in ZConfig (may be dotted)
-               short=None,              # short option name
-               long=None,               # long option name
-               ):
-        """Remove all traces of name, confname, short and/or long."""
-        if name:
-            for n, cn in self.names_list[:]:
-                if n == name:
-                    self.names_list.remove((n, cn))
-            if self.default_map.has_key(name):
-                del self.default_map[name]
-            if self.required_map.has_key(name):
-                del self.required_map[name]
-        if confname:
-            for n, cn in self.names_list[:]:
-                if cn == confname:
-                    self.names_list.remove((n, cn))
-        if short:
-            key = "-" + short[0]
-            if self.options_map.has_key(key):
-                del self.options_map[key]
-        if long:
-            key = "--" + long
-            if key[-1] == "=":
-                key = key[:-1]
-            if self.options_map.has_key(key):
-                del self.options_map[key]
-
-    def add(self,
-            name=None,                  # attribute name on self
-            confname=None,              # name in ZConfig (may be dotted)
-            short=None,                 # short option name
-            long=None,                  # long option name
-            handler=None,               # handler (defaults to string)
-            default=None,               # default value
-            required=None,              # message if not provided
-            flag=None,                  # if not None, flag value
-            env=None,                   # if not None, environment variable
-            ):
-        """Add information about a configuration option.
-
-        This can take several forms:
-
-        add(name, confname)
-            Configuration option 'confname' maps to attribute 'name'
-        add(name, None, short, long)
-            Command line option '-short' or '--long' maps to 'name'
-        add(None, None, short, long, handler)
-            Command line option calls handler
-        add(name, None, short, long, handler)
-            Assign handler return value to attribute 'name'
-
-        In addition, one of the following keyword arguments may be given:
-
-        default=...  -- if not None, the default value
-        required=... -- if nonempty, an error message if no value provided
-        flag=...     -- if not None, flag value for command line option
-        env=...      -- if not None, name of environment variable that
-                        overrides the configuration file or default
-        """
-
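-        # Sketches drawn from callers elsewhere in this package:
-        #   self.add("configfile", None, "C:", "configure=")
-        #   self.add("daemon", "runner.daemon", "d", "daemon", flag=1, default=0)
-        #   self.add("opt", "opt", "o:", "opt=", default=42, handler=int,
-        #            env="OPT")
-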
-        if flag is not None:
-            if handler is not None:
-                raise ValueError, "use at most one of flag= and handler="
-            if not long and not short:
-                raise ValueError, "flag= requires a command line flag"
-            if short and short.endswith(":"):
-                raise ValueError, "flag= requires a command line flag"
-            if long and long.endswith("="):
-                raise ValueError, "flag= requires a command line flag"
-            handler = lambda arg, flag=flag: flag
-
-        if short and long:
-            if short.endswith(":") != long.endswith("="):
-                raise ValueError, "inconsistent short/long options: %r %r" % (
-                    short, long)
-
-        if short:
-            if short[0] == "-":
-                raise ValueError, "short option should not start with '-'"
-            key, rest = short[:1], short[1:]
-            if rest not in ("", ":"):
-                raise ValueError, "short option should be 'x' or 'x:'"
-            key = "-" + key
-            if self.options_map.has_key(key):
-                raise ValueError, "duplicate short option key '%s'" % key
-            self.options_map[key] = (name, handler)
-            self.short_options.append(short)
-
-        if long:
-            if long[0] == "-":
-                raise ValueError, "long option should not start with '-'"
-            key = long
-            if key[-1] == "=":
-                key = key[:-1]
-            key = "--" + key
-            if self.options_map.has_key(key):
-                raise ValueError, "duplicate long option key '%s'" % key
-            self.options_map[key] = (name, handler)
-            self.long_options.append(long)
-
-        if env:
-            self.environ_map[env] = (name, handler)
-
-        if name:
-            if not hasattr(self, name):
-                setattr(self, name, None)
-            self.names_list.append((name, confname))
-            if default is not None:
-                self.default_map[name] = default
-            if required:
-                self.required_map[name] = required
-
-    def realize(self, args=None, progname=None, doc=None,
-                raise_getopt_errs=True):
-        """Realize a configuration.
-
-        Optional arguments:
-
-        args     -- the command line arguments, less the program name
-                    (default is sys.argv[1:])
-
-        progname -- the program name (default is sys.argv[0])
-
-        doc      -- usage message (default is __main__.__doc__)
-        """
-
-        # Provide dynamic default method arguments
-        if args is None:
-            try:
-                args = sys.argv[1:]
-            except AttributeError:
-                args = ()
-
-        if progname is None:
-            try:
-                progname = sys.argv[0]
-            except (AttributeError, IndexError):
-                progname = 'zope'
-
-        if doc is None:
-            import __main__
-            doc = __main__.__doc__
-        self.progname = progname
-        self.doc = doc
-
-        self.options = []
-        self.args = []
-
-        # Call getopt
-        try:
-            self.options, self.args = getopt.getopt(
-                args, "".join(self.short_options), self.long_options)
-        except getopt.error, msg:
-            if raise_getopt_errs:
-                self.usage(msg)
-
-        # Check for positional args
-        if self.args and not self.positional_args_allowed:
-            self.usage("positional arguments are not supported")
-
-        # Process options returned by getopt
-        for opt, arg in self.options:
-            name, handler = self.options_map[opt]
-            if handler is not None:
-                try:
-                    arg = handler(arg)
-                except ValueError, msg:
-                    self.usage("invalid value for %s %r: %s" % (opt, arg, msg))
-            if name and arg is not None:
-                if getattr(self, name) is not None:
-                    self.usage("conflicting command line option %r" % opt)
-                setattr(self, name, arg)
-
-        # Process environment variables
-        for envvar in self.environ_map.keys():
-            name, handler = self.environ_map[envvar]
-            if name and getattr(self, name, None) is not None:
-                continue
-            if os.environ.has_key(envvar):
-                value = os.environ[envvar]
-                if handler is not None:
-                    try:
-                        value = handler(value)
-                    except ValueError, msg:
-                        self.usage("invalid environment value for %s %r: %s"
-                                   % (envvar, value, msg))
-                if name and value is not None:
-                    setattr(self, name, value)
-
-        if self.configfile is None:
-            self.configfile = self.default_configfile()
-        if self.zconfig_options and self.configfile is None:
-            self.usage("configuration overrides (-X) cannot be used"
-                       " without a configuration file")
-        if self.configfile is not None:
-            # Process config file
-            self.load_schema()
-            try:
-                self.load_configfile()
-            except ZConfig.ConfigurationError, msg:
-                self.usage(str(msg))
-
-        # Copy config options to attributes of self.  This only fills
-        # in options that aren't already set from the command line.
-        for name, confname in self.names_list:
-            if confname and getattr(self, name) is None:
-                parts = confname.split(".")
-                obj = self.configroot
-                for part in parts:
-                    if obj is None:
-                        break
-                    # Here AttributeError is not a user error!
-                    obj = getattr(obj, part)
-                setattr(self, name, obj)
-
-        # Process defaults
-        for name, value in self.default_map.items():
-            if getattr(self, name) is None:
-                setattr(self, name, value)
-
-        # Process required options
-        for name, message in self.required_map.items():
-            if getattr(self, name) is None:
-                self.usage(message)
-
-        if self.logsectionname:
-            self.load_logconf(self.logsectionname)
-
-    def default_configfile(self):
-        """Return the name of the default config file, or None."""
-        # This allows a default configuration file to be used without
-        # affecting the -C command line option; setting self.configfile
-        # before calling realize() makes the -C option unusable since
-        # then realize() thinks it has already seen the option.  If no
-        # -C is used, realize() will call this method to try to locate
-        # a configuration file.
-        return None
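-
-        # A subclass might override it along these lines (hypothetical
-        # path; a sketch, not part of zdaemon):
-        #
-        #     def default_configfile(self):
-        #         path = os.path.expanduser("~/.myapp.conf")
-        #         if os.path.exists(path):
-        #             return path
-        #         return None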
-
-    def load_schema(self):
-        if self.schema is None:
-            # Load schema
-            if self.schemadir is None:
-                self.schemadir = os.path.dirname(__file__)
-            self.schemafile = os.path.join(self.schemadir, self.schemafile)
-            self.schema = ZConfig.loadSchema(self.schemafile)
-
-    def load_configfile(self):
-        self.configroot, self.confighandlers = \
-            ZConfig.loadConfig(self.schema, self.configfile,
-                               self.zconfig_options)
-
-    def load_logconf(self, sectname="eventlog"):
-        parts = sectname.split(".")
-        obj = self.configroot
-        for p in parts:
-            if obj is None:
-                break
-            obj = getattr(obj, p)
-        self.config_logger = obj
-        if obj is not None:
-            obj.startup()
-
-
-class RunnerOptions(ZDOptions):
-
-    uid = gid = None
-
-    def __init__(self):
-        ZDOptions.__init__(self)
-        self.add("backofflimit", "runner.backoff_limit",
-                 "b:", "backoff-limit=", int, default=10)
-        self.add("daemon", "runner.daemon", "d", "daemon", flag=1, default=0)
-        self.add("forever", "runner.forever", "f", "forever",
-                 flag=1, default=0)
-        self.add("sockname", "runner.socket_name", "s:", "socket-name=",
-                 ZConfig.datatypes.existing_dirpath, default="zdsock")
-        self.add("exitcodes", "runner.exit_codes", "x:", "exit-codes=",
-                 list_of_ints, default=[0, 2])
-        self.add("user", "runner.user", "u:", "user=")
-        self.add("umask", "runner.umask", "m:", "umask=", octal_type,
-                 default=022)
-        self.add("directory", "runner.directory", "z:", "directory=",
-                 ZConfig.datatypes.existing_directory)
-        self.add("hang_around", "runner.hang_around", default=0)
-
-    def realize(self, *args, **kwds):
-        ZDOptions.realize(self, *args, **kwds)
-
-        # Additional checking of user option; set uid and gid
-        if self.user is not None:
-            import pwd
-            try:
-                uid = int(self.user)
-            except ValueError:
-                try:
-                    pwrec = pwd.getpwnam(self.user)
-                except KeyError:
-                    self.usage("username %r not found" % self.user)
-                uid = pwrec[2]
-            else:
-                try:
-                    pwrec = pwd.getpwuid(uid)
-                except KeyError:
-                    self.usage("uid %r not found" % self.user)
-            gid = pwrec[3]
-            self.uid = uid
-            self.gid = gid
-
-
-# ZConfig datatype
-
-def list_of_ints(arg):
-    if not arg:
-        return []
-    else:
-        return map(int, arg.split(","))
-
-def octal_type(arg):
-    return int(arg, 8)
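-
-# For example: list_of_ints("0,2") == [0, 2], and octal_type("022") == 18,
-# i.e. the usual 022 file-creation mask as a decimal int.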
-
-
-def _test():
-    # Stupid test program
-    z = ZDOptions()
-    z.add("program", "zdctl.program", "p:", "program=")
-    print z.names_list
-    z.realize()
-    names = z.names_list[:]
-    names.sort()
-    for name, confname in names:
-        print "%-20s = %.56r" % (name, getattr(z, name))
-
-if __name__ == "__main__":
-    __file__ = sys.argv[0]
-    _test()
diff --git a/branches/bug1734/src/zdaemon/zdrun.py b/branches/bug1734/src/zdaemon/zdrun.py
deleted file mode 100755
index 04a270b3..00000000
--- a/branches/bug1734/src/zdaemon/zdrun.py
+++ /dev/null
@@ -1,719 +0,0 @@
-#!python
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""zdrun -- run an application as a daemon.
-
-Usage: python zdrun.py [zdrun-options] program [program-arguments]
-
-Options:
--C/--configure URL -- configuration file or URL
--S/--schema XML Schema -- XML schema for configuration file
--b/--backoff-limit SECONDS -- set backoff limit to SECONDS (default 10)
--d/--daemon -- run as a proper daemon; fork a subprocess, setsid(), etc.
--f/--forever -- run forever (by default, exit when backoff limit is exceeded)
--h/--help -- print this usage message and exit
--s/--socket-name SOCKET -- Unix socket name for client (default "zdsock")
--u/--user USER -- run as this user (or numeric uid)
--m/--umask UMASK -- use this umask for daemon subprocess (default is 022)
--t/--transcript FILE -- transcript of output from daemon-mode program
--x/--exit-codes LIST -- list of fatal exit codes (default "0,2")
--z/--directory DIRECTORY -- directory to chdir to when using -d (default off)
-program [program-arguments] -- an arbitrary application to run
-
-This daemon manager has two purposes: it restarts the application when
-it dies, and (when requested to do so with the -d option) it runs the
-application in the background, detached from the foreground tty
-session that started it (if any).
-
-Exit codes: if at any point the application exits with an exit status
-listed by the -x option, it is not restarted.  Any other form of
-termination (either being killed by a signal or exiting with an exit
-status not listed in the -x option) causes it to be restarted.
-
-Backoff limit: when the application exits (nearly) immediately after a
-restart, the daemon manager starts slowing down by delaying between
-restarts.  The delay starts at 1 second and is increased by one on
-each restart up to the backoff limit given by the -b option; it is
-reset when the application runs for more than the backoff limit
-seconds.  By default, when the delay reaches the backoff limit, the
-daemon manager exits (under the assumption that the application has a
-persistent fault).  The -f (forever) option prevents this exit; use it
-when you expect that a temporary external problem (such as a network
-outage or an overfull disk) may prevent the application from starting
-but you want the daemon manager to keep trying.
-"""
-
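-# A typical invocation (a sketch; the program path is illustrative):
-#
-#   python zdrun.py -d -f -b 10 -s /tmp/zdsock /usr/bin/myserver
-#
-# With -b 10 the restart delay grows 1s, 2s, ... up to 10s; -f keeps the
-# manager retrying at that pace instead of exiting.
-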
-"""
-XXX TO DO
-
-- Finish OO design -- use multiple classes rather than folding
-  everything into one class.
-
-- Add unit tests.
-
-- Add doc strings.
-
-"""
-
-import os
-import sys
-import time
-import errno
-import logging
-import socket
-import select
-import signal
-from stat import ST_MODE
-
-if __name__ == "__main__":
-    # Add the parent of the script directory to the module search path
-    # (but only when the script is run from inside the zdaemon package)
-    from os.path import dirname, basename, abspath, normpath
-    scriptdir = dirname(normpath(abspath(sys.argv[0])))
-    if basename(scriptdir).lower() == "zdaemon":
-        sys.path.append(dirname(scriptdir))
-
-from zdaemon.zdoptions import RunnerOptions
-
-
-class ZDRunOptions(RunnerOptions):
-
-    positional_args_allowed = 1
-    logsectionname = "runner.eventlog"
-    program = None
-
-    def __init__(self):
-        RunnerOptions.__init__(self)
-        self.add("schemafile", short="S:", long="schema=",
-                 default="schema.xml",
-                 handler=self.set_schemafile)
-        self.add("transcript", "runner.transcript", "t:", "transcript=",
-                 default="/dev/null")
-
-    def set_schemafile(self, file):
-        self.schemafile = file
-
-    def realize(self, *args, **kwds):
-        RunnerOptions.realize(self, *args, **kwds)
-        if self.args:
-            self.program = self.args
-        if not self.program:
-            self.usage("no program specified (use -C or positional args)")
-        if self.sockname:
-            # Convert socket name to absolute path
-            self.sockname = os.path.abspath(self.sockname)
-        if self.config_logger is None:
-            # This doesn't perform any configuration of the logging
-            # package, but that's reasonable in this case.
-            self.logger = logging.getLogger()
-        else:
-            self.logger = self.config_logger()
-
-    def load_logconf(self, sectname):
-        """Load alternate eventlog if the specified section isn't present."""
-        RunnerOptions.load_logconf(self, sectname)
-        if self.config_logger is None and sectname != "eventlog":
-            RunnerOptions.load_logconf(self, "eventlog")
-
-
-class Subprocess:
-
-    """A class to manage a subprocess."""
-
-    # Initial state; overridden by instance variables
-    pid = 0 # Subprocess pid; 0 when not running
-    lasttime = 0 # Last time the subprocess was started; 0 if never
-
-    def __init__(self, options, args=None):
-        """Constructor.
-
-        Arguments are a ZDRunOptions instance and a list of program
-        arguments; the latter's first item must be the program name.
-        """
-        if args is None:
-            args = options.args
-        if not args:
-            options.usage("missing 'program' argument")
-        self.options = options
-        self.args = args
-        self._set_filename(args[0])
-
-    def _set_filename(self, program):
-        """Internal: turn a program name into a file name, using $PATH."""
-        if "/" in program:
-            filename = program
-            try:
-                st = os.stat(filename)
-            except os.error:
-                self.options.usage("can't stat program %r" % program)
-        else:
-            path = get_path()
-            for dir in path:
-                filename = os.path.join(dir, program)
-                try:
-                    st = os.stat(filename)
-                except os.error:
-                    continue
-                mode = st[ST_MODE]
-                if mode & 0111:
-                    break
-            else:
-                self.options.usage("can't find program %r on PATH %s" %
-                                   (program, path))
-        if not os.access(filename, os.X_OK):
-            self.options.usage("no permission to run program %r" % filename)
-        self.filename = filename
-
-    def spawn(self):
-        """Start the subprocess.  It must not be running already.
-
-        Return the process id.  If the fork() call fails, return 0.
-        """
-        assert not self.pid
-        self.lasttime = time.time()
-        try:
-            pid = os.fork()
-        except os.error:
-            return 0
-        if pid != 0:
-            # Parent
-            self.pid = pid
-            self.options.logger.info("spawned process pid=%d" % pid)
-            return pid
-        else:
-            # Child
-            try:
-                # Close file descriptors except std{in,out,err}.
-                # XXX We don't know how many to close; hope 100 is plenty.
-                for i in range(3, 100):
-                    try:
-                        os.close(i)
-                    except os.error:
-                        pass
-                try:
-                    os.execv(self.filename, self.args)
-                except os.error, err:
-                    sys.stderr.write("can't exec %r: %s\n" %
-                                     (self.filename, err))
-            finally:
-                os._exit(127)
-            # Does not return
-
-    def kill(self, sig):
-        """Send a signal to the subprocess.  This may or may not kill it.
-
-        Return None if the signal was sent, or an error message string
-        if an error occurred or if the subprocess is not running.
-        """
-        if not self.pid:
-            return "no subprocess running"
-        try:
-            os.kill(self.pid, sig)
-        except os.error, msg:
-            return str(msg)
-        return None
-
-    def setstatus(self, sts):
-        """Set process status returned by wait() or waitpid().
-
-        This simply notes the fact that the subprocess is no longer
-        running by setting self.pid to 0.
-        """
-        self.pid = 0
-
-
-class Daemonizer:
-
-    def main(self, args=None):
-        self.options = ZDRunOptions()
-        self.options.realize(args)
-        self.logger = self.options.logger
-        self.set_uid()
-        self.run()
-
-    def set_uid(self):
-        if self.options.uid is None:
-            return
-        uid = os.geteuid()
-        if uid != 0 and uid != self.options.uid:
-            self.options.usage("only root can use -u USER to change users")
-        os.setgid(self.options.gid)
-        os.setuid(self.options.uid)
-
-    def run(self):
-        self.proc = Subprocess(self.options)
-        self.opensocket()
-        try:
-            self.setsignals()
-            if self.options.daemon:
-                self.daemonize()
-            self.runforever()
-        finally:
-            try:
-                os.unlink(self.options.sockname)
-            except os.error:
-                pass
-
-    mastersocket = None
-    commandsocket = None
-
-    def opensocket(self):
-        sockname = self.options.sockname
-        tempname = "%s.%d" % (sockname, os.getpid())
-        self.unlink_quietly(tempname)
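-        # Bind to a per-pid temporary name first, then hard-link it to
-        # the real name: os.link() fails if sockname already exists, so
-        # claiming the socket is atomic and two daemon managers cannot
-        # both grab the same socket.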
-        while 1:
-            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            try:
-                sock.bind(tempname)
-                os.chmod(tempname, 0700)
-                try:
-                    os.link(tempname, sockname)
-                    break
-                except os.error:
-                    # Lock contention, or stale socket.
-                    self.checkopen()
-                    # Stale socket -- delete, sleep, and try again.
-                    msg = "Unlinking stale socket %s; sleep 1" % sockname
-                    sys.stderr.write(msg + "\n")
-                    self.logger.warn(msg)
-                    self.unlink_quietly(sockname)
-                    sock.close()
-                    time.sleep(1)
-                    continue
-            finally:
-                self.unlink_quietly(tempname)
-        sock.listen(1)
-        sock.setblocking(0)
-        self.mastersocket = sock
-
-    def unlink_quietly(self, filename):
-        try:
-            os.unlink(filename)
-        except os.error:
-            pass
-
-    def checkopen(self):
-        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        try:
-            s.connect(self.options.sockname)
-            s.send("status\n")
-            data = s.recv(1000)
-            s.close()
-        except socket.error:
-            pass
-        else:
-            while data.endswith("\n"):
-                data = data[:-1]
-            msg = ("Another zrdun is already up using socket %r:\n%s" %
-                   (self.options.sockname, data))
-            sys.stderr.write(msg + "\n")
-            self.logger.critical(msg)
-            sys.exit(1)
-
-    def setsignals(self):
-        signal.signal(signal.SIGTERM, self.sigexit)
-        signal.signal(signal.SIGHUP, self.sigexit)
-        signal.signal(signal.SIGINT, self.sigexit)
-        signal.signal(signal.SIGCHLD, self.sigchild)
-
-    def sigexit(self, sig, frame):
-        self.logger.critical("daemon manager killed by %s" % signame(sig))
-        sys.exit(1)
-
-    waitstatus = None
-
-    def sigchild(self, sig, frame):
-        try:
-            pid, sts = os.waitpid(-1, os.WNOHANG)
-        except os.error:
-            return
-        if pid:
-            self.waitstatus = pid, sts
-
-    def daemonize(self):
-
-        # To daemonize, we need to become the leader of our own session
-        # (process) group.  If we do not, signals sent to our
-        # parent process will also be sent to us.  This might be bad because
-        # signals such as SIGINT can be sent to our parent process during
-        # normal (uninteresting) operations such as when we press Ctrl-C in the
-        # parent terminal window to escape from a logtail command.
-        # To disassociate ourselves from our parent's session group we use
-        # os.setsid.  It means "set session id", which has the effect of
-        # disassociating a process from its current session and process group
-        # and setting itself up as a new session leader.
-        #
-        # Unfortunately we cannot call setsid if we're already a session group
-        # leader, so we use "fork" to make a copy of ourselves that is
-        # guaranteed to not be a session group leader.
-        #
-        # We also change directories, set stderr and stdout to null, and
-        # change our umask.
-        #
-        # This explanation was (gratefully) garnered from
-        # http://www.hawklord.uklinux.net/system/daemons/d3.htm
-
-        pid = os.fork()
-        if pid != 0:
-            # Parent
-            self.logger.debug("daemon manager forked; parent exiting")
-            os._exit(0)
-        # Child
-        self.logger.info("daemonizing the process")
-        if self.options.directory:
-            try:
-                os.chdir(self.options.directory)
-            except os.error, err:
-                self.logger.warn("can't chdir into %r: %s"
-                                 % (self.options.directory, err))
-            else:
-                self.logger.info("set current directory: %r"
-                                 % self.options.directory)
-        os.close(0)
-        sys.stdin = sys.__stdin__ = open("/dev/null")
-        os.close(1)
-        sys.stdout = sys.__stdout__ = open(self.options.transcript, "a", 0)
-        os.close(2)
-        sys.stderr = sys.__stderr__ = open(self.options.transcript, "a", 0)
-        os.setsid()
-        os.umask(self.options.umask)
-        # XXX Stevens, in his Advanced Unix book, section 13.3 (page
-        # 417) recommends calling umask(0) and closing unused
-        # file descriptors.  In his Network Programming book, he
-        # additionally recommends ignoring SIGHUP and forking again
-        # after the setsid() call, for obscure SVR4 reasons.
-
-    mood = 1 # 1: up, 0: down, -1: suicidal
-    delay = 0 # If nonzero, delay starting or killing until this time
-    killing = 0 # If true, send SIGKILL when delay expires
-    proc = None # Subprocess instance
-
-    def runforever(self):
-        self.logger.info("daemon manager started")
-        min_mood = not self.options.hang_around
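-        # Event loop: (re)spawn the child when appropriate, then block
-        # in select() on the master/command sockets until a command
-        # arrives, the delay timer expires, or a signal interrupts the
-        # wait.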
-        while self.mood >= min_mood or self.proc.pid:
-            if self.mood > 0 and not self.proc.pid and not self.delay:
-                pid = self.proc.spawn()
-                if not pid:
-                    # Can't fork.  Try again later...
-                    self.delay = time.time() + self.options.backofflimit
-            if self.waitstatus:
-                self.reportstatus()
-            r, w, x = [self.mastersocket], [], []
-            if self.commandsocket:
-                r.append(self.commandsocket)
-            timeout = self.options.backofflimit
-            if self.delay:
-                timeout = max(0, min(timeout, self.delay - time.time()))
-                if timeout <= 0:
-                    self.delay = 0
-                    if self.killing and self.proc.pid:
-                        self.proc.kill(signal.SIGKILL)
-                        self.delay = time.time() + self.options.backofflimit
-            try:
-                r, w, x = select.select(r, w, x, timeout)
-            except select.error, err:
-                if err[0] != errno.EINTR:
-                    raise
-                r = w = x = []
-            if self.waitstatus:
-                self.reportstatus()
-            if self.commandsocket and self.commandsocket in r:
-                try:
-                    self.dorecv()
-                except socket.error, msg:
-                    self.logger.exception("socket.error in dorecv(): %s"
-                                          % str(msg))
-                    self.commandsocket = None
-            if self.mastersocket in r:
-                try:
-                    self.doaccept()
-                except socket.error, msg:
-                    self.logger.exception("socket.error in doaccept(): %s"
-                                          % str(msg))
-                    self.commandsocket = None
-        self.logger.info("Exiting")
-        sys.exit(0)
-
-    def reportstatus(self):
-        pid, sts = self.waitstatus
-        self.waitstatus = None
-        es, msg = decode_wait_status(sts)
-        msg = "pid %d: " % pid + msg
-        if pid != self.proc.pid:
-            msg = "unknown " + msg
-            self.logger.warn(msg)
-        else:
-            killing = self.killing
-            if killing:
-                self.killing = 0
-                self.delay = 0
-            else:
-                self.governor()
-            self.proc.setstatus(sts)
-            if es in self.options.exitcodes and not killing:
-                msg = msg + "; exiting now"
-                self.logger.info(msg)
-                sys.exit(es)
-            self.logger.info(msg)
-
-    backoff = 0
-
-    def governor(self):
-        # Back off if respawning too frequently
-        now = time.time()
-        if not self.proc.lasttime:
-            pass
-        elif now - self.proc.lasttime < self.options.backofflimit:
-            # Exited rather quickly; slow down the restarts
-            self.backoff += 1
-            if self.backoff >= self.options.backofflimit:
-                if self.options.forever:
-                    self.backoff = self.options.backofflimit
-                else:
-                    self.logger.critical("restarting too frequently; quit")
-                    sys.exit(1)
-            self.logger.info("sleep %s to avoid rapid restarts" % self.backoff)
-            self.delay = now + self.backoff
-        else:
-            # Reset the backoff timer
-            self.backoff = 0
-            self.delay = 0
-
-    def doaccept(self):
-        if self.commandsocket:
-            # Give up on previous command socket!
-            self.sendreply("Command superseded by new command")
-            self.commandsocket.close()
-            self.commandsocket = None
-        self.commandsocket, addr = self.mastersocket.accept()
-        self.commandbuffer = ""
-
-    def dorecv(self):
-        data = self.commandsocket.recv(1000)
-        if not data:
-            self.sendreply("Command not terminated by newline")
-            self.commandsocket.close()
-            self.commandsocket = None
-        self.commandbuffer += data
-        if "\n" in self.commandbuffer:
-            self.docommand()
-            self.commandsocket.close()
-            self.commandsocket = None
-        elif len(self.commandbuffer) > 10000:
-            self.sendreply("Command exceeds 10 KB")
-            self.commandsocket.close()
-            self.commandsocket = None
-
-    def docommand(self):
-        lines = self.commandbuffer.split("\n")
-        args = lines[0].split()
-        if not args:
-            self.sendreply("Empty command")
-            return
-        command = args[0]
-        methodname = "cmd_" + command
-        method = getattr(self, methodname, None)
-        if method:
-            method(args)
-        else:
-            self.sendreply("Unknown command %r; 'help' for a list" % args[0])
-
-    def cmd_start(self, args):
-        self.mood = 1 # Up
-        self.backoff = 0
-        self.delay = 0
-        self.killing = 0
-        if not self.proc.pid:
-            self.proc.spawn()
-            self.sendreply("Application started")
-        else:
-            self.sendreply("Application already started")
-
-    def cmd_stop(self, args):
-        self.mood = 0 # Down
-        self.backoff = 0
-        self.delay = 0
-        self.killing = 0
-        if self.proc.pid:
-            self.proc.kill(signal.SIGTERM)
-            self.sendreply("Sent SIGTERM")
-            self.killing = 1
-            self.delay = time.time() + self.options.backofflimit
-        else:
-            self.sendreply("Application already stopped")
-
-    def cmd_restart(self, args):
-        self.mood = 1 # Up
-        self.backoff = 0
-        self.delay = 0
-        self.killing = 0
-        if self.proc.pid:
-            self.proc.kill(signal.SIGTERM)
-            self.sendreply("Sent SIGTERM; will restart later")
-            self.killing = 1
-            self.delay = time.time() + self.options.backofflimit
-        else:
-            self.proc.spawn()
-            self.sendreply("Application started")
-
-    def cmd_exit(self, args):
-        self.mood = -1 # Suicidal
-        self.backoff = 0
-        self.delay = 0
-        self.killing = 0
-        if self.proc.pid:
-            self.proc.kill(signal.SIGTERM)
-            self.sendreply("Sent SIGTERM; will exit later")
-            self.killing = 1
-            self.delay = time.time() + self.options.backofflimit
-        else:
-            self.sendreply("Exiting now")
-            self.logger.info("Exiting")
-            sys.exit(0)
-
-    def cmd_kill(self, args):
-        if args[1:]:
-            try:
-                sig = int(args[1])
-            except:
-                self.sendreply("Bad signal %r" % args[1])
-                return
-        else:
-            sig = signal.SIGTERM
-        if not self.proc.pid:
-            self.sendreply("Application not running")
-        else:
-            msg = self.proc.kill(sig)
-            if msg:
-                self.sendreply("Kill %d failed: %s" % (sig, msg))
-            else:
-                self.sendreply("Signal %d sent" % sig)
-
-    def cmd_status(self, args):
-        if not self.proc.pid:
-            status = "stopped"
-        else:
-            status = "running"
-        self.sendreply("status=%s\n" % status +
-                       "now=%r\n" % time.time() +
-                       "mood=%d\n" % self.mood +
-                       "delay=%r\n" % self.delay +
-                       "backoff=%r\n" % self.backoff +
-                       "lasttime=%r\n" % self.proc.lasttime +
-                       "application=%r\n" % self.proc.pid +
-                       "manager=%r\n" % os.getpid() +
-                       "backofflimit=%r\n" % self.options.backofflimit +
-                       "filename=%r\n" % self.proc.filename +
-                       "args=%r\n" % self.proc.args)
-
-    def cmd_help(self, args):
-        self.sendreply(
-            "Available commands:\n"
-            "  help -- return command help\n"
-            "  status -- report application status (default command)\n"
-            "  kill [signal] -- send a signal to the application\n"
-            "                   (default signal is SIGTERM)\n"
-            "  start -- start the application if not already running\n"
-            "  stop -- stop the application if running\n"
-            "          (the daemon manager keeps running)\n"
-            "  restart -- stop followed by start\n"
-            "  exit -- stop the application and exit\n"
-            )
-
-    def sendreply(self, msg):
-        try:
-            if not msg.endswith("\n"):
-                msg = msg + "\n"
-            if hasattr(self.commandsocket, "sendall"):
-                self.commandsocket.sendall(msg)
-            else:
-                # This is quadratic, but msg is rarely more than 100 bytes :-)
-                while msg:
-                    sent = self.commandsocket.send(msg)
-                    msg = msg[sent:]
-        except socket.error, msg:
-            self.logger.warn("Error sending reply: %s" % str(msg))
-
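-
-# A minimal client for the control protocol served above (a sketch for
-# illustration; the socket path is hypothetical).  Commands are
-# newline-terminated and replies end with a newline:
-#
-#   import socket
-#   s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-#   s.connect("/var/run/zdrun.sock")  # hypothetical sockname
-#   s.send("status\n")
-#   print s.recv(1000)
-#   s.close()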
-
-# Helpers for dealing with signals and exit status
-
-def decode_wait_status(sts):
-    """Decode the status returned by wait() or waitpid().
-
-    Return a tuple (exitstatus, message) where exitstatus is the exit
-    status, or -1 if the process was killed by a signal; and message
-    is a message telling what happened.  It is the caller's
-    responsibility to display the message.
-    """
-    if os.WIFEXITED(sts):
-        es = os.WEXITSTATUS(sts) & 0xffff
-        msg = "exit status %s" % es
-        return es, msg
-    elif os.WIFSIGNALED(sts):
-        sig = os.WTERMSIG(sts)
-        msg = "terminated by %s" % signame(sig)
-        if hasattr(os, "WCOREDUMP"):
-            iscore = os.WCOREDUMP(sts)
-        else:
-            iscore = sts & 0x80
-        if iscore:
-            msg += " (core dumped)"
-        return -1, msg
-    else:
-        msg = "unknown termination cause 0x%04x" % sts
-        return -1, msg
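-
-# For illustration: on Linux, a raw status of 0x0100 decodes to
-# (1, "exit status 1"), while 0x0009 (killed by SIGKILL) decodes to
-# (-1, "terminated by SIGKILL").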
-
-_signames = None
-
-def signame(sig):
-    """Return a symbolic name for a signal.
-
-    Return "signal NNN" if there is no corresponding SIG name in the
-    signal module.
-    """
-
-    if _signames is None:
-        _init_signames()
-    return _signames.get(sig) or "signal %d" % sig
-
-def _init_signames():
-    global _signames
-    d = {}
-    for k, v in signal.__dict__.items():
-        k_startswith = getattr(k, "startswith", None)
-        if k_startswith is None:
-            continue
-        if k_startswith("SIG") and not k_startswith("SIG_"):
-            d[v] = k
-    _signames = d
-
-def get_path():
-    """Return a list corresponding to $PATH, or a default."""
-    path = ["/bin", "/usr/bin", "/usr/local/bin"]
-    if os.environ.has_key("PATH"):
-        p = os.environ["PATH"]
-        if p:
-            path = p.split(os.pathsep)
-    return path
-
-# Main program
-def main(args=None):
-    assert os.name == "posix", "This code makes many Unix-specific assumptions"
-
-    d = Daemonizer()
-    d.main(args)
-
-if __name__ == "__main__":
-    main()
diff --git a/branches/bug1734/src/zope/__init__.py b/branches/bug1734/src/zope/__init__.py
deleted file mode 100644
index 40829591..00000000
--- a/branches/bug1734/src/zope/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-#
-# This file is necessary to make this directory a package.
diff --git a/branches/bug1734/src/zope/interface/DEPENDENCIES.cfg b/branches/bug1734/src/zope/interface/DEPENDENCIES.cfg
deleted file mode 100644
index ea3a37f9..00000000
--- a/branches/bug1734/src/zope/interface/DEPENDENCIES.cfg
+++ /dev/null
@@ -1 +0,0 @@
-zope.testing
diff --git a/branches/bug1734/src/zope/interface/PUBLICATION.cfg b/branches/bug1734/src/zope/interface/PUBLICATION.cfg
deleted file mode 100644
index d5ab9500..00000000
--- a/branches/bug1734/src/zope/interface/PUBLICATION.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-Metadata-Version: 1.0
-Name: zope.interface
-Summary: Zope 3 Interface Infrastructure
-Author: Zope Corporation and Contributors
-Author-email: zope3-dev@zope.org
-License: ZPL 2.1
-Description:
-        The implementation of interface definitions for Zope 3.
diff --git a/branches/bug1734/src/zope/interface/README.txt b/branches/bug1734/src/zope/interface/README.txt
deleted file mode 100644
index 6f9c8be7..00000000
--- a/branches/bug1734/src/zope/interface/README.txt
+++ /dev/null
@@ -1,697 +0,0 @@
-==========
-Interfaces
-==========
-
-.. contents::
-
-Interfaces are objects that specify (document) the external behavior
-of objects that "provide" them.  An interface specifies behavior
-through:
-
-- Informal documentation in a doc string
-
-- Attribute definitions
-
-- Invariants, which are conditions that must hold for objects that
-  provide the interface
-
-Attribute definitions describe specific attributes. They define the
-attribute name and provide documentation and constraints of attribute
-values.  Attribute definitions can take a number of forms, as we'll
-see below.
-
-Defining interfaces
-===================
-
-Interfaces are defined using Python class statements:
-
-  >>> import zope.interface
-  >>> class IFoo(zope.interface.Interface):
-  ...    """Foo blah blah"""
-  ...
-  ...    x = zope.interface.Attribute("""X blah blah""")
-  ...
-  ...    def bar(q, r=None):
-  ...        """bar blah blah"""
-
-In the example above, we've created an interface, `IFoo`.  We
-subclassed `zope.interface.Interface`, which is an ancestor interface for
-all interfaces, much as `object` is an ancestor of all new-style
-classes [#create]_.   The interface is not a class, it's an Interface,
-an instance of `InterfaceClass`::
-
-  >>> type(IFoo)
-  <class 'zope.interface.interface.InterfaceClass'>
-
-We can ask for the interface's documentation::
-
-  >>> IFoo.__doc__
-  'Foo blah blah'
-
-and its name::
-
-  >>> IFoo.__name__
-  'IFoo'
-
-and even its module::
-
-  >>> IFoo.__module__
-  '__main__'
-
-The interface defined two attributes:
-
-`x`
-  This is the simplest form of attribute definition.  It has a name
-  and a doc string.  It doesn't formally specify anything else.
-
-`bar`
-  This is a method.  A method is defined via a function definition.  A
-  method is simply an attribute constrained to be a callable with a
-  particular signature, as provided by the function definition.
-
-  Note that `bar` doesn't take a `self` argument.  Interfaces document
-  how an object is *used*.  When calling instance methods, you don't
-  pass a `self` argument, so a `self` argument isn't included in the
-  interface signature.  The `self` argument in instance methods is
-  really an implementation detail of Python instances. Other objects,
-  besides instances, can provide interfaces, and their methods might not
-  be instance methods. For example, modules can provide interfaces and
-  their methods are usually just functions.  Even instances can have
-  methods that are not instance methods.
-
-You can access the attributes defined by an interface using mapping
-syntax::
-
-  >>> x = IFoo['x']
-  >>> type(x)
-  <class 'zope.interface.interface.Attribute'>
-  >>> x.__name__
-  'x'
-  >>> x.__doc__
-  'X blah blah'
-
-  >>> IFoo.get('x').__name__
-  'x'
-
-  >>> IFoo.get('y')
-
-You can use `in` to determine if an interface defines a name::
-
-  >>> 'x' in IFoo
-  True
-
-You can iterate over interfaces to get the names they define::
-
-  >>> names = list(IFoo)
-  >>> names.sort()
-  >>> names
-  ['bar', 'x']
-
-Remember that interfaces aren't classes. You can't access attribute
-definitions as attributes of interfaces::
-
-  >>> IFoo.x
-  Traceback (most recent call last):
-    File "<stdin>", line 1, in ?
-  AttributeError: 'InterfaceClass' object has no attribute 'x'
-
-Methods provide access to the method signature::
-
-  >>> bar = IFoo['bar']
-  >>> bar.getSignatureString()
-  '(q, r=None)'
-
-TODO
-  Methods really should have a better API.  This is something that
-  needs to be improved.
-
-Declaring interfaces
-====================
-
-Having defined interfaces, we can *declare* that objects provide
-them.  Before we describe the details, let's define some terms:
-
-*provide*
-   We say that objects *provide* interfaces.  If an object provides an
-   interface, then the interface specifies the behavior of the
-   object. In other words, interfaces specify the behavior of the
-   objects that provide them.
-
-*implement*
-   We normally say that classes *implement* interfaces.  If a class
-   implements an interface, then the instances of the class provide
-   the interface.  Objects provide interfaces that their classes
-   implement [#factory]_.  (Objects can provide interfaces directly,
-   in addition to what their classes implement.)
-
-   It is important to note that classes don't usually provide the
-   interfaces that they implement.
-
-   We can generalize this to factories.  For any callable object we
-   can declare that it produces objects that provide some interfaces
-   by saying that the factory implements the interfaces.
-
-Now that we've defined these terms, we can talk about the API for
-declaring interfaces.
-
-Declaring implemented interfaces
---------------------------------
-
-The most common way to declare interfaces is using the implements
-function in a class statement::
-
-  >>> class Foo:
-  ...     zope.interface.implements(IFoo)
-  ...
-  ...     def __init__(self, x=None):
-  ...         self.x = x
-  ...
-  ...     def bar(self, q, r=None):
-  ...         return q, r, self.x
-  ...
-  ...     def __repr__(self):
-  ...         return "Foo(%s)" % self.x
-
-
-In this example, we declared that `Foo` implements `IFoo`. This means
-that instances of `Foo` provide `IFoo`.  Having made this declaration,
-there are several ways we can introspect the declarations.  First, we
-can ask an interface whether it is implemented by a class::
-
-  >>> IFoo.implementedBy(Foo)
-  True
-
-And we can ask whether an interface is provided by an object::
-
-  >>> foo = Foo()
-  >>> IFoo.providedBy(foo)
-  True
-
-Of course, `Foo` doesn't provide `IFoo`, it implements it::
-
-  >>> IFoo.providedBy(Foo)
-  False
-
-We can also ask what interfaces are implemented by an object::
-
-  >>> list(zope.interface.implementedBy(Foo))
-  [<InterfaceClass __main__.IFoo>]
-
-It's an error to ask for interfaces implemented by a non-callable
-object::
-
-  >>> IFoo.implementedBy(foo)
-  Traceback (most recent call last):
-  ...
-  TypeError: ('ImplementedBy called for non-factory', Foo(None))
-
-  >>> list(zope.interface.implementedBy(foo))
-  Traceback (most recent call last):
-  ...
-  TypeError: ('ImplementedBy called for non-factory', Foo(None))
-
-Similarly, we can ask what interfaces are provided by an object::
-
-  >>> list(zope.interface.providedBy(foo))
-  [<InterfaceClass __main__.IFoo>]
-  >>> list(zope.interface.providedBy(Foo))
-  []
-
-We can declare interfaces implemented by other factories (besides
-classes).  We do this using a Python-2.4-style decorator named
-`implementer`.  In versions of Python before 2.4, this looks like:
-
-
-  >>> def yfoo(y):
-  ...     foo = Foo()
-  ...     foo.y = y
-  ...     return foo
-  >>> yfoo = zope.interface.implementer(IFoo)(yfoo)
-
-  >>> list(zope.interface.implementedBy(yfoo))
-  [<InterfaceClass __main__.IFoo>]
-
-Note that the implementer decorator may modify its argument. Callers
-should not assume that a new object is created.
-
-Also note that, at least for now, implementer cannot be used with
-classes:
-
-  >>> zope.interface.implementer(IFoo)(Foo)
-  ... # doctest: +NORMALIZE_WHITESPACE
-  Traceback (most recent call last):
-    ...
-  TypeError: Can't use implementer with classes.  
-  Use one of the class-declaration functions instead.
-
-Declaring provided interfaces
------------------------------
-
-We can declare interfaces directly provided by objects.  Suppose that
-we want to document what the `__init__` method of the `Foo` class
-does.  It's not *really* part of `IFoo`.  You wouldn't normally call
-the `__init__` method on Foo instances.  Rather, the `__init__` method
-is part of `Foo`'s `__call__` method::
-
-  >>> class IFooFactory(zope.interface.Interface):
-  ...     """Create foos"""
-  ...
-  ...     def __call__(x=None):
-  ...         """Create a foo
-  ...
-  ...         The argument provides the initial value for x ...
-  ...         """
-
-It's the class that provides this interface, so we declare the
-interface on the class::
-
-  >>> zope.interface.directlyProvides(Foo, IFooFactory)
-
-And then, we'll see that Foo provides some interfaces::
-
-  >>> list(zope.interface.providedBy(Foo))
-  [<InterfaceClass __main__.IFooFactory>]
-  >>> IFooFactory.providedBy(Foo)
-  True
-
-Declaring class interfaces is common enough that there's a special
-declaration function for it, `classProvides`, that allows the
-declaration from within a class statement::
-
-  >>> class Foo2:
-  ...     zope.interface.implements(IFoo)
-  ...     zope.interface.classProvides(IFooFactory)
-  ...
-  ...     def __init__(self, x=None):
-  ...         self.x = x
-  ...
-  ...     def bar(self, q, r=None):
-  ...         return q, r, self.x
-  ...
-  ...     def __repr__(self):
-  ...         return "Foo(%s)" % self.x
-
-  >>> list(zope.interface.providedBy(Foo2))
-  [<InterfaceClass __main__.IFooFactory>]
-  >>> IFooFactory.providedBy(Foo2)
-  True
-
-There's a similar function, `moduleProvides`, that supports interface
-declarations from within module definitions.  For example, see the use
-of the `moduleProvides` call in `zope.interface.__init__`, which declares that
-the package `zope.interface` provides `IInterfaceDeclaration`.
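-
-As a sketch (not runnable as a doctest, because the call must appear
-inside a module body), a module declaring that it provides `IFoo`
-would contain, at module level::
-
-  import zope.interface
-  zope.interface.moduleProvides(IFoo)
-
-assuming the module also defines the `x` and `bar` names that `IFoo`
-requires.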
-
-Sometimes, we want to declare interfaces on instances, even though
-those instances get interfaces from their classes.  Suppose we create
-a new interface, `ISpecial`::
-
-  >>> class ISpecial(zope.interface.Interface):
-  ...     reason = zope.interface.Attribute("Reason why we're special")
-  ...     def brag():
-  ...         "Brag about being special"
-
-We can make an existing foo instance special by providing `reason`
-and `brag` attributes::
-
-  >>> foo.reason = 'I just am'
-  >>> def brag():
-  ...      return "I'm special!"
-  >>> foo.brag = brag
-  >>> foo.reason
-  'I just am'
-  >>> foo.brag()
-  "I'm special!"
-
-and by declaring the interface::
-
-  >>> zope.interface.directlyProvides(foo, ISpecial)
-
-then the new interface is included in the provided interfaces::
-
-  >>> ISpecial.providedBy(foo)
-  True
-  >>> list(zope.interface.providedBy(foo))
-  [<InterfaceClass __main__.ISpecial>, <InterfaceClass __main__.IFoo>]
-
-We can find out what interfaces are directly provided by an object::
-
-  >>> list(zope.interface.directlyProvidedBy(foo))
-  [<InterfaceClass __main__.ISpecial>]
-
-  >>> newfoo = Foo()
-  >>> list(zope.interface.directlyProvidedBy(newfoo))
-  []
-
-Inherited declarations
-----------------------
-
-Normally, declarations are inherited::
-
-  >>> class SpecialFoo(Foo):
-  ...     zope.interface.implements(ISpecial)
-  ...     reason = 'I just am'
-  ...     def brag(self):
-  ...         return "I'm special because %s" % self.reason
-
-  >>> list(zope.interface.implementedBy(SpecialFoo))
-  [<InterfaceClass __main__.ISpecial>, <InterfaceClass __main__.IFoo>]
-
-  >>> list(zope.interface.providedBy(SpecialFoo()))
-  [<InterfaceClass __main__.ISpecial>, <InterfaceClass __main__.IFoo>]
-
-Sometimes, you don't want to inherit declarations.  In that case, you
-can use `implementsOnly`, instead of `implements`::
-
-  >>> class Special(Foo):
-  ...     zope.interface.implementsOnly(ISpecial)
-  ...     reason = 'I just am'
-  ...     def brag(self):
-  ...         return "I'm special because %s" % self.reason
-
-  >>> list(zope.interface.implementedBy(Special))
-  [<InterfaceClass __main__.ISpecial>]
-
-  >>> list(zope.interface.providedBy(Special()))
-  [<InterfaceClass __main__.ISpecial>]
-
-External declarations
----------------------
-
-Normally, we make implementation declarations as part of a class
-definition. Sometimes, we may want to make declarations from outside
-the class definition. For example, we might want to declare interfaces
-for classes that we didn't write.  The function `classImplements` can
-be used for this purpose::
-
-  >>> class C:
-  ...     pass
-
-  >>> zope.interface.classImplements(C, IFoo)
-  >>> list(zope.interface.implementedBy(C))
-  [<InterfaceClass __main__.IFoo>]
-
-We can use `classImplementsOnly` to exclude inherited interfaces::
-
-  >>> class C(Foo):
-  ...     pass
-
-  >>> zope.interface.classImplementsOnly(C, ISpecial)
-  >>> list(zope.interface.implementedBy(C))
-  [<InterfaceClass __main__.ISpecial>]
-
-
-
-Declaration Objects
--------------------
-
-When we declare interfaces, we create *declaration* objects.  When we
-query declarations, declaration objects are returned::
-
-  >>> type(zope.interface.implementedBy(Special))
-  <class 'zope.interface.declarations.Implements'>
-
-Declaration objects and interface objects are similar in many ways. In
-fact, they share a common base class.  The important thing to realize
-about them is that they can be used where interfaces are expected in
-declarations. Here's a silly example::
-
-  >>> class Special2(Foo):
-  ...     zope.interface.implementsOnly(
-  ...          zope.interface.implementedBy(Foo),
-  ...          ISpecial,
-  ...          )
-  ...     reason = 'I just am'
-  ...     def brag(self):
-  ...         return "I'm special because %s" % self.reason
-
-The declaration here is almost the same as
-``zope.interface.implements(ISpecial)``, except that the order of
-interfaces in the resulting declaration is different::
-
-  >>> list(zope.interface.implementedBy(Special2))
-  [<InterfaceClass __main__.IFoo>, <InterfaceClass __main__.ISpecial>]
-
-
-Interface Inheritance
-=====================
-
-Interfaces can extend other interfaces. They do this simply by listing
-the other interfaces as base interfaces::
-
-  >>> class IBlat(zope.interface.Interface):
-  ...     """Blat blah blah"""
-  ...
-  ...     y = zope.interface.Attribute("y blah blah")
-  ...     def eek():
-  ...         """eek blah blah"""
-
-  >>> IBlat.__bases__
-  (<InterfaceClass zope.interface.Interface>,)
-
-  >>> class IBaz(IFoo, IBlat):
-  ...     """Baz blah"""
-  ...     def eek(a=1):
-  ...         """eek in baz blah"""
-  ...
-
-  >>> IBaz.__bases__
-  (<InterfaceClass __main__.IFoo>, <InterfaceClass __main__.IBlat>)
-
-  >>> names = list(IBaz)
-  >>> names.sort()
-  >>> names
-  ['bar', 'eek', 'x', 'y']
-
-Note that `IBaz` overrides eek::
-
-  >>> IBlat['eek'].__doc__
-  'eek blah blah'
-  >>> IBaz['eek'].__doc__
-  'eek in baz blah'
-
-We were careful to override eek in a compatible way.  When
-extending an interface, the extending interface should be compatible
-[#compat]_ with the extended interfaces.
-
-We can ask whether one interface extends another::
-
-  >>> IBaz.extends(IFoo)
-  True
-  >>> IBlat.extends(IFoo)
-  False
-
-Note that interfaces don't extend themselves::
-
-  >>> IBaz.extends(IBaz)
-  False
-
-Sometimes we wish they did, but we can instead use `isOrExtends`::
-
-  >>> IBaz.isOrExtends(IBaz)
-  True
-  >>> IBaz.isOrExtends(IFoo)
-  True
-  >>> IFoo.isOrExtends(IBaz)
-  False
-
-When we iterate over an interface, we get all of the names it defines,
-including names defined by base interfaces. Sometimes, we want *just*
-the names defined by the interface directly. We can use the `names`
-method for that::
-
-  >>> list(IBaz.names())
-  ['eek']
-
-Inheritance of attribute specifications
----------------------------------------
-
-An interface may override attribute definitions from base interfaces.
-If two base interfaces define the same attribute, the attribute is
-inherited from the most specific interface. For example, with:
-
-  >>> class IBase(zope.interface.Interface):
-  ...
-  ...     def foo():
-  ...         "base foo doc"
-
-  >>> class IBase1(IBase):
-  ...     pass
-
-  >>> class IBase2(IBase):
-  ...
-  ...     def foo():
-  ...         "base2 foo doc"
-
-  >>> class ISub(IBase1, IBase2):
-  ...     pass
-
-ISub's definition of foo is the one from IBase2, since IBase2 is more
-specific than IBase:
-
-  >>> ISub['foo'].__doc__
-  'base2 foo doc'
-
-Note that this differs from a depth-first search, which would have
-found `IBase`'s definition first (by way of `IBase1`).
-
-Sometimes, it's useful to ask whether an interface defines an
-attribute directly.  You can use the `direct` method to get directly
-defined definitions:
-
-  >>> IBase.direct('foo').__doc__
-  'base foo doc'
-
-  >>> ISub.direct('foo')
-
-Here `direct` returns `None` (no output is printed), because `ISub`
-doesn't define `foo` itself.
-
-Specifications
---------------
-
-Interfaces and declarations are both special cases of specifications.
-What we described above for interface inheritance applies to both
-declarations and specifications.  Declarations actually extend the
-interfaces that they declare:
-
-  >>> class Baz:
-  ...     zope.interface.implements(IBaz)
-
-  >>> baz_implements = zope.interface.implementedBy(Baz)
-  >>> baz_implements.__bases__
-  (<InterfaceClass __main__.IBaz>,)
-
-  >>> baz_implements.extends(IFoo)
-  True
-
-  >>> baz_implements.isOrExtends(IFoo)
-  True
-  >>> baz_implements.isOrExtends(baz_implements)
-  True
-
-Specifications (interfaces and declarations) provide an `__sro__`
-that lists the specification and all of its ancestors:
-
-  >>> baz_implements.__sro__
-  (<implementedBy __main__.Baz>,
-   <InterfaceClass __main__.IBaz>,
-   <InterfaceClass __main__.IFoo>,
-   <InterfaceClass __main__.IBlat>,
-   <InterfaceClass zope.interface.Interface>)
-
-
-Tagged Values
-=============
-
-Interfaces and attribute descriptions support an extension mechanism,
-borrowed from UML, called "tagged values" that lets us store extra
-data::
-
-  >>> IFoo.setTaggedValue('date-modified', '2004-04-01')
-  >>> IFoo.setTaggedValue('author', 'Jim Fulton')
-  >>> IFoo.getTaggedValue('date-modified')
-  '2004-04-01'
-  >>> IFoo.queryTaggedValue('date-modified')
-  '2004-04-01'
-  >>> IFoo.queryTaggedValue('datemodified')
-  >>> tags = list(IFoo.getTaggedValueTags())
-  >>> tags.sort()
-  >>> tags
-  ['author', 'date-modified']
-
-Function attributes are converted to tagged values when method
-attribute definitions are created::
-
-  >>> class IBazFactory(zope.interface.Interface):
-  ...     def __call__():
-  ...         "create one"
-  ...     __call__.return_type = IBaz
-
-  >>> IBazFactory['__call__'].getTaggedValue('return_type')
-  <InterfaceClass __main__.IBaz>
-
-
-Invariants
-==========
-
-Interfaces can express conditions that must hold for objects that
-provide them. These conditions are expressed using one or more
-invariants.  Invariants are callable objects that will be called with
-an object that provides an interface. An invariant raises an `Invalid`
-exception if the condition doesn't hold.  Here's an example::
-
-  >>> class RangeError(zope.interface.Invalid):
-  ...     """A range has invalid limits"""
-  ...     def __repr__(self):
-  ...         return "RangeError(%r)" % self.args
-
-  >>> def range_invariant(ob):
-  ...     if ob.max < ob.min:
-  ...         raise RangeError(ob)
-
-Given this invariant, we can use it in an interface definition::
-
-  >>> class IRange(zope.interface.Interface):
-  ...     min = zope.interface.Attribute("Lower bound")
-  ...     max = zope.interface.Attribute("Upper bound")
-  ...
-  ...     zope.interface.invariant(range_invariant)
-
-Interfaces have a method for checking their invariants::
-
-  >>> class Range(object):
-  ...     zope.interface.implements(IRange)
-  ...
-  ...     def __init__(self, min, max):
-  ...         self.min, self.max = min, max
-  ...
-  ...     def __repr__(self):
-  ...         return "Range(%s, %s)" % (self.min, self.max)
-
-  >>> IRange.validateInvariants(Range(1,2))
-  >>> IRange.validateInvariants(Range(1,1))
-  >>> IRange.validateInvariants(Range(2,1))
-  Traceback (most recent call last):
-  ...
-  RangeError: Range(2, 1)
-
-If you have multiple invariants, you may not want to stop checking
-after the first error.  If you pass a list to `validateInvariants`,
-then a single `Invalid` exception will be raised with the list of
-exceptions as its argument::
-
-  >>> errors = []
-  >>> IRange.validateInvariants(Range(2,1), errors)
-  Traceback (most recent call last):
-  ...
-  Invalid: [RangeError(Range(2, 1))]
-
-And the list will be filled with the individual exceptions::
-
-  >>> errors
-  [RangeError(Range(2, 1))]
-
-
-
-
-
-.. [#create] The main reason we subclass `Interface` is to cause the
-             Python class statement to create an interface, rather
-             than a class.
-
-             It's possible to create interfaces by calling a special
-             interface class directly.  Doing this, it's possible
-             (and, on rare occasions, useful) to create interfaces
-             that don't descend from `Interface`.  Using this
-             technique is beyond the scope of this document.
-
-.. [#factory] Classes are factories.  They can be called to create
-              their instances.  We expect that we will eventually
-              extend the concept of implementation to other kinds of
-              factories, so that we can declare the interfaces
-              provided by the objects created.
-
-.. [#compat] The goal is substitutability.  An object that provides an
-             extending interface should be substitutable for an object
-             that provides the extended interface.  In our example, an
-             object that provides IBaz should be usable wherever an
-             object that provides IBlat is expected.
-
-             The interface implementation doesn't enforce this. XXX
-             but maybe it should do some checks.
diff --git a/branches/bug1734/src/zope/interface/SETUP.cfg b/branches/bug1734/src/zope/interface/SETUP.cfg
deleted file mode 100644
index db9a8f07..00000000
--- a/branches/bug1734/src/zope/interface/SETUP.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-# Extension information for zpkg:
-
-<extension _zope_interface_coptimizations>
-  source _zope_interface_coptimizations.c
-</extension>
diff --git a/branches/bug1734/src/zope/interface/__init__.py b/branches/bug1734/src/zope/interface/__init__.py
deleted file mode 100644
index 50227f65..00000000
--- a/branches/bug1734/src/zope/interface/__init__.py
+++ /dev/null
@@ -1,80 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interfaces
-
-This package implements the Python "scarecrow" proposal.
-
-The package exports two objects, `Interface` and `Attribute` directly. It also
-exports several helper methods. Interface is used to create an interface with
-a class statement, as in:
-
-  class IMyInterface(Interface):
-    '''Interface documentation
-    '''
-
-    def meth(arg1, arg2):
-        '''Documentation for meth
-        '''
-
-    # Note that there is no self argument
-
-To find out what you can do with interfaces, see the interface
-interface, `IInterface` in the `interfaces` module.
-
-The package has several public modules:
-
-  o `declarations` provides utilities to declare interfaces on objects. It
-    also provides a wide range of helpful utilities that aid in managing
-    declared interfaces. Most of its public names are, however, imported here.
-
-  o `document` has a utility for documenting an interface as structured text.
-
-  o `exceptions` has the interface-defined exceptions
-
-  o `interfaces` contains a list of all public interfaces for this package.
-
-  o `verify` has utilities for verifying implementations of interfaces.
-
-See the module doc strings for more information.
-
-$Id$
-"""
-__docformat__ = 'restructuredtext'
-
-from zope.interface.interface import Interface, _wire
-
-# Need to actually get the interface elements to implement the right interfaces
-_wire()
-del _wire
-
-from zope.interface.interface import Attribute, invariant
-
-from zope.interface.declarations import providedBy, implementedBy
-from zope.interface.declarations import classImplements, classImplementsOnly
-from zope.interface.declarations import directlyProvidedBy, directlyProvides
-from zope.interface.declarations import alsoProvides, implementer
-from zope.interface.declarations import implements, implementsOnly
-from zope.interface.declarations import classProvides, moduleProvides
-from zope.interface.declarations import Declaration
-from zope.interface.exceptions import Invalid
-
-# The following are to make spec pickles cleaner
-from zope.interface.declarations import Provides
-
-
-from zope.interface.interfaces import IInterfaceDeclaration
-
-moduleProvides(IInterfaceDeclaration)
-
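-# Iterating an interface yields the names it defines, so this
-# re-exports every name in IInterfaceDeclaration alongside Interface
-# and Attribute.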
-__all__ = ('Interface', 'Attribute') + tuple(IInterfaceDeclaration)
diff --git a/branches/bug1734/src/zope/interface/_flatten.py b/branches/bug1734/src/zope/interface/_flatten.py
deleted file mode 100644
index 7645abb1..00000000
--- a/branches/bug1734/src/zope/interface/_flatten.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Adapter-style interface registry
-
-See Adapter class.
-
-$Id$
-"""
-from zope.interface import Declaration
-
-def _flatten(implements, include_None=0):
-
-    try:
-        r = implements.flattened()
-    except AttributeError:
-        if implements is None:
-            r = ()
-        else:
-            r = Declaration(implements).flattened()
-
-    if not include_None:
-        return r
-
-    r = list(r)
-    r.append(None)
-    return r
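-
-# Sketch of intended use: given a declaration or a single interface,
-# _flatten() returns an iterable of the interface(s) plus all of their
-# bases; when include_None is true, None is appended so that registries
-# can use it as a catch-all key.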
diff --git a/branches/bug1734/src/zope/interface/_zope_interface_coptimizations.c b/branches/bug1734/src/zope/interface/_zope_interface_coptimizations.c
deleted file mode 100644
index 4d8db467..00000000
--- a/branches/bug1734/src/zope/interface/_zope_interface_coptimizations.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*###########################################################################
- #
- # Copyright (c) 2003 Zope Corporation and Contributors.
- # All Rights Reserved.
- #
- # This software is subject to the provisions of the Zope Public License,
- # Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
- # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
- # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
- # FOR A PARTICULAR PURPOSE.
- #
- ############################################################################*/
-
-#include "Python.h"
-#include "structmember.h"
-
-#define TYPE(O) ((PyTypeObject*)(O))
-#define OBJECT(O) ((PyObject*)(O))
-#define CLASSIC(O) ((PyClassObject*)(O))
-
-static PyObject *str__dict__, *str__implemented__, *strextends;
-static PyObject *BuiltinImplementationSpecifications, *str__provides__;
-static PyObject *str__class__, *str__providedBy__, *strisOrExtends;
-static PyObject *empty, *fallback, *str_implied, *str_cls, *str_implements;
-static PyTypeObject *Implements;
-
-static int imported_declarations = 0;
-
-static int 
-import_declarations(void)
-{
-  PyObject *declarations, *i;
-
-  declarations = PyImport_ImportModule("zope.interface.declarations");
-  if (declarations == NULL)
-    return -1;
-  
-  BuiltinImplementationSpecifications = PyObject_GetAttrString(
-                    declarations, "BuiltinImplementationSpecifications");
-  if (BuiltinImplementationSpecifications == NULL)
-    return -1;
-
-  empty = PyObject_GetAttrString(declarations, "_empty");
-  if (empty == NULL)
-    return -1;
-
-  fallback = PyObject_GetAttrString(declarations, "implementedByFallback");
-  if (fallback == NULL)
-    return -1;
-
-
-
-  i = PyObject_GetAttrString(declarations, "Implements");
-  if (i == NULL)
-    return -1;
-
-  if (! PyType_Check(i))
-    {
-      PyErr_SetString(PyExc_TypeError, 
-                      "zope.declarations.Implements is not a type");
-      return -1;
-    }
-
-  Implements = (PyTypeObject *)i;
-
-  Py_DECREF(declarations);
-
-  imported_declarations = 1;
-  return 0;
-}
-
-static PyTypeObject SpecType;   /* Forward */
-
-static PyObject *
-implementedByFallback(PyObject *cls)
-{
-  if (imported_declarations == 0 && import_declarations() < 0)
-    return NULL;
-
-  return PyObject_CallFunctionObjArgs(fallback, cls, NULL);
-}
-
-static PyObject *
-implementedBy(PyObject *ignored, PyObject *cls)
-{
-  /* Fast retrieval of implements spec, if possible, to optimize
-     common case.  Use fallback code if we get stuck.
-  */
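-
-  /* Lookup order: 1) the class dict (tp_dict for types, __dict__
-     otherwise); 2) its __implemented__ entry, when that is an
-     Implements instance; 3) the BuiltinImplementationSpecifications
-     registry; 4) the pure-Python implementedByFallback().
-  */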
-
-  PyObject *dict = NULL, *spec;
-
-  if (PyType_Check(cls))
-    {
-      dict = TYPE(cls)->tp_dict;
-      Py_XINCREF(dict);
-    }
-
-  if (dict == NULL)
-    dict = PyObject_GetAttr(cls, str__dict__);
-
-  if (dict == NULL)
-    {
-      /* Probably a security proxied class, use more expensive fallback code */
-      PyErr_Clear();
-      return implementedByFallback(cls);
-    }
-
-  spec = PyObject_GetItem(dict, str__implemented__);
-  Py_DECREF(dict);
-  if (spec)
-    {
-      if (imported_declarations == 0 && import_declarations() < 0)
-        return NULL;
-
-      if (PyObject_TypeCheck(spec, Implements))
-        return spec;
-
-      /* Old-style declaration, use more expensive fallback code */
-      Py_DECREF(spec);
-      return implementedByFallback(cls);
-    }
-
-  PyErr_Clear();
-
-  /* Maybe we have a builtin */
-  if (imported_declarations == 0 && import_declarations() < 0)
-    return NULL;
-  
-  spec = PyDict_GetItem(BuiltinImplementationSpecifications, cls);
-  if (spec != NULL)
-    {
-      Py_INCREF(spec);
-      return spec;
-    }
-
-  /* We're stuck, use fallback */
-  return implementedByFallback(cls);
-}
-
-static PyObject *
-getObjectSpecification(PyObject *ignored, PyObject *ob)
-{
-  PyObject *cls, *result;
-
-  result = PyObject_GetAttr(ob, str__provides__);
-  if (result != NULL)
-    return result;
-
-  PyErr_Clear();
-
-  /* We do a getattr here so as not to be defeated by proxies */
-  cls = PyObject_GetAttr(ob, str__class__);
-  if (cls == NULL)
-    {
-      PyErr_Clear();
-      if (imported_declarations == 0 && import_declarations() < 0)
-        return NULL;
-      Py_INCREF(empty);
-      return empty;
-    }
-
-  result = implementedBy(NULL, cls);
-  Py_DECREF(cls);
-
-  return result;
-}
-
-static PyObject *
-providedBy(PyObject *ignored, PyObject *ob)
-{
-  PyObject *result, *cls, *cp;
-  
-  result = PyObject_GetAttr(ob, str__providedBy__);
-  if (result == NULL)
-    {
-      PyErr_Clear();
-      return getObjectSpecification(NULL, ob);
-    } 
-
-
-  /* We want to make sure we have a spec.  We can't do a type check
-     because we may have a proxy, so we just probe for the one
-     attribute we need.
-  */
-  if (PyObject_HasAttr(result, strextends))
-    return result;
-    
-  /*
-    The object's class doesn't understand descriptors.
-    Sigh. We need to get an object descriptor, but we have to be
-    careful.  We want to use the instance's __provides__, if
-    there is one, but only if it didn't come from the class.
-  */
-  Py_DECREF(result);
-
-  cls = PyObject_GetAttr(ob, str__class__);
-  if (cls == NULL)
-    return NULL;
-
-  result = PyObject_GetAttr(ob, str__provides__);
-  if (result == NULL)
-    {      
-      /* No __provides__, so just fall back to implementedBy */
-      PyErr_Clear();
-      result = implementedBy(NULL, cls);
-      Py_DECREF(cls);
-      return result;
-    } 
-
-  cp = PyObject_GetAttr(cls, str__provides__);
-  if (cp == NULL)
-    {
-      /* The class has no provides; assume we're done: */
-      PyErr_Clear();
-      Py_DECREF(cls);
-      return result;
-    }
-
-  if (cp == result)
-    {
-      /*
-        Oops, we got the provides from the class. This means
-        the object doesn't have its own. We should use implementedBy
-      */
-      Py_DECREF(result);
-      result = implementedBy(NULL, cls);
-    }
-
-  Py_DECREF(cls);
-  Py_DECREF(cp);
-
-  return result;
-}
-
-static PyObject *
-inst_attr(PyObject *self, PyObject *name)
-{
-  /* Get an attribute from an inst dict. Return a borrowed reference.
-   */
-
-  PyObject **dictp, *v;
-
-  dictp = _PyObject_GetDictPtr(self);
-  if (dictp && *dictp && (v = PyDict_GetItem(*dictp, name)))
-    return v;
-  PyErr_SetObject(PyExc_AttributeError, name);
-  return NULL;
-}
-
-
-static PyObject *
-Spec_extends(PyObject *self, PyObject *other)
-{  
-  PyObject *implied;
-
-  implied = inst_attr(self, str_implied);
-  if (implied == NULL)
-    return NULL;
-
-#ifdef Py_True
-  if (PyDict_GetItem(implied, other) != NULL)
-    {
-      Py_INCREF(Py_True);
-      return Py_True;
-    }
-  Py_INCREF(Py_False);
-  return Py_False;
-#else
-  return PyInt_FromLong(PyDict_GetItem(implied, other) != NULL);
-#endif
-}
-
-static char Spec_extends__doc__[] = 
-"Test whether a specification is or extends another"
-;
-
-static char Spec_providedBy__doc__[] = 
-"Test whether an interface is implemented by the specification"
-;
-
-static PyObject *
-Spec_providedBy(PyObject *self, PyObject *ob)
-{
-  PyObject *decl, *item;
-
-  decl = providedBy(NULL, ob);
-  if (decl == NULL)
-    return NULL;
-
-  if (PyObject_TypeCheck(decl, &SpecType))
-    item = Spec_extends(decl, self);
-  else
-    /* decl is probably a security proxy.  We have to go the long way
-       around. 
-    */
-    item = PyObject_CallMethodObjArgs(decl, strisOrExtends, self, NULL);
-
-  Py_DECREF(decl);
-  return item;
-}
-
-
-static char Spec_implementedBy__doc__[] = 
-"Test whether the specification is implemented by instances of a class"
-;
-
-static PyObject *
-Spec_implementedBy(PyObject *self, PyObject *cls)
-{
-  PyObject *decl, *item;
-
-  decl = implementedBy(NULL, cls);
-  if (decl == NULL)
-    return NULL;
-  
-  if (PyObject_TypeCheck(decl, &SpecType))
-    item = Spec_extends(decl, self);
-  else
-    item = PyObject_CallMethodObjArgs(decl, strisOrExtends, self, NULL);
-
-  Py_DECREF(decl);
-  return item;
-}
-
-static struct PyMethodDef Spec_methods[] = {
-	{"providedBy",  
-         (PyCFunction)Spec_providedBy,		METH_O,
-	 Spec_providedBy__doc__},
-	{"implementedBy", 
-         (PyCFunction)Spec_implementedBy,	METH_O,
-	 Spec_implementedBy__doc__},
-	{"isOrExtends",	(PyCFunction)Spec_extends,	METH_O,
-	 Spec_extends__doc__},
-
-	{NULL,		NULL}		/* sentinel */
-};
-
-static PyTypeObject SpecType = {
-	PyObject_HEAD_INIT(NULL)
-	/* ob_size           */ 0,
-	/* tp_name           */ "_interface_coptimizations."
-                                "SpecificationBase",
-	/* tp_basicsize      */ 0,
-	/* tp_itemsize       */ 0,
-	/* tp_dealloc        */ (destructor)0,
-	/* tp_print          */ (printfunc)0,
-	/* tp_getattr        */ (getattrfunc)0,
-	/* tp_setattr        */ (setattrfunc)0,
-	/* tp_compare        */ (cmpfunc)0,
-	/* tp_repr           */ (reprfunc)0,
-	/* tp_as_number      */ 0,
-	/* tp_as_sequence    */ 0,
-	/* tp_as_mapping     */ 0,
-	/* tp_hash           */ (hashfunc)0,
-	/* tp_call           */ (ternaryfunc)0,
-	/* tp_str            */ (reprfunc)0,
-        /* tp_getattro       */ (getattrofunc)0,
-        /* tp_setattro       */ (setattrofunc)0,
-        /* tp_as_buffer      */ 0,
-        /* tp_flags          */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
-        "Base type for Specification objects",
-        /* tp_traverse       */ (traverseproc)0,
-        /* tp_clear          */ (inquiry)0,
-        /* tp_richcompare    */ (richcmpfunc)0,
-        /* tp_weaklistoffset */ (long)0,
-        /* tp_iter           */ (getiterfunc)0,
-        /* tp_iternext       */ (iternextfunc)0,
-        /* tp_methods        */ Spec_methods,
-};
-
-static PyObject *
-OSD_descr_get(PyObject *self, PyObject *inst, PyObject *cls)
-{
-  PyObject *provides;
-
-  if (inst == NULL)
-    return getObjectSpecification(NULL, cls);
-
-  provides = PyObject_GetAttr(inst, str__provides__);
-  if (provides != NULL)
-    return provides;
-  PyErr_Clear();
-  return implementedBy(NULL, cls);
-}
-
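-/* A rough Python equivalent of OSD_descr_get, for illustration only;
-   it assumes the helpers above and is not part of the build:
-
-       def __get__(self, inst, cls):
-           if inst is None:
-               return getObjectSpecification(cls)
-           try:
-               return inst.__provides__
-           except AttributeError:
-               return implementedBy(cls)
-*/
-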
-static PyTypeObject OSDType = {
-	PyObject_HEAD_INIT(NULL)
-	/* ob_size           */ 0,
-	/* tp_name           */ "_interface_coptimizations."
-                                "ObjectSpecificationDescriptor",
-	/* tp_basicsize      */ 0,
-	/* tp_itemsize       */ 0,
-	/* tp_dealloc        */ (destructor)0,
-	/* tp_print          */ (printfunc)0,
-	/* tp_getattr        */ (getattrfunc)0,
-	/* tp_setattr        */ (setattrfunc)0,
-	/* tp_compare        */ (cmpfunc)0,
-	/* tp_repr           */ (reprfunc)0,
-	/* tp_as_number      */ 0,
-	/* tp_as_sequence    */ 0,
-	/* tp_as_mapping     */ 0,
-	/* tp_hash           */ (hashfunc)0,
-	/* tp_call           */ (ternaryfunc)0,
-	/* tp_str            */ (reprfunc)0,
-        /* tp_getattro       */ (getattrofunc)0,
-        /* tp_setattro       */ (setattrofunc)0,
-        /* tp_as_buffer      */ 0,
-        /* tp_flags          */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
-        /* tp_doc            */ "Object Specification Descriptor",
-        /* tp_traverse       */ (traverseproc)0,
-        /* tp_clear          */ (inquiry)0,
-        /* tp_richcompare    */ (richcmpfunc)0,
-        /* tp_weaklistoffset */ (long)0,
-        /* tp_iter           */ (getiterfunc)0,
-        /* tp_iternext       */ (iternextfunc)0,
-        /* tp_methods        */ 0,
-        /* tp_members        */ 0,
-        /* tp_getset         */ 0,
-        /* tp_base           */ 0,
-        /* tp_dict           */ 0, /* internal use */
-        /* tp_descr_get      */ (descrgetfunc)OSD_descr_get,
-};
-
-static PyObject *
-CPB_descr_get(PyObject *self, PyObject *inst, PyObject *cls)
-{
-  PyObject *mycls, *implements;
-
-  mycls = inst_attr(self, str_cls);
-  if (mycls == NULL)
-    return NULL;
-
-  if (cls == mycls)
-    {
-      if (inst == NULL)
-        {
-          Py_INCREF(self);
-          return OBJECT(self);
-        }
-
-      implements = inst_attr(self, str_implements);
-      Py_XINCREF(implements);
-      return implements;
-    }
-  
-  PyErr_SetObject(PyExc_AttributeError, str__provides__);
-  return NULL;
-}
-
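-/* A rough Python equivalent of CPB_descr_get, for illustration only;
-   it assumes the _cls and _implements instance attributes set from
-   Python:
-
-       def __get__(self, inst, cls):
-           if cls is self._cls:
-               if inst is None:
-                   return self
-               return self._implements
-           raise AttributeError('__provides__')
-*/
-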
-static PyTypeObject CPBType = {
-	PyObject_HEAD_INIT(NULL)
-	/* ob_size           */ 0,
-	/* tp_name           */ "_interface_coptimizations."
-                                "ClassProvidesBase",
-	/* tp_basicsize      */ 0,
-	/* tp_itemsize       */ 0,
-	/* tp_dealloc        */ (destructor)0,
-	/* tp_print          */ (printfunc)0,
-	/* tp_getattr        */ (getattrfunc)0,
-	/* tp_setattr        */ (setattrfunc)0,
-	/* tp_compare        */ (cmpfunc)0,
-	/* tp_repr           */ (reprfunc)0,
-	/* tp_as_number      */ 0,
-	/* tp_as_sequence    */ 0,
-	/* tp_as_mapping     */ 0,
-	/* tp_hash           */ (hashfunc)0,
-	/* tp_call           */ (ternaryfunc)0,
-	/* tp_str            */ (reprfunc)0,
-        /* tp_getattro       */ (getattrofunc)0,
-        /* tp_setattro       */ (setattrofunc)0,
-        /* tp_as_buffer      */ 0,
-        /* tp_flags          */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
-        "C Base class for ClassProvides",
-        /* tp_traverse       */ (traverseproc)0,
-        /* tp_clear          */ (inquiry)0,
-        /* tp_richcompare    */ (richcmpfunc)0,
-        /* tp_weaklistoffset */ (long)0,
-        /* tp_iter           */ (getiterfunc)0,
-        /* tp_iternext       */ (iternextfunc)0,
-        /* tp_methods        */ 0,
-        /* tp_members        */ 0,
-        /* tp_getset         */ 0,
-        /* tp_base           */ &SpecType,
-        /* tp_dict           */ 0, /* internal use */
-        /* tp_descr_get      */ (descrgetfunc)CPB_descr_get,
-};
-
-
-static struct PyMethodDef m_methods[] = {
-  {"implementedBy", (PyCFunction)implementedBy, METH_O,
-   "Interfaces implemented by instances of a class"},
-  {"getObjectSpecification", (PyCFunction)getObjectSpecification, METH_O,
-   "Get an object's interfaces (internal api)"},
-  {"providedBy", (PyCFunction)providedBy, METH_O,
-   "Get an object's interfaces"},
-  
-  {NULL,	 (PyCFunction)NULL, 0, NULL}		/* sentinel */
-};
-
-#ifndef PyMODINIT_FUNC	/* declarations for DLL import/export */
-#define PyMODINIT_FUNC void
-#endif
-PyMODINIT_FUNC
-init_zope_interface_coptimizations(void)
-{
-  PyObject *m;
-
-#define DEFINE_STRING(S) \
-  if(! (str ## S = PyString_FromString(# S))) return
-
-  DEFINE_STRING(__dict__);
-  DEFINE_STRING(__implemented__);
-  DEFINE_STRING(__provides__);
-  DEFINE_STRING(__class__);
-  DEFINE_STRING(__providedBy__);
-  DEFINE_STRING(isOrExtends);
-  DEFINE_STRING(extends);
-  DEFINE_STRING(_implied);
-  DEFINE_STRING(_implements);
-  DEFINE_STRING(_cls);
-#undef DEFINE_STRING
-  
-        
-  /* Initialize types: */
-  SpecType.tp_new = PyBaseObject_Type.tp_new;
-  if (PyType_Ready(&SpecType) < 0)
-    return;
-  OSDType.tp_new = PyBaseObject_Type.tp_new;
-  if (PyType_Ready(&OSDType) < 0)
-    return;
-  CPBType.tp_new = PyBaseObject_Type.tp_new;
-  if (PyType_Ready(&CPBType) < 0)
-    return;
-  
-  /* Create the module and add the functions */
-  m = Py_InitModule3("_zope_interface_coptimizations", m_methods,
-                     "C optimizations for zope.interface\n\n"
-                     "$Id$");  
-  if (m == NULL)
-    return;
-  
-  /* Add types: */
-  if (PyModule_AddObject(m, "SpecificationBase", (PyObject *)&SpecType) < 0)
-    return;
-  if (PyModule_AddObject(m, "ObjectSpecificationDescriptor", 
-                         (PyObject *)&OSDType) < 0)
-    return;
-  if (PyModule_AddObject(m, "ClassProvidesBase", (PyObject *)&CPBType) < 0)
-    return;
-}
-
diff --git a/branches/bug1734/src/zope/interface/adapter.py b/branches/bug1734/src/zope/interface/adapter.py
deleted file mode 100644
index f77fe0cb..00000000
--- a/branches/bug1734/src/zope/interface/adapter.py
+++ /dev/null
@@ -1,732 +0,0 @@
-############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################
-"""Adapter-style interface registry
-
-This implementation is based on a notion of "surrogate" interfaces.
-
-$Id$
-"""
-
-# Implementation notes
-
-# We keep a collection of surrogates.
-
-# A surrogate is a surrogate for a specification (interface or
-# declaration).  We use weak references in order to remove surrogates
-# if the corresponding specification goes away.
-
-# Each surrogate keeps track of:
-
-# - The adapters registered directly for that surrogate, and
-
-# - The "implied" adapters, which is the adapters that can be computed
-#   from instances of that surrogate.
-
-# The latter data structure takes into account adapters registered for
-# specifications that the registered surrogate extends.
-
-# The registrations are of the form:
-
-#   {(subscription, with, name, specification) -> factories}
-
-# where:
-
-#   'subscription' is a flag indicating if this registration is for
-#   subscription adapters.
-
-#   'with' is a tuple of specs that is non-empty only in the case
-#   of multi-adapters.  
-
-#   'name' is a unicode adapter name.  Unnamed adapters have an empty
-#   name.
-
-#   'specification' is the interface being adapted to.
-
-#   'factories' is normally a tuple of factories, but can be anything.
-#   (See the "raw" option to the query-adapter calls.)  For subscription
-#   adapters, it is a tuple of tuples of factories.
-
-# The implied adapters are held in a single dictionary. The items in the
-# dictionary are of several forms:
-
-# For single adapters:
-#
-# {specification -> {name -> object}
-#
-# where object is usually a sequence of factories
-
-# For multiple adapters:
-#
-# {(specification, order) -> {name -> {with -> object}}}
-
-# For single subscription adapters:
-#
-# {('s', specification) -> tuple([object])}
-
-# For multiple-subscription adapters:
-#
-# {('s', specification, order) -> {with -> tuple([object])}}
-
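-# As a concrete (illustrative, hypothetical) sketch, a single unnamed
-# adapter factory F registered to adapt IFoo to IBar would be stored as
-#
-#   adapters = {(False, (), u'', IBar): (F,)}
-#
-# and, once the implied data is computed, would appear roughly as
-#
-#   implied = {IBar: {u'': (F,)}}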
-
-from __future__ import generators
-
-import weakref
-from zope.interface.ro import ro
-from zope.interface.declarations import providedBy
-from zope.interface.interface import InterfaceClass, Interface
-
-Default = InterfaceClass("Default", (), {})
-Null = InterfaceClass("Null", (), {})
-
-# 2.2 backwards compatibility
-try:
-    enumerate
-except NameError:
-    def enumerate(l):
-        i = 0
-        for o in l:
-            yield i, o
-            i += 1
-try:
-    basestring
-except NameError:
-    basestring = (str, unicode)
-
-
-class ReadProperty(object):
-
-    def __init__(self, func):
-        self.func = func
-
-    def __get__(self, inst, class_):
-        if inst is None:
-            return self
-        return self.func(inst)
-
-class Surrogate(object):
-    """Specification surrogate
-
-    A specification surrogate is used to hold adapter registrations on
-    behalf of a specification.
-    """
-
-    def __init__(self, spec, registry):
-        self.spec = spec.weakref()
-        spec.subscribe(self)
-        self.adapters = {}
-        self.dependents = weakref.WeakKeyDictionary()
-
-        self.registry = registry
-        self.__bases__ = [registry.get(base) for base in spec.__bases__]
-        for base in self.__bases__:
-            base.subscribe(self)
-
-    def dirty(self):
-        if 'get' in self.__dict__:
-            # Not already dirty
-            del self.selfImplied
-            del self.multImplied
-            del self.get
-        for dependent in self.dependents.keys():
-            dependent.dirty()
-
-    def clean(self):
-        for base in self.__bases__:
-            base.unsubscribe(self)
-        self.__bases__ = [self.registry.get(base)
-                          for base in self.spec().__bases__]
-        for base in self.__bases__:
-            base.subscribe(self)
-
-        self.selfImplied, self.multImplied = adapterImplied(self.adapters)
-
-        implied = {}
-
-        ancestors = ro(self)
-
-        # Collect implied data in reverse order to have more specific data
-        # override less-specific data.
-        ancestors.reverse()
-        for ancestor in ancestors:
-            
-            for key, v in ancestor.selfImplied.iteritems():
-
-                # key is specification or ('s', specification)
-                subscription = isinstance(key, tuple) and key[0] == 's'
-                if subscription:
-                    # v is tuple of subs
-                    implied[key] = implied.get(key, ()) + v
-                else:
-                    oldbyname = implied.get(key)
-                    if not oldbyname:
-                        implied[key] = oldbyname = {}
-                    
-                    # v is name -> object
-                    oldbyname.update(v)
-
-            for key, v in ancestor.multImplied.iteritems():
-                # key is (specification, order)
-                #     or ('s', specification, order)
-                subscription = key[0] == 's'
-                if subscription:
-                    oldwithobs = implied.get(key)
-                    if not oldwithobs:
-                        oldwithobs = implied[key] = {}
-                        
-                    # v is {with -> tuple([object])}
-                    for with, objects in v.iteritems():
-                        oldwithobs[with] = oldwithobs.get(with, ()) + objects
-                    
-                else:
-                    oldbyname = implied.get(key)
-                    if not oldbyname:
-                        implied[key] = oldbyname = {}
-
-                    # v is {name -> {with -> ?}}
-                    for name, withobs in v.iteritems():
-                        oldwithobs = oldbyname.get(name)
-                        if not oldwithobs:
-                            oldwithobs = oldbyname[name] = {}
-
-                        # withobs is {with -> object}
-                        oldwithobs.update(withobs)
-
-        # Now flatten with mappings to tuples
-        for key, v in implied.iteritems():
-            if isinstance(key, tuple):
-                if key[0] == 's':
-                    # subscriptions
-                    if isinstance(v, dict):
-                        implied[key] = v.items()
-                else:
-                    byname = v
-                    for name, value in byname.iteritems():
-                        if isinstance(value, dict):
-                            # We have {with -> value}
-                            # convert it to a sorted [(with, value)] list
-                            byname[name] = orderwith(value)
-
-        self.get = implied.get
-
-    def get(self, key):
-        """Get an implied value
-
-        This is only called when the surrogate is dirty
-        """
-        self.clean()
-        return self.__dict__['get'](key)
-
-    def selfImplied(self):
-        """Return selfImplied when dirty
-        """
-        self.clean()
-        return self.__dict__['selfImplied']
-    selfImplied = ReadProperty(selfImplied)
-
-    def multImplied(self):
-        """Return multImplied when dirty
-        """
-        self.clean()
-        return self.__dict__['multImplied']
-    multImplied = ReadProperty(multImplied)
-
-    def subscribe(self, dependent):
-        self.dependents[dependent] = 1
-
-    def unsubscribe(self, dependent):
-        del self.dependents[dependent]
-
-    def _adaptTo(self, specification, object, name='', with=()):
-        if object is None:
-            try:
-                del self.adapters[False, tuple(with), name, specification]
-            except KeyError:
-                pass
-        else:
-            self.adapters[False, tuple(with), name, specification
-                          ] = object
-
-        self.dirty()
-
-    def _subscriptionAdaptTo(self, specification, object, with=()):
-        if object is None:
-            raise TypeError, ("Unregistering subscription adapters" 
-                              " isn't implemented")
-
-        key = (True, tuple(with), '', specification)
-        self.adapters[key] = self.adapters.get(key, ()) + (object, )
-        self.dirty()
-
-    def changed(self, which=None):
-        self.dirty()
-
-    def __repr__(self):
-        return '<%s(%s)>' % (self.__class__.__name__, self.spec())
-
-def orderwith(bywith):
-
-    # Convert {with -> value} to a list, withs = [(with, value)],
-    # ordered such that there are no i < j for which
-    # withs[j][0] extends withs[i][0].
-
-    withs = []
-    for with, value in bywith.iteritems():
-        for i, (w, v) in enumerate(withs):
-            if withextends(with, w):
-                withs.insert(i, (with, value))
-                break
-        else:
-            withs.append((with, value))
-            
-    return withs
-    
-
-def withextends(with1, with2):
-    for spec1, spec2 in zip(with1, with2):
-        if spec1.extends(spec2):
-            return True
-        if spec1 != spec2:
-            break
-    return False
-
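-# Illustrative example (hypothetical interfaces): if IExtended extends
-# IBase, then orderwith({(IBase,): 1, (IExtended,): 2}) returns
-# [((IExtended,), 2), ((IBase,), 1)], so more specific 'with' tuples
-# are found before the ones they extend.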
-
-class AdapterLookup(object):
-    # Adapter lookup support
-    # This is a separate class because we want to provide very fast
-    # lookup support in C; making it part of the adapter registry
-    # itself would cause problems if someone wanted persistent adapter
-    # registries, because the C slots we want for fast lookup would
-    # clash with persistence-supplied slots.  So this class acts a
-    # little bit like a lookup adapter for the adapter registry.
-
-    def __init__(self, registry, surrogates, _remove):
-        self._registry = registry
-        self._surrogateClass = registry._surrogateClass
-        self._default = registry._default
-        self._null = registry._null
-        self._surrogates = surrogates
-        self._remove = _remove
-
-    def lookup(self, required, provided, name='', default=None):
-        order = len(required)
-        if order == 1:
-            # Simple adapter:
-            s = self.get(required[0])
-            byname = s.get(provided)
-            if byname:
-                value = byname.get(name)
-            else:
-                value = None
-
-            if value is None:
-                byname = self._default.get(provided)
-                if byname:
-                    value = byname.get(name, default)
-                else:
-                    return default
-                
-            return value
-
-        elif order == 0:
-            # null adapter
-            byname = self._null.get(provided)
-            if byname:
-                return byname.get(name, default)
-            else:
-                return default
-
-        # Multi adapter
-
-        with = required[1:]
-        key = provided, order
-
-        for surrogate in self.get(required[0]), self._default:
-            byname = surrogate.get(key)
-            if not byname:
-                continue
-
-            bywith = byname.get(name)
-            if not bywith:
-                continue
-
-            # Selecting multi-adapters is not just a matter of matching the
-            # required interfaces of the adapter to the ones passed. Several
-            # adapters might match, but we only want the best one. We use a
-            # ranking algorithm to determine the best match.
-
-            # `best` carries the rank and value of the best found adapter.
-            best = None
-            for rwith, value in bywith:
-                # the `rank` describes how well the found adapter matches.
-                rank = []
-                for rspec, spec in zip(rwith, with):
-                    if not spec.isOrExtends(rspec):
-                        break # This one is no good
-                    # Determine the rank of this particular specification.
-                    rank.append(list(spec.__sro__).index(rspec))
-                else:
-                    # If the new rank is better than the best previously
-                    # recorded one, make the new adapter the best one found. 
-                    rank = tuple(rank)
-                    if best is None or rank < best[0]:
-                        best = rank, value
-            # If any match was found, return the best one.
-            if best:
-                return best[1]
-
-        return default
-
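-    # Illustrative ranking example (hypothetical interfaces): with IQ2
-    # extending IQ, a lookup for required (IR, IQ2) may find factories
-    # registered with 'with' tuples (IQ,) and (IQ2,).  Their ranks are
-    # (list(IQ2.__sro__).index(IQ),) == (1,) and
-    # (list(IQ2.__sro__).index(IQ2),) == (0,); the lower tuple wins, so
-    # the (IQ2,) registration is chosen.
-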
-    def lookup1(self, required, provided, name='', default=None):
-        return self.lookup((required,), provided, name, default)
-
-    def adapter_hook(self, interface, object, name='', default=None):
-        """Hook function used when calling interfaces.
-
-        When called from Interface.__adapt__, only the interface and
-        object parameters will be passed.
-
-        If the factory produces `None`, the default is returned. This
-        allows us to prevent adaptation (if desired) and lets the
-        factory decide whether an adapter will be available.
-        """
-        factory = self.lookup1(providedBy(object), interface, name)
-        if factory is not None:
-            adapter = factory(object)
-            if adapter is not None:
-                return adapter
-
-        return default
-
-    def queryAdapter(self, object, interface, name='', default=None):
-        # Note that we rarely call queryAdapter directly; we usually
-        # end up calling adapter_hook.
-        return self.adapter_hook(interface, object, name, default)
-
-
-    def subscriptions(self, required, provided):
-        if provided is None:
-            provided = Null
-
-        order = len(required)
-        if order == 1:
-            # Simple subscriptions:
-            s = self.get(required[0])
-            result = s.get(('s', provided))
-            if result:
-                result = list(result)
-            else:
-                result = []
-
-            default = self._default.get(('s', provided))
-            if default:
-                result.extend(default)
-                
-            return result
-
-        elif order == 0:
-            result = self._null.get(('s', provided))
-            if result:
-                return list(result)
-            else:
-                return []
-        
-        # Multi
-        key = 's', provided, order
-        with = required[1:]
-        result = []
-        
-        for surrogate in self.get(required[0]), self._default:
-            bywith = surrogate.get(key)
-            if not bywith:
-                continue
-
-            for rwith, values in bywith:
-                for rspec, spec in zip(rwith, with):
-                    if not spec.isOrExtends(rspec):
-                        break # This one is no good
-                else:
-                    # we didn't break, so we have a match
-                    result.extend(values)
-
-        return result
-
-        
-
-    def queryMultiAdapter(self, objects, interface, name='', default=None):
-        factory = self.lookup(map(providedBy, objects), interface, name)
-        if factory is not None:
-            return factory(*objects)
-
-        return default
-
-    def subscribers(self, objects, interface):
-        subscriptions = self.subscriptions(map(providedBy, objects), interface)
-        return [subscription(*objects) for subscription in subscriptions]
-
-    def get(self, declaration):
-        if declaration is None:
-            return self._default
-
-        ref = declaration.weakref(self._remove)
-        surrogate = self._surrogates.get(ref)
-        if surrogate is None:
-            surrogate = self._surrogateClass(declaration, self._registry)
-            self._surrogates[ref] = surrogate
-
-        return surrogate
-
-
-class AdapterRegistry(object):
-    """Adapter registry
-    """
-
-    # Implementation note:
-    # We are like a weakref dict ourselves. We can't use a weakref
-    # dict because we have to use spec.weakref() rather than
-    # weakref.ref(spec) to get weak refs to specs.
-
-    _surrogateClass = Surrogate
-
-    def __init__(self):
-        default = self._surrogateClass(Default, self)
-        self._default = default
-        null = self._surrogateClass(Null, self)
-        self._null = null
-
-        # Create separate lookup object and copy its methods
-        surrogates = {Default.weakref(): default, Null.weakref(): null}
-        def _remove(k):
-            try:
-                del surrogates[k]
-            except KeyError:
-                pass
-        lookup = AdapterLookup(self, surrogates, _remove)
-        
-        for name in ('lookup', 'lookup1', 'queryAdapter', 'get',
-                     'adapter_hook', 'subscriptions',
-                     'queryMultiAdapter', 'subscribers',
-                     ):
-            setattr(self, name, getattr(lookup, name))
-
-    def register(self, required, provided, name, value):
-        if required:
-            with = []
-            for iface in required[1:]:
-                if iface is None:
-                    iface = Interface
-                with.append(iface)
-            with = tuple(with)
-            required = self.get(required[0])
-        else:
-            with = ()
-            required = self._null
-        
-        if not isinstance(name, basestring):
-            raise TypeError("The name provided to register "
                            "must be a string or unicode")
-
-        required._adaptTo(provided, value, unicode(name), with)
-
-    def lookupAll(self, required, provided):
-        order = len(required)
-        if order == 1:
-            # Simple adapter:
-            s = self.get(required[0])
-            byname = s.get(provided)
-            if byname:
-                for item in byname.iteritems():
-                    yield item
-
-            defbyname = self._default.get(provided)
-            if defbyname:
-                for name, value in defbyname.iteritems():
-                    if name in byname:
-                        continue
-                    yield name, value
-
-            return
-
-        elif order == 0:
-            # null adapter
-            byname = self._null.get(provided)
-            if byname:
-                for item in byname.iteritems():
-                    yield item
-
-            return
-
-
-        # Multi adapter
-
-        with = required[1:]
-        key = provided, order
-        first = ()
-
-        for surrogate in self.get(required[0]), self._default:
-            byname = surrogate.get(key)
-            if not byname:
-                continue
-
-            for name, bywith in byname.iteritems():
-                if not bywith or name in first:
-                    continue
-
-                # See comments on lookup() above
-                best  = None
-                for rwith, value in bywith:
-                    # the `rank` describes how well the found adapter matches.
-                    rank = []
-                    for rspec, spec in zip(rwith, with):
-                        if not spec.isOrExtends(rspec):
-                            break # This one is no good
-                        # Determine the rank of this particular specification.
-                        rank.append(list(spec.__sro__).index(rspec))
-                    else:
-                        # If the new rank is better than the best previously
-                        # recorded one, make the new adapter the best one found.
-                        rank = tuple(rank)
-                        if best is None or rank < best[0]:
-                            best = rank, value
-
-                # If any match was found, return the best one.
-                if best:
-                    yield name, best[1]
-
-            first = byname
-
-    def subscribe(self, required, provided, value):
-        if required:
-            required, with = self.get(required[0]), tuple(required[1:])
-        else:
-            required = self._null
-            with = ()
-
-        if provided is None:
-            provided = Null
-            
-        required._subscriptionAdaptTo(provided, value, with)
-
-def mextends(with, rwith):
-    if len(with) == len(rwith):
-        for w, r in zip(with, rwith):
-            if not w.isOrExtends(r):
-                break
-        else:
-            return True
-    return False
-
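-# Illustrative example (hypothetical interfaces): if IR2 extends IR1,
-# mextends((IR2,), (IR1,)) is True while mextends((IR1,), (IR2,)) is
-# False; tuples of different lengths never match.
-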
-def adapterImplied(adapters):
-    implied = {}
-    multi = {}
-
-    # This dictionary is used to catch situations where specific
-    # adapters override less-specific adapters.
-    # Because subscriptions are cumulative, 'registered' doesn't apply
-    # to them.
-    registered = {}
-
-    # Add adapters and interfaces directly implied by same:
-
-    for key, value in adapters.iteritems():
-
-        # TODO: Backward compatibility (BBB); someday we won't need
-        # to handle 3-tuple keys.
-        try:
-            (subscription, with, name, target) = key
-        except ValueError:
-            (with, name, target) = key
-            subscription = False
-
-        if subscription:
-            if with:
-                _add_multi_sub_adapter(with, target, multi, value)
-            else:
-                _add_named_sub_adapter(target, implied, value)
-        else:
-            if with:
-                _add_multi_adapter(with, name, target, target, multi,
-                                   registered, value)
-            else:
-                _add_named_adapter(target, target, name, implied,
-                                   registered, value)
-
-    return implied, multi
-
-def _add_named_adapter(target, provided, name, implied,
-                       registered, value):
-    
-    ikey = target
-    rkey = target, name
-
-    byname = implied.get(ikey)
-    if not byname:
-        byname = implied[ikey] = {}
-
-    if (name not in byname
-        or
-        (rkey in registered and registered[rkey].extends(provided))
-        ):
-
-        registered[rkey] = provided
-        byname[name] = value
-
-        for b in target.__bases__:
-            _add_named_adapter(b, provided, name, implied,
-                               registered, value)
-
-def _add_multi_adapter(with, name, target, provided, implied,
-                       registered, object):
-
-    ikey = target, (len(with) + 1)
-    byname = implied.get(ikey)
-    if not byname:
-        byname = implied[ikey] = {}
-
-    bywith = byname.get(name)
-    if not bywith:
-        bywith = byname[name] = {}
-
-    
-    rkey = ikey, name, with # The full key has all 4
-    if (with not in bywith
-        or
-        (rkey not in registered or registered[rkey].extends(provided))
-        ):
-        # This is either a new entry, or an entry for a more general
-        # interface that is provided more directly than the one we had
-        # before.
-        registered[rkey] = provided
-        bywith[with] = object
-
-    for b in target.__bases__:
-        _add_multi_adapter(with, name, b, provided, implied,
-                           registered, object)
-
-def _add_named_sub_adapter(target, implied, objects):
-    key = ('s', target)
-    implied[key] = implied.get(key, ()) + objects
-    
-    for b in target.__bases__:
-        _add_named_sub_adapter(b, implied, objects)
-
-def _add_multi_sub_adapter(with, target, implied, objects):
-    key = 's', target, (len(with) + 1)
-    bywith = implied.get(key)
-    if not bywith:
-        bywith = implied[key] = {}
-
-    bywith[with] = bywith.get(with, ()) + objects
-
-    for b in target.__bases__:
-        _add_multi_sub_adapter(with, b, implied, objects)
diff --git a/branches/bug1734/src/zope/interface/adapter.txt b/branches/bug1734/src/zope/interface/adapter.txt
deleted file mode 100644
index 5d1a5e23..00000000
--- a/branches/bug1734/src/zope/interface/adapter.txt
+++ /dev/null
@@ -1,505 +0,0 @@
-================
-Adapter Registry
-================
-
-Adapter registries provide a way to register objects that depend on
-one or more interface specifications and provide (perhaps indirectly)
-some interface.  In addition, the registrations have names. (You can
-think of the names as qualifiers of the provided interfaces.)
-
-The term "interface specification" refers both to interfaces and to
-interface declarations, such as declarations of interfaces implemented
-by a class.
-
-
-Single Adapters
-===============
-
-Let's look at a simple example, using a single required specification::
-
-  >>> from zope.interface.adapter import AdapterRegistry
-  >>> import zope.interface
-
-  >>> class IR1(zope.interface.Interface):
-  ...     pass
-  >>> class IP1(zope.interface.Interface):
-  ...     pass
-  >>> class IP2(IP1):
-  ...     pass
-
-  >>> registry = AdapterRegistry()
-
-We'll register an object that depends on IR1 and "provides" IP2::
-
-  >>> registry.register([IR1], IP2, '', 12)
-
-Given the registration, we can look it up again::
-
-  >>> registry.lookup([IR1], IP2, '')
-  12
-
-Note that we used an integer in the example.  In real applications,
-one would use some objects that actually depend on or provide
-interfaces. The registry doesn't care about what gets registered, so
-we'll use integers and strings to keep the examples simple. There is
-one exception.  Registering a value of None unregisters any
-previously-registered value.
-
-If an object depends on a specification, it can be looked up with a
-specification that extends the specification that it depends on::
-
-  >>> class IR2(IR1):
-  ...     pass
-  >>> registry.lookup([IR2], IP2, '')
-  12
-
-We can use a class implementation specification to look up the object::
-
-  >>> class C2:
-  ...     zope.interface.implements(IR2)
-
-  >>> registry.lookup([zope.interface.implementedBy(C2)], IP2, '')
-  12
-
-
-and it can be looked up for interfaces that its provided interface
-extends::
-
-  >>> registry.lookup([IR1], IP1, '')
-  12
-  >>> registry.lookup([IR2], IP1, '')
-  12
-
-But if you require a specification that doesn't extend the specification the
-object depends on, you won't get anything::
-
-  >>> registry.lookup([zope.interface.Interface], IP1, '')
-
-By the way, you can pass a default value to lookup::
-
-  >>> registry.lookup([zope.interface.Interface], IP1, '', 42)
-  42
-
-If you try to get an interface the object doesn't provide, you also
-won't get anything::
-
-  >>> class IP3(IP2):
-  ...     pass
-  >>> registry.lookup([IR1], IP3, '')
-
-You also won't get anything if you use the wrong name::
-
-  >>> registry.lookup([IR1], IP1, 'bob')
-  >>> registry.register([IR1], IP2, 'bob', "Bob's 12")
-  >>> registry.lookup([IR1], IP1, 'bob')
-  "Bob's 12"
-
-You can leave the name off when doing a lookup::
-
-  >>> registry.lookup([IR1], IP1)
-  12
-
-If we register an object that provides IP1::
-
-  >>> registry.register([IR1], IP1, '', 11)
-
-then that object will be preferred over the previously registered object
-(12)::
-
-  >>> registry.lookup([IR1], IP1, '')
-  11
-
-Also, if we register an object for IR2, then that will be preferred
-when using IR2::
-
-  >>> registry.register([IR2], IP1, '', 21)
-  >>> registry.lookup([IR2], IP1, '')
-  21
-
-lookup1
--------
-
-Lookup of single adapters is common enough that there is a specialized
-version of lookup that takes a single required interface::
-
-  >>> registry.lookup1(IR2, IP1, '')
-  21
-  >>> registry.lookup1(IR2, IP1)
-  21
-
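-A default value works here too; this extra (illustrative) example
-relies on nothing having been registered that provides IP3::
-
-  >>> registry.lookup1(IR1, IP3, '', 42)
-  42
-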
-Actual Adaptation
------------------
-
-The adapter registry is intended to support adaptation, where one
-object that implements an interface is adapted to another object that
-supports a different interface.  The adapter registry supports the
-computation of adapters. In this case, we have to register adapter
-factories::
-
-  >>> class IR(zope.interface.Interface):
-  ...     pass
-
-  >>> class X:
-  ...     zope.interface.implements(IR)
-
-  >>> class Y:
-  ...     zope.interface.implements(IP1)
-  ...     def __init__(self, context):
-  ...         self.context = context
-
-  >>> registry.register([IR], IP1, '', Y)
-
-In this case, we registered a class as the factory. Now we can call
-`queryAdapter` to get the adapted object::
-
-  >>> x = X()
-  >>> y = registry.queryAdapter(x, IP1)
-  >>> y.__class__.__name__
-  'Y'
-  >>> y.context is x
-  True
-
-We can register and lookup by name too::
-
-  >>> class Y2(Y):
-  ...     pass
-
-  >>> registry.register([IR], IP1, 'bob', Y2)
-  >>> y = registry.queryAdapter(x, IP1, 'bob')
-  >>> y.__class__.__name__
-  'Y2'
-  >>> y.context is x
-  True
-
-When the adapter factory produces `None`, this is treated as if no
-adapter had been found. This allows us to prevent adaptation (when
-desired) and lets the adapter factory determine whether adaptation is
-possible based on the state of the object being adapted::
-
-  >>> def factory(context):
-  ...     if context.name == 'object':
-  ...         return 'adapter'
-  ...     return None
-
-  >>> class Object(object):
-  ...     zope.interface.implements(IR)
-  ...     name = 'object'
-
-  >>> registry.register([IR], IP1, 'conditional', factory) 
-  >>> obj = Object()
-  >>> registry.queryAdapter(obj, IP1, 'conditional')
-  'adapter'
-  >>> obj.name = 'no object'
-  >>> registry.queryAdapter(obj, IP1, 'conditional') is None
-  True
-  >>> registry.queryAdapter(obj, IP1, 'conditional', 'default')
-  'default'
-
-An alternate method that provides the same function as `queryAdapter()` is
-`adapter_hook()`::
-
-  >>> y = registry.adapter_hook(IP1, x)
-  >>> y.__class__.__name__
-  'Y'
-  >>> y.context is x
-  True
-  >>> y = registry.adapter_hook(IP1, x, 'bob')
-  >>> y.__class__.__name__
-  'Y2'
-  >>> y.context is x
-  True
-
-The `adapter_hook()` simply switches the order of the object and
-interface arguments.  It is used to hook into the interface call
-mechanism.
-
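-If no factory is found, `adapter_hook()` returns the default; an
-illustrative check using only the registrations made so far::
-
-  >>> registry.adapter_hook(IP3, x, '', 'nope')
-  'nope'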
-
-Default Adapters
-----------------
-  
-Sometimes, you want to provide an adapter that will adapt anything.
-For that, provide None as the required interface::
-
-  >>> registry.register([None], IP1, '', 1)
-  
-then we can use that adapter for interfaces we don't have specific
-adapters for::
-
-  >>> class IQ(zope.interface.Interface):
-  ...     pass
-  >>> registry.lookup([IQ], IP1, '')
-  1
-
-Of course, specific adapters are still used when applicable::
-
-  >>> registry.lookup([IR2], IP1, '')
-  21
-
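-Note that the catch-all was registered to provide IP1, so it doesn't
-satisfy requests for interfaces it doesn't provide (an illustrative
-check)::
-
-  >>> registry.lookup([IQ], IP2, '', 'missing')
-  'missing'
-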
-Class adapters
---------------
-
-You can register adapters for class declarations, which is almost the
-same as registering them for a class::
-
-  >>> registry.register([zope.interface.implementedBy(C2)], IP1, '', 'C21')
-  >>> registry.lookup([zope.interface.implementedBy(C2)], IP1, '')
-  'C21'
-
-Dict adapters
--------------
-
-At some point it was impossible to register dictionary-based adapters due
-to a bug. Let's make sure this works now::
-
-  >>> adapter = {}
-  >>> registry.register((), IQ, '', adapter)
-  >>> registry.lookup((), IQ, '') is adapter
-  True
-
-Unregistering
--------------
-
-You can unregister by registering None, rather than an object::
-
-  >>> registry.register([zope.interface.implementedBy(C2)], IP1, '', None)
-  >>> registry.lookup([zope.interface.implementedBy(C2)], IP1, '')
-  21
-
-Of course, this means that None can't be registered. This is an
-exception to the statement, made earlier, that the registry doesn't
-care what gets registered.
-
-Multi-adapters
-==============
-
-You can adapt multiple specifications::
-
-  >>> registry.register([IR1, IQ], IP2, '', '1q2')
-  >>> registry.lookup([IR1, IQ], IP2, '')
-  '1q2'
-  >>> registry.lookup([IR2, IQ], IP1, '')
-  '1q2'
-
-  >>> class IS(zope.interface.Interface):
-  ...     pass
-  >>> registry.lookup([IR2, IS], IP1, '')
-
-  >>> class IQ2(IQ):
-  ...     pass
-
-  >>> registry.lookup([IR2, IQ2], IP1, '')
-  '1q2'
-
-  >>> registry.register([IR1, IQ2], IP2, '', '1q22')
-  >>> registry.lookup([IR2, IQ2], IP1, '')
-  '1q22'
-
-Multi-adaptation
-----------------
-
-You can adapt multiple objects::
-
-  >>> class Q:
-  ...     zope.interface.implements(IQ)
-
-As with single adapters, we register a factory, which is often a class::
-
-  >>> class IM(zope.interface.Interface):
-  ...     pass
-  >>> class M:
-  ...     zope.interface.implements(IM)
-  ...     def __init__(self, x, q):
-  ...         self.x, self.q = x, q
-  >>> registry.register([IR, IQ], IM, '', M)
-
-And then we can call `queryMultiAdapter` to compute an adapter::
-
-  >>> q = Q()
-  >>> m = registry.queryMultiAdapter((x, q), IM)
-  >>> m.__class__.__name__
-  'M'
-  >>> m.x is x and m.q is q
-  True
-
-and, of course, we can use names::
-
-  >>> class M2(M):
-  ...     pass
-  >>> registry.register([IR, IQ], IM, 'bob', M2)
-  >>> m = registry.queryMultiAdapter((x, q), IM, 'bob')
-  >>> m.__class__.__name__
-  'M2'
-  >>> m.x is x and m.q is q
-  True
-  
-Default Adapters
-----------------
-
-As with single adapters, you can define default adapters by specifying
-None for the *first* specification::
-
-  >>> registry.register([None, IQ], IP2, '', 'q2')
-  >>> registry.lookup([IS, IQ], IP2, '')
-  'q2'
-
-Null Adapters
-=============
-
-You can also adapt no specification::
-
-  >>> registry.register([], IP2, '', 2)
-  >>> registry.lookup([], IP2, '')
-  2
-  >>> registry.lookup([], IP1, '')
-  2
-
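-If the null adapter doesn't provide the requested interface, the
-default is returned as usual (an illustrative check)::
-
-  >>> registry.lookup([], IP3, '', 'nothing')
-  'nothing'
-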
-Listing named adapters
-----------------------
-
-Adapters are named. Sometimes, it's useful to get all of the named
-adapters for given interfaces::
-
-  >>> adapters = list(registry.lookupAll([IR1], IP1))
-  >>> adapters.sort()
-  >>> adapters
-  [(u'', 11), (u'bob', "Bob's 12")]
-
-This works for multi-adapters too::
-
-  >>> registry.register([IR1, IQ2], IP2, 'bob', '1q2 for bob')
-  >>> adapters = list(registry.lookupAll([IR2, IQ2], IP1))
-  >>> adapters.sort()
-  >>> adapters
-  [(u'', '1q22'), (u'bob', '1q2 for bob')]
-
-And even null adapters::
-
-  >>> registry.register([], IP2, 'bob', 3)
-  >>> adapters = list(registry.lookupAll([], IP1))
-  >>> adapters.sort()
-  >>> adapters
-  [(u'', 2), (u'bob', 3)]
-
-Subscriptions
-=============
-
-Normally, we want to look up an object that most closely matches a
-specification.  Sometimes, we want to get all of the objects that
-match some specification.  We use subscriptions for this.  We
-subscribe objects against specifications and then later find all of
-the subscribed objects::
-
-  >>> registry.subscribe([IR1], IP2, 'sub12 1')
-  >>> registry.subscriptions([IR1], IP2)
-  ['sub12 1']
-
-Note that, unlike regular adapters, subscriptions are unnamed.
-
-The order of returned subscriptions is not specified.
-
-You can have multiple subscribers for the same specification::
-
-  >>> registry.subscribe([IR1], IP2, 'sub12 2')
-  >>> subs = registry.subscriptions([IR1], IP2)
-  >>> subs.sort()
-  >>> subs
-  ['sub12 1', 'sub12 2']
-
-You can register subscribers for all specifications using None::
-
-  >>> registry.subscribe([None], IP1, 'sub_1')
-  >>> subs = registry.subscriptions([IR2], IP1)
-  >>> subs.sort()
-  >>> subs
-  ['sub12 1', 'sub12 2', 'sub_1']
-
-Subscriptions may be combined over multiple compatible specifications::
-
-  >>> subs = registry.subscriptions([IR2], IP1)
-  >>> subs.sort()
-  >>> subs
-  ['sub12 1', 'sub12 2', 'sub_1']
-  >>> registry.subscribe([IR1], IP1, 'sub11')
-  >>> subs = registry.subscriptions([IR2], IP1)
-  >>> subs.sort()
-  >>> subs
-  ['sub11', 'sub12 1', 'sub12 2', 'sub_1']
-  >>> registry.subscribe([IR2], IP2, 'sub22')
-  >>> subs = registry.subscriptions([IR2], IP1)
-  >>> subs.sort()
-  >>> subs
-  ['sub11', 'sub12 1', 'sub12 2', 'sub22', 'sub_1']
-  >>> subs = registry.subscriptions([IR2], IP2)
-  >>> subs.sort()
-  >>> subs
-  ['sub12 1', 'sub12 2', 'sub22']
-
-Subscriptions can be on multiple specifications::
-
-  >>> registry.subscribe([IR1, IQ], IP2, 'sub1q2')
-  >>> registry.subscriptions([IR1, IQ], IP2)
-  ['sub1q2']
-  
-As with single subscriptions and non-subscription adapters, you can
-specify None for the first required interface, to specify a default::
-
-  >>> registry.subscribe([None, IQ], IP2, 'sub_q2')
-  >>> registry.subscriptions([IS, IQ], IP2)
-  ['sub_q2']
-  >>> subs = registry.subscriptions([IR1, IQ], IP2)
-  >>> subs.sort()
-  >>> subs
-  ['sub1q2', 'sub_q2']
-
-You can have subscriptions that are independent of any specifications::
-  
-  >>> registry.subscriptions([], IP1)
-  []
-
-  >>> registry.subscribe([], IP2, 'sub2')
-  >>> registry.subscriptions([], IP1)
-  ['sub2']
-  >>> registry.subscribe([], IP1, 'sub1')
-  >>> subs = registry.subscriptions([], IP1)
-  >>> subs.sort()
-  >>> subs
-  ['sub1', 'sub2']
-  >>> registry.subscriptions([], IP2)
-  ['sub2']
-
-
-Subscription adapters
----------------------
-
-We normally register adapter factories, which then allow us to compute
-adapters, but with subscriptions, we get multiple adapters.  Here's an
-example of multiple-object subscribers::
-
-  >>> registry.subscribe([IR, IQ], IM, M)
-  >>> registry.subscribe([IR, IQ], IM, M2)
-
-  >>> subscribers = registry.subscribers((x, q), IM)
-  >>> len(subscribers)
-  2
-  >>> class_names = [s.__class__.__name__ for s in subscribers]
-  >>> class_names.sort()
-  >>> class_names
-  ['M', 'M2']
-  >>> [(s.x is x and s.q is q) for s in subscribers]
-  [True, True]
-
-
-Handlers
---------
-
-A handler is a subscriber factory that doesn't produce any normal
-output; it returns None.  Handlers are unlike adapters in that they do
-all of their work when the factory is called.
-
-To register a handler, simply provide None as the provided interface::
-
-  >>> def handler(event):
-  ...     print 'handler', event
-
-  >>> registry.subscribe([IR1], None, handler)
-  >>> registry.subscriptions([IR1], None) == [handler]
-  True
diff --git a/branches/bug1734/src/zope/interface/advice.py b/branches/bug1734/src/zope/interface/advice.py
deleted file mode 100644
index 6a06167a..00000000
--- a/branches/bug1734/src/zope/interface/advice.py
+++ /dev/null
@@ -1,192 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Class advice.
-
-This module was adapted from 'protocols.advice', part of the Python
-Enterprise Application Kit (PEAK).  Please notify the PEAK authors
-(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
-Zope-specific changes are required, so that the PEAK version of this module
-can be kept in sync.
-
-PEAK is a Python application framework that interoperates with (but does
-not require) Zope 3 and Twisted.  It provides tools for manipulating UML
-models, object-relational persistence, aspect-oriented programming, and more.
-Visit the PEAK home page at http://peak.telecommunity.com for more information.
-
-$Id$
-"""
-
-from types import ClassType, FunctionType
-import sys
-
-def getFrameInfo(frame):
-    """Return (kind,module,locals,globals) for a frame
-
-    'kind' is one of "exec", "module", "class", "function call", or "unknown".
-    """
-
-    f_locals = frame.f_locals
-    f_globals = frame.f_globals
-
-    sameNamespace = f_locals is f_globals
-    hasModule = '__module__' in f_locals
-    hasName = '__name__' in f_globals
-
-    sameName = hasModule and hasName
-    sameName = sameName and f_globals['__name__']==f_locals['__module__']
-
-    module = hasName and sys.modules.get(f_globals['__name__']) or None
-
-    namespaceIsModule = module and module.__dict__ is f_globals
-
-    if not namespaceIsModule:
-        # some kind of funky exec
-        kind = "exec"
-    elif sameNamespace and not hasModule:
-        kind = "module"
-    elif sameName and not sameNamespace:
-        kind = "class"
-    elif not sameNamespace:
-        kind = "function call"
-    else:
-        # How can you have f_locals is f_globals, and have '__module__' set?
-        # This is probably module-level code, but with a '__module__' variable.
-        kind = "unknown"
-    return kind, module, f_locals, f_globals
-
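-# Illustrative only: at the top level of a module,
-#
-#   getFrameInfo(sys._getframe())[0] == "module"
-#
-# while the same call made from inside a class suite reports "class".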
-
-def addClassAdvisor(callback, depth=2):
-    """Set up 'callback' to be passed the containing class upon creation
-
-    This function is designed to be called by an "advising" function executed
-    in a class suite.  The "advising" function supplies a callback that it
-    wishes to have executed when the containing class is created.  The
-    callback will be given one argument: the newly created containing class.
-    The return value of the callback will be used in place of the class, so
-    the callback should return the input if it does not wish to replace the
-    class.
-
-    The optional 'depth' argument to this function determines the number of
-    frames between this function and the targeted class suite.  'depth'
-    defaults to 2, since this skips this function's frame and one calling
-    function frame.  If you use this function from a function called directly
-    in the class suite, the default will be correct, otherwise you will need
-    to determine the correct depth yourself.
-
-    This function works by installing a special class factory function in
-    place of the '__metaclass__' of the containing class.  Therefore, only
-    callbacks *after* the last '__metaclass__' assignment in the containing
-    class will be executed.  Be sure that classes using "advising" functions
-    declare any '__metaclass__' *first*, to ensure all callbacks are run."""
-
-    frame = sys._getframe(depth)
-    kind, module, caller_locals, caller_globals = getFrameInfo(frame)
-
-    # This causes a problem when zope interfaces are used from doctest.
-    # In these cases, kind == "exec".
-    #
-    #if kind != "class":
-    #    raise SyntaxError(
-    #        "Advice must be in the body of a class statement"
-    #    )
-
-    previousMetaclass = caller_locals.get('__metaclass__')
-    defaultMetaclass  = caller_globals.get('__metaclass__', ClassType)
-
-
-    def advise(name, bases, cdict):
-
-        if '__metaclass__' in cdict:
-            del cdict['__metaclass__']
-
-        if previousMetaclass is None:
-            if bases:
-                # find best metaclass or use global __metaclass__ if no bases
-                meta = determineMetaclass(bases)
-            else:
-                meta = defaultMetaclass
-
-        elif isClassAdvisor(previousMetaclass):
-            # special case: we can't compute the "true" metaclass here,
-            # so we need to invoke the previous metaclass and let it
-            # figure it out for us (and apply its own advice in the process)
-            meta = previousMetaclass
-
-        else:
-            meta = determineMetaclass(bases, previousMetaclass)
-
-        newClass = meta(name,bases,cdict)
-
-        # this lets the callback replace the class completely, if it wants to
-        return callback(newClass)
-
-    # introspection data only, not used by inner function
-    advise.previousMetaclass = previousMetaclass
-    advise.callback = callback
-
-    # install the advisor
-    caller_locals['__metaclass__'] = advise
-
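-# A minimal usage sketch (hypothetical names; Python 2 semantics):
-#
-#   def advise_me():
-#       # Called directly in a class suite, so the default depth of 2
-#       # targets that suite's frame.
-#       addClassAdvisor(lambda cls: cls)
-#
-#   class Demo:
-#       advise_me()
-#
-# When Demo's suite finishes, the callback receives the new class, and
-# its return value replaces Demo.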
-
-def isClassAdvisor(ob):
-    """True if 'ob' is a class advisor function"""
-    return isinstance(ob,FunctionType) and hasattr(ob,'previousMetaclass')
-
-
-def determineMetaclass(bases, explicit_mc=None):
-    """Determine metaclass from 1+ bases and optional explicit __metaclass__"""
-
-    meta = [getattr(b,'__class__',type(b)) for b in bases]
-
-    if explicit_mc is not None:
-        # The explicit metaclass needs to be verified for compatibility
-        # as well, and allowed to resolve the incompatible bases, if any
-        meta.append(explicit_mc)
-
-    if len(meta)==1:
-        # easy case
-        return meta[0]
-
-    candidates = minimalBases(meta) # minimal set of metaclasses
-
-    if not candidates:
-        # they're all "classic" classes
-        return ClassType
-
-    elif len(candidates)>1:
-        # We could auto-combine, but for now we won't...
-        raise TypeError("Incompatible metatypes",bases)
-
-    # Just one, return it
-    return candidates[0]
-
-
-def minimalBases(classes):
-    """Reduce a list of base classes to its ordered minimum equivalent"""
-
-    classes = [c for c in classes if c is not ClassType]
-    candidates = []
-
-    for m in classes:
-        for n in classes:
-            if issubclass(n,m) and m is not n:
-                break
-        else:
-            # m has no subclasses in 'classes'
-            if m in candidates:
-                candidates.remove(m)    # ensure that we're later in the list
-            candidates.append(m)
-
-    return candidates
-
diff --git a/branches/bug1734/src/zope/interface/common/__init__.py b/branches/bug1734/src/zope/interface/common/__init__.py
deleted file mode 100644
index b711d360..00000000
--- a/branches/bug1734/src/zope/interface/common/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-#
-# This file is necessary to make this directory a package.
diff --git a/branches/bug1734/src/zope/interface/common/idatetime.py b/branches/bug1734/src/zope/interface/common/idatetime.py
deleted file mode 100644
index f0db2eb0..00000000
--- a/branches/bug1734/src/zope/interface/common/idatetime.py
+++ /dev/null
@@ -1,577 +0,0 @@
-##############################################################################
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-# 
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-##############################################################################
-"""Datetime interfaces.
-
-This module is called idatetime because if it were called datetime the import
-of the real datetime would fail.
-
-$Id$
-"""
-
-from zope.interface import Interface, Attribute
-from zope.interface import classImplements, directlyProvides
-
-from datetime import timedelta, date, datetime, time, tzinfo
-
-
-class ITimeDeltaClass(Interface):
-    """This is the timedelta class interface."""
-
-    min = Attribute("The most negative timedelta object")
-
-    max = Attribute("The most positive timedelta object")
-
-    resolution = Attribute(
-        "The smallest difference between non-equal timedelta objects")
-
-
-class ITimeDelta(ITimeDeltaClass):
-    """Represent the difference between two datetime objects.
-
-    Supported operators:
-
-    - add, subtract timedelta
-    - unary plus, minus, abs
-    - compare to timedelta
-    - multiply, divide by int/long
-
-    In addition, datetime supports subtraction of two datetime objects
-    returning a timedelta, and addition or subtraction of a datetime
-    and a timedelta giving a datetime.
-
-    Representation: (days, seconds, microseconds).
-    """
-
-    days = Attribute("Days between -999999999 and 999999999 inclusive")
-
-    seconds = Attribute("Seconds between 0 and 86399 inclusive")
-
-    microseconds = Attribute("Microseconds between 0 and 999999 inclusive")
-
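-# Illustrative note: elsewhere in this module the standard library
-# classes are declared to implement these interfaces, e.g.
-#
-#   classImplements(timedelta, ITimeDelta)
-#
-# so ITimeDelta.providedBy(timedelta(days=1)) holds.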
-
-class IDateClass(Interface):
-    """This is the date class interface."""
-
-    min = Attribute("The earliest representable date")
-
-    max = Attribute("The latest representable date")
-
-    resolution = Attribute(
-        "The smallest difference between non-equal date objects")
-
-    def today():
-        """Return the current local time.
-
-        This is equivalent to date.fromtimestamp(time.time())"""
-
-    def fromtimestamp(timestamp):
-        """Return the local date from a POSIX timestamp (like time.time())
-
-        This may raise ValueError if the timestamp is out of the range of
-        values supported by the platform C localtime() function. It's common
-        for this to be restricted to years from 1970 through 2038. Note that
-        on non-POSIX systems that include leap seconds in their notion of a
-        timestamp, leap seconds are ignored by fromtimestamp().
-        """
-
-    def fromordinal(ordinal):
-        """Return the date corresponding to the proleptic Gregorian ordinal.
-
-        January 1 of year 1 has ordinal 1. ValueError is raised unless
-        1 <= ordinal <= date.max.toordinal().
-        For any date d, date.fromordinal(d.toordinal()) == d.
-        """
-
-
-class IDate(IDateClass):
-    """Represents a date (year, month and day) in an idealized calendar.
-
-    Operators:
-
-    __repr__, __str__
-    __cmp__, __hash__
-    __add__, __radd__, __sub__ (add/radd only with timedelta arg)
-    """
-
-    year = Attribute("Between MINYEAR and MAXYEAR inclusive.")
-
-    month = Attribute("Between 1 and 12 inclusive")
-
-    day = Attribute(
-        "Between 1 and the number of days in the given month of the given year.")
-
-    def replace(year, month, day):
-        """Return a date with the same value.
-
-        Except for those members given new values by whichever keyword
-        arguments are specified. For example, if d == date(2002, 12, 31), then
-        d.replace(day=26) == date(2002, 12, 26).
-        """
-
-    def timetuple():
-        """Return a 9-element tuple of the form returned by time.localtime().
-
-        The hours, minutes and seconds are 0, and the DST flag is -1.
-        d.timetuple() is equivalent to
-        (d.year, d.month, d.day, 0, 0, 0, d.weekday(), d.toordinal() -
-        date(d.year, 1, 1).toordinal() + 1, -1)
-        """
-
-    def toordinal():
-        """Return the proleptic Gregorian ordinal of the date
-
-        January 1 of year 1 has ordinal 1. For any date object d,
-        date.fromordinal(d.toordinal()) == d.
-        """
-
-    def weekday():
-        """Return the day of the week as an integer.
-
-        Monday is 0 and Sunday is 6. For example,
-        date(2002, 12, 4).weekday() == 2, a Wednesday.
-
-        See also isoweekday().
-        """
-
-    def isoweekday():
-        """Return the day of the week as an integer.
-
-        Monday is 1 and Sunday is 7. For example,
-        date(2002, 12, 4).isoweekday() == 3, a Wednesday.
-
-        See also weekday(), isocalendar().
-        """
-
-    def isocalendar():
-        """Return a 3-tuple, (ISO year, ISO week number, ISO weekday).
-
-        The ISO calendar is a widely used variant of the Gregorian calendar.
-        See http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm for a good
-        explanation.
-
-        The ISO year consists of 52 or 53 full weeks, where a week starts
-        on a Monday and ends on a Sunday. The first week of an ISO year is the
-        first (Gregorian) calendar week of a year containing a Thursday. This
-        is called week number 1, and the ISO year of that Thursday is the same
-        as its Gregorian year.
-
-        For example, 2004 begins on a Thursday, so the first week of ISO year
-        2004 begins on Monday, 29 Dec 2003 and ends on Sunday, 4 Jan 2004, so
-        that date(2003, 12, 29).isocalendar() == (2004, 1, 1) and
-        date(2004, 1, 4).isocalendar() == (2004, 1, 7).
-        """
-
-    def isoformat():
-        """Return a string representing the date in ISO 8601 format.
-
-        This is 'YYYY-MM-DD'.
-        For example, date(2002, 12, 4).isoformat() == '2002-12-04'.
-        """
-
-    def __str__():
-        """For a date d, str(d) is equivalent to d.isoformat()."""
-
-    def ctime():
-        """Return a string representing the date.
-
-        For example, date(2002, 12, 4).ctime() == 'Wed Dec  4 00:00:00 2002'.
-        d.ctime() is equivalent to time.ctime(time.mktime(d.timetuple()))
-        on platforms where the native C ctime() function
-        (which time.ctime() invokes, but which date.ctime() does not invoke)
-        conforms to the C standard.
-        """
-
-    def strftime(format):
-        """Return a string representing the date.
-
-        Controlled by an explicit format string. Format codes referring to
-        hours, minutes or seconds will see 0 values.
-        """
-
-
-class IDateTimeClass(Interface):
-    """This is the datetime class interface."""
-
-    min = Attribute("The earliest representable datetime")
-
-    max = Attribute("The latest representable datetime")
-
-    resolution = Attribute(
-        "The smallest possible difference between non-equal datetime objects")
-
-    def today():
-        """Return the current local datetime, with tzinfo None.
-
-        This is equivalent to datetime.fromtimestamp(time.time()).
-        See also now(), fromtimestamp().
-        """
-
-    def now(tz=None):
-        """Return the current local date and time.
-
-        If optional argument tz is None or not specified, this is like today(),
-        but, if possible, supplies more precision than can be gotten from going
-        through a time.time() timestamp (for example, this may be possible on
-        platforms supplying the C gettimeofday() function).
-
-        Else tz must be an instance of a class tzinfo subclass, and the current
-        date and time are converted to tz's time zone. In this case the result
-        is equivalent to tz.fromutc(datetime.utcnow().replace(tzinfo=tz)).
-
-        See also today(), utcnow().
-        """
-
-    def utcnow():
-        """Return the current UTC date and time, with tzinfo None.
-
-        This is like now(), but returns the current UTC date and time, as a
-        naive datetime object. 
-
-        See also now().
-        """
-
-    def fromtimestamp(timestamp, tz=None):
-        """Return the local date and time corresponding to the POSIX timestamp.
-
-        Same as is returned by time.time(). If optional argument tz is None or
-        not specified, the timestamp is converted to the platform's local date
-        and time, and the returned datetime object is naive.
-
-        Else tz must be an instance of a class tzinfo subclass, and the
-        timestamp is converted to tz's time zone. In this case the result is
-        equivalent to
-        tz.fromutc(datetime.utcfromtimestamp(timestamp).replace(tzinfo=tz)).
-
-        fromtimestamp() may raise ValueError, if the timestamp is out of the
-        range of values supported by the platform C localtime() or gmtime()
-        functions. It's common for this to be restricted to years in 1970
-        through 2038. Note that on non-POSIX systems that include leap seconds
-        in their notion of a timestamp, leap seconds are ignored by
-        fromtimestamp(), and then it's possible to have two timestamps
-        differing by a second that yield identical datetime objects.
-
-        See also utcfromtimestamp().
-        """
-
-    def utcfromtimestamp(timestamp):
-        """Return the UTC datetime from the POSIX timestamp with tzinfo None.
-
-        This may raise ValueError, if the timestamp is out of the range of
-        values supported by the platform C gmtime() function. It's common for
-        this to be restricted to years in 1970 through 2038.
-
-        See also fromtimestamp().
-        """
-
-    def fromordinal(ordinal):
-        """Return the datetime from the proleptic Gregorian ordinal.
-
-        January 1 of year 1 has ordinal 1. ValueError is raised unless
-        1 <= ordinal <= datetime.max.toordinal().
-        The hour, minute, second and microsecond of the result are all 0, and
-        tzinfo is None.
-        """
-
-    def combine(date, time):
-        """Return a new datetime object.
-
-        Its date members are equal to the given date object's, and whose time
-        and tzinfo members are equal to the given time object's. For any
-        datetime object d, d == datetime.combine(d.date(), d.timetz()).
-        If date is a datetime object, its time and tzinfo members are ignored.
-        """
-
-
-class IDateTime(IDate, IDateTimeClass):
-    """Object contains all the information from a date object and a time object.
-    """
-
-    year = Attribute("Year between MINYEAR and MAXYEAR inclusive")
-
-    month = Attribute("Month between 1 and 12 inclusive")
-
-    day = Attribute(
-        "Day between 1 and the number of days in the given month of the year")
-
-    hour = Attribute("Hour in range(24)")
-
-    minute = Attribute("Minute in range(60)")
-
-    second = Attribute("Second in range(60)")
-
-    microsecond = Attribute("Microsecond in range(1000000)")
-
-    tzinfo = Attribute(
-        """The object passed as the tzinfo argument to the datetime constructor
-        or None if none was passed""")
-
-    def date():
-         """Return date object with same year, month and day."""
-
-    def time():
-        """Return time object with same hour, minute, second, microsecond.
-
-        tzinfo is None. See also method timetz().
-        """
-
-    def timetz():
-        """Return time object with same hour, minute, second, microsecond,
-        and tzinfo.
-
-        See also method time().
-        """
-
-    def replace(year, month, day, hour, minute, second, microsecond, tzinfo):
-        """Return a datetime with the same members, except for those members
-        given new values by whichever keyword arguments are specified.
-
-        Note that tzinfo=None can be specified to create a naive datetime from
-        an aware datetime with no conversion of date and time members.
-        """
-
-    def astimezone(tz):
-        """Return a datetime object with new tzinfo member tz, adjusting the
-        date and time members so the result is the same UTC time as self, but
-        in tz's local time.
-
-        tz must be an instance of a tzinfo subclass, and its utcoffset() and
-        dst() methods must not return None. self must be aware (self.tzinfo
-        must not be None, and self.utcoffset() must not return None).
-
-        If self.tzinfo is tz, self.astimezone(tz) is equal to self: no
-        adjustment of date or time members is performed. Else the result is
-        local time in time zone tz, representing the same UTC time as self:
-            after astz = dt.astimezone(tz), astz - astz.utcoffset()
-        will usually have the same date and time members as dt - dt.utcoffset().
-        The discussion of class tzinfo explains the cases at Daylight Saving
-        Time transition boundaries where this cannot be achieved (an issue only
-        if tz models both standard and daylight time).
-
-        If you merely want to attach a time zone object tz to a datetime dt
-        without adjustment of date and time members, use dt.replace(tzinfo=tz).
-        If you merely want to remove the time zone object from an aware
-        datetime dt without conversion of date and time members, use 
-        dt.replace(tzinfo=None).
-
-        Note that the default tzinfo.fromutc() method can be overridden in a
-        tzinfo subclass to effect the result returned by astimezone().
-        """
-
-    def utcoffset():
-        """Return the timezone offset in minutes east of UTC (negative west of
-        UTC)."""
-
-    def dst():
-        """Return 0 if DST is not in effect, or the DST offset (in minutes
-        eastward) if DST is in effect.
-        """
-
-    def tzname():
-        """Return the timezone name."""
-
-    def timetuple():
-        """Return a 9-element tuple of the form returned by time.localtime()."""
-
-    def utctimetuple():
-        """Return UTC time tuple compatilble with time.gmtimr()."""
-
-    def toordinal():
-        """Return the proleptic Gregorian ordinal of the date.
-
-        The same as self.date().toordinal().
-        """
-
-    def weekday():
-        """Return the day of the week as an integer.
-
-        Monday is 0 and Sunday is 6. The same as self.date().weekday().
-        See also isoweekday().
-        """
-
-    def isoweekday():
-        """Return the day of the week as an integer.
-
-        Monday is 1 and Sunday is 7. The same as self.date().isoweekday().
-        See also weekday(), isocalendar().
-        """
-
-    def isocalendar():
-        """Return a 3-tuple, (ISO year, ISO week number, ISO weekday).
-
-        The same as self.date().isocalendar().
-        """
-
-    def isoformat(sep='T'):
-        """Return a string representing the date and time in ISO 8601 format.
-
-        YYYY-MM-DDTHH:MM:SS.mmmmmm or YYYY-MM-DDTHH:MM:SS if microsecond is 0
-
-        If utcoffset() does not return None, a 6-character string is appended,
-        giving the UTC offset in (signed) hours and minutes:
-
-        YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM or YYYY-MM-DDTHH:MM:SS+HH:MM
-        if microsecond is 0.
-
-        The optional argument sep (default 'T') is a one-character separator,
-        placed between the date and time portions of the result.
-        """
-
-    def __str__():
-        """For a datetime instance d, str(d) is equivalent to d.isoformat(' ').
-        """
-
-    def ctime():
-        """Return a string representing the date and time.
-
-        datetime(2002, 12, 4, 20, 30, 40).ctime() == 'Wed Dec  4 20:30:40 2002'.
-        d.ctime() is equivalent to time.ctime(time.mktime(d.timetuple())) on
-        platforms where the native C ctime() function (which time.ctime()
-        invokes, but which datetime.ctime() does not invoke) conforms to the
-        C standard.
-        """
-
-    def strftime(format):
-        """Return a string representing the date and time.
-
-        This is controlled by an explicit format string.
-        """
-
-
-class ITimeClass(Interface):
-    """This is the time class interface."""
-
-    min = Attribute("The earliest representable time")
-
-    max = Attribute("The latest representable time")
-
-    resolution = Attribute(
-        "The smallest possible difference between non-equal time objects")
-
-
-class ITime(ITimeClass):
-    """Represent time with time zone.
-
-    Operators:
-
-    __repr__, __str__
-    __cmp__, __hash__
-    """
-
-    hour = Attribute("Hour in range(24)")
-
-    minute = Attribute("Minute in range(60)")
-
-    second = Attribute("Second in range(60)")
-
-    microsecond = Attribute("Microsecond in range(1000000)")
-
-    tzinfo = Attribute(
-        """The object passed as the tzinfo argument to the time constructor
-        or None if none was passed.""")
-
-    def replace(hour, minute, second, microsecond, tzinfo):
-        """Return a time with the same value.
-
-        Except for those members given new values by whichever keyword
-        arguments are specified. Note that tzinfo=None can be specified
-        to create a naive time from an aware time, without conversion of the
-        time members.
-        """
-
-    def isoformat():
-        """Return a string representing the time in ISO 8601 format.
-
-        That is HH:MM:SS.mmmmmm or, if self.microsecond is 0, HH:MM:SS
-        If utcoffset() does not return None, a 6-character string is appended,
-        giving the UTC offset in (signed) hours and minutes:
-        HH:MM:SS.mmmmmm+HH:MM or, if self.microsecond is 0, HH:MM:SS+HH:MM
-        """
-
-    def __str__():
-        """For a time t, str(t) is equivalent to t.isoformat()."""
-
-    def strftime(format):
-        """Return a string representing the time.
-
-        This is controlled by an explicit format string.
-        """
-
-    def utcoffset():
-        """Return the timezone offset in minutes east of UTC (negative west of
-        UTC).
-
-        If tzinfo is None, returns None, else returns
-        self.tzinfo.utcoffset(None), and raises an exception if the latter
-        doesn't return None or a timedelta object representing a whole number
-        of minutes with magnitude less than one day.
-        """
-
-    def dst():
-        """Return 0 if DST is not in effect, or the DST offset (in minutes
-        eastward) if DST is in effect.
-
-        If tzinfo is None, returns None, else returns self.tzinfo.dst(None),
-        and raises an exception if the latter doesn't return None, or a
-        timedelta object representing a whole number of minutes with
-        magnitude less than one day.
-        """
-
-    def tzname():
-        """Return the timezone name.
-
-        If tzinfo is None, returns None, else returns self.tzinfo.tzname(None),
-        or raises an exception if the latter doesn't return None or a string
-        object.
-        """
-
-
-class ITZInfo(Interface):
-    """Time zone info class.
-    """
-
-    def utcoffset(dt):
-        """Return offset of local time from UTC, in minutes east of UTC.
-
-        If local time is west of UTC, this should be negative.
-        Note that this is intended to be the total offset from UTC;
-        for example, if a tzinfo object represents both time zone and DST
-        adjustments, utcoffset() should return their sum. If the UTC offset
-        isn't known, return None. Else the value returned must be a timedelta
-        object specifying a whole number of minutes in the range -1439 to 1439
-        inclusive (1440 = 24*60; the magnitude of the offset must be less
-        than one day).
-        """
-
-    def dst(dt):
-        """Return the daylight saving time (DST) adjustment, in minutes east
-        of UTC, or None if DST information isn't known.
-        """
-
-    def tzname(dt):
-        """Return the time zone name corresponding to the datetime object as
-        a string.
-        """
-
-    def fromutc(dt):
-        """Return an equivalent datetime in self's local time."""
-
-
-classImplements(timedelta, ITimeDelta)
-classImplements(date, IDate)
-classImplements(datetime, IDateTime)
-classImplements(time, ITime)
-classImplements(tzinfo, ITZInfo)
-
-## directlyProvides(timedelta, ITimeDeltaClass)
-## directlyProvides(date, IDateClass)
-## directlyProvides(datetime, IDateTimeClass)
-## directlyProvides(time, ITimeClass)
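The classImplements() calls at the bottom of the module above are what make
the stdlib types queryable through the interface machinery; the commented-out
directlyProvides() calls would have done the same for the class-level
interfaces. A minimal sketch of the effect, assuming this module is
importable as zope.interface.common.idatetime:

    from datetime import date
    from zope.interface.common.idatetime import IDate, ITimeDelta

    # After the classImplements() declarations, plain stdlib instances
    # are reported as providing the instance interfaces.
    assert IDate.providedBy(date(2002, 12, 31))
    assert ITimeDelta.providedBy(date(2003, 1, 1) - date(2002, 12, 31))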
diff --git a/branches/bug1734/src/zope/interface/common/interfaces.py b/branches/bug1734/src/zope/interface/common/interfaces.py
deleted file mode 100644
index 345320b4..00000000
--- a/branches/bug1734/src/zope/interface/common/interfaces.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interfaces for standard python exceptions
-
-$Id$
-"""
-from zope.interface import Interface
-from zope.interface import classImplements
-
-class IException(Interface): pass
-class IStandardError(IException): pass
-class IWarning(IException): pass
-class ISyntaxError(IStandardError): pass
-class ILookupError(IStandardError): pass
-class IValueError(IStandardError): pass
-class IRuntimeError(IStandardError): pass
-class IArithmeticError(IStandardError): pass
-class IAssertionError(IStandardError): pass
-class IAttributeError(IStandardError): pass
-class IDeprecationWarning(IWarning): pass
-class IEOFError(IStandardError): pass
-class IEnvironmentError(IStandardError): pass
-class IFloatingPointError(IArithmeticError): pass
-class IIOError(IEnvironmentError): pass
-class IImportError(IStandardError): pass
-class IIndentationError(ISyntaxError): pass
-class IIndexError(ILookupError): pass
-class IKeyError(ILookupError): pass
-class IKeyboardInterrupt(IStandardError): pass
-class IMemoryError(IStandardError): pass
-class INameError(IStandardError): pass
-class INotImplementedError(IRuntimeError): pass
-class IOSError(IEnvironmentError): pass
-class IOverflowError(IArithmeticError): pass
-class IOverflowWarning(IWarning): pass
-class IReferenceError(IStandardError): pass
-class IRuntimeWarning(IWarning): pass
-class IStopIteration(IException): pass
-class ISyntaxWarning(IWarning): pass
-class ISystemError(IStandardError): pass
-class ISystemExit(IException): pass
-class ITabError(IIndentationError): pass
-class ITypeError(IStandardError): pass
-class IUnboundLocalError(INameError): pass
-class IUnicodeError(IValueError): pass
-class IUserWarning(IWarning): pass
-class IZeroDivisionError(IArithmeticError): pass
-
-classImplements(ArithmeticError, IArithmeticError)
-classImplements(AssertionError, IAssertionError)
-classImplements(AttributeError, IAttributeError)
-classImplements(DeprecationWarning, IDeprecationWarning)
-classImplements(EnvironmentError, IEnvironmentError)
-classImplements(EOFError, IEOFError)
-classImplements(Exception, IException)
-classImplements(FloatingPointError, IFloatingPointError)
-classImplements(ImportError, IImportError)
-classImplements(IndentationError, IIndentationError)
-classImplements(IndexError, IIndexError)
-classImplements(IOError, IIOError)
-classImplements(KeyboardInterrupt, IKeyboardInterrupt)
-classImplements(KeyError, IKeyError)
-classImplements(LookupError, ILookupError)
-classImplements(MemoryError, IMemoryError)
-classImplements(NameError, INameError)
-classImplements(NotImplementedError, INotImplementedError)
-classImplements(OSError, IOSError)
-classImplements(OverflowError, IOverflowError)
-classImplements(OverflowWarning, IOverflowWarning)
-classImplements(ReferenceError, IReferenceError)
-classImplements(RuntimeError, IRuntimeError)
-classImplements(RuntimeWarning, IRuntimeWarning)
-classImplements(StandardError, IStandardError)
-classImplements(StopIteration, IStopIteration)
-classImplements(SyntaxError, ISyntaxError)
-classImplements(SyntaxWarning, ISyntaxWarning)
-classImplements(SystemError, ISystemError)
-classImplements(SystemExit, ISystemExit)
-classImplements(TabError, ITabError)
-classImplements(TypeError, ITypeError)
-classImplements(UnboundLocalError, IUnboundLocalError)
-classImplements(UnicodeError, IUnicodeError)
-classImplements(UserWarning, IUserWarning)
-classImplements(ValueError, IValueError)
-classImplements(Warning, IWarning)
-classImplements(ZeroDivisionError, IZeroDivisionError)
-
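Because the interface hierarchy above mirrors the exception hierarchy, a
caught exception provides the interface for its class and for every base
class as well. A hedged sketch (Python 2 except-clause syntax, matching
this codebase):

    from zope.interface.common.interfaces import ILookupError, IKeyError

    try:
        {}['missing']
    except LookupError, e:
        # KeyError implements IKeyError, which extends ILookupError,
        # so the instance provides both interfaces.
        assert IKeyError.providedBy(e)
        assert ILookupError.providedBy(e)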
diff --git a/branches/bug1734/src/zope/interface/common/mapping.py b/branches/bug1734/src/zope/interface/common/mapping.py
deleted file mode 100644
index 8779dab8..00000000
--- a/branches/bug1734/src/zope/interface/common/mapping.py
+++ /dev/null
@@ -1,127 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Mapping Interfaces
-
-$Id$
-"""
-from zope.interface import Interface
-
-class IItemMapping(Interface):
-    """Simplest readable mapping object
-    """
-
-    def __getitem__(key):
-        """Get a value for a key
-
-        A KeyError is raised if there is no value for the key.
-        """
-
-
-class IReadMapping(IItemMapping):
-    """Basic mapping interface
-    """
-
-    def get(key, default=None):
-        """Get a value for a key
-
-        The default is returned if there is no value for the key.
-        """
-
-    def __contains__(key):
-        """Tell if a key exists in the mapping."""
-
-
-class IWriteMapping(Interface):
-    """Mapping methods for changing data"""
-    
-    def __delitem__(key):
-        """Delete a value from the mapping using the key."""
-
-    def __setitem__(key, value):
-        """Set a new item in the mapping."""
-        
-
-class IEnumerableMapping(IReadMapping):
-    """Mapping objects whose items can be enumerated.
-    """
-
-    def keys():
-        """Return the keys of the mapping object.
-        """
-
-    def __iter__():
-        """Return an iterator for the keys of the mapping object.
-        """
-
-    def values():
-        """Return the values of the mapping object.
-        """
-
-    def items():
-        """Return the items of the mapping object.
-        """
-
-    def __len__():
-        """Return the number of items.
-        """
-
-class IMapping(IWriteMapping, IEnumerableMapping):
-    ''' Simple mapping interface '''
-
-class IIterableMapping(IEnumerableMapping):
-
-    def iterkeys():
-        "iterate over keys; equivalent to __iter__"
-
-    def itervalues():
-        "iterate over values"
-
-    def iteritems():
-        "iterate over items"
-
-class IClonableMapping(Interface):
-    
-    def copy():
-        "return copy of dict"
-
-class IExtendedReadMapping(IIterableMapping):
-    
-    def has_key(key):
-        """Tell if a key exists in the mapping; equivalent to __contains__"""
-
-class IExtendedWriteMapping(IWriteMapping):
-    
-    def clear():
-        "delete all items"
-    
-    def update(d):
-        " Update D from E: for k in E.keys(): D[k] = E[k]"
-    
-    def setdefault(key, default=None):
-        "D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"
-    
-    def pop(k, *args):
-        """remove specified key and return the corresponding value
-        *args may contain a single default value, or may not be supplied.
-        If key is not found, default is returned if given, otherwise 
-        KeyError is raised"""
-    
-    def popitem():
-        """remove and return some (key, value) pair as a
-        2-tuple; but raise KeyError if mapping is empty"""
-
-class IFullMapping(
-    IExtendedReadMapping, IExtendedWriteMapping, IClonableMapping, IMapping):
-    ''' Full mapping interface '''
-    # IMapping is included so that tests for IMapping succeed with
-    # IFullMapping.
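A sketch of how these interfaces are meant to be used: a hypothetical
read-only wrapper around a dict can declare IReadMapping and be checked
with zope.interface.verify.verifyObject (the class and attribute names
here are illustrative only):

    from zope.interface import implements
    from zope.interface.verify import verifyObject
    from zope.interface.common.mapping import IReadMapping

    class Settings(object):
        "Hypothetical read-only view over a plain dict."
        implements(IReadMapping)

        def __init__(self, data):
            self._data = data

        def __getitem__(self, key):
            return self._data[key]          # raises KeyError if absent

        def get(self, key, default=None):
            return self._data.get(key, default)

        def __contains__(self, key):
            return key in self._data

    # Passes silently; raises an exception if a method were missing.
    verifyObject(IReadMapping, Settings({'answer': 42}))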
diff --git a/branches/bug1734/src/zope/interface/common/sequence.py b/branches/bug1734/src/zope/interface/common/sequence.py
deleted file mode 100644
index 4fef9d7c..00000000
--- a/branches/bug1734/src/zope/interface/common/sequence.py
+++ /dev/null
@@ -1,129 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Sequence Interfaces
-
-$Id$
-"""
-from zope import interface
-
-class IReadSequence(interface.Interface):
-    "read interface shared by tuple and list"
-    
-    def __getitem__(index):
-        "x.__getitem__(index) <==> x[index]"
-
-    def __iter__():
-        "x.__iter__() <==> iter(x)"
-
-    def __contains__(item):
-        "x.__contains__(item) <==> item in x"
-
-    def __lt__(other):
-        "x.__lt__(other) <==> x<other"
-
-    def __le__(other):
-        "x.__le__(other) <==> x<=other"
-
-    def __eq__(other):
-        "x.__eq__(other) <==> x==other"
-
-    def __ne__(other):
-        "x.__ne__(other) <==> x!=other"
-
-    def __gt__(other):
-        "x.__gt__(other) <==> x>other"
-
-    def __ge__(other):
-        "x.__ge__(other) <==> x>=other"
-
-    def __len__():
-        "x.__len__() <==> len(x)"
-
-    def __add__(other):
-        "x.__add__(other) <==> x+other"
-
-    def __mul__(n):
-        "x.__mul__(n) <==> x*n"
-
-    def __rmul__(n):
-        "x.__rmul__(n) <==> n*x"
-    
-    def __getslice__(i, j):
-        """x.__getslice__(i, j) <==> x[i:j]
-
-        Use of negative indices is not supported.
-        """
-
-class IExtendedReadSequence(IReadSequence):
-    "Full read interface for lists"
-
-    def count(item):
-        "return number of occurrences of value"
-
-    def index(item, *args):
-        """return first index of value
-
-        L.index(value, [start, [stop]]) -> integer"""
-
-class IUniqueMemberWriteSequence(interface.Interface):
-    "The write contract for a sequence that may enforce unique members"
-
-    def __setitem__(index, item):
-        "x.__setitem__(index, item) <==> x[index]=item"
-
-    def __delitem__(index):
-        "x.__delitem__(index) <==> del x[index]"
-
-    def __setslice__(i, j, other):
-        """x.__setslice__(i, j, other) <==> x[i:j]=other
-
-        Use of negative indices is not supported."""
-
-    def __delslice__(i, j):
-        """x.__delslice__(i, j) <==> del x[i:j]
-
-        Use of negative indices is not supported.
-        """
-    def __iadd__(y):
-        "x.__iadd__(y) <==> x+=y"
-
-    def append(item):
-        "append item to end"
-
-    def insert(index, item):
-        "insert item before index"
-
-    def pop(index=-1):
-        "remove and return item at index (default last)"
-
-    def remove(item):
-        "remove first occurrence of value"
-
-    def reverse():
-        "reverse *IN PLACE*"
-
-    def sort(cmpfunc=None):
-        """stable sort *IN PLACE*; cmpfunc(x, y) -> -1, 0, 1"""
-
-    def extend(iterable):
-        "extend list by appending elements from the iterable"
-
-class IWriteSequence(IUniqueMemberWriteSequence):
-    "Full write contract for sequences"
-
-    def __imul__(n):
-        "x.__imul__(n) <==> x*=n"
-
-class ISequence(IReadSequence, IWriteSequence):
-    "Full sequence contract"
diff --git a/branches/bug1734/src/zope/interface/common/tests/__init__.py b/branches/bug1734/src/zope/interface/common/tests/__init__.py
deleted file mode 100644
index b711d360..00000000
--- a/branches/bug1734/src/zope/interface/common/tests/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-#
-# This file is necessary to make this directory a package.
diff --git a/branches/bug1734/src/zope/interface/common/tests/basemapping.py b/branches/bug1734/src/zope/interface/common/tests/basemapping.py
deleted file mode 100644
index de03f54f..00000000
--- a/branches/bug1734/src/zope/interface/common/tests/basemapping.py
+++ /dev/null
@@ -1,115 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Base Mapping tests
-
-$Id$
-"""
-from operator import __getitem__
-
-def testIReadMapping(self, inst, state, absent):
-    for key in state:
-        self.assertEqual(inst[key], state[key])
-        self.assertEqual(inst.get(key, None), state[key])
-        self.failUnless(key in inst)
-
-    for key in absent:
-        self.assertEqual(inst.get(key, None), None)
-        self.assertEqual(inst.get(key), None)
-        self.assertEqual(inst.get(key, self), self)
-        self.assertRaises(KeyError, __getitem__, inst, key)
-
-
-def test_keys(self, inst, state):
-    # Return the keys of the mapping object
-    inst_keys = list(inst.keys()); inst_keys.sort()
-    state_keys = list(state.keys()) ; state_keys.sort()
-    self.assertEqual(inst_keys, state_keys)
-
-def test_iter(self, inst, state):
-    # Return the keys of the mapping object
-    inst_keys = list(inst); inst_keys.sort()
-    state_keys = list(state.keys()) ; state_keys.sort()
-    self.assertEqual(inst_keys, state_keys)
-
-def test_values(self, inst, state):
-    # Return the values of the mapping object
-    inst_values = list(inst.values()); inst_values.sort()
-    state_values = list(state.values()) ; state_values.sort()
-    self.assertEqual(inst_values, state_values)
-
-def test_items(self, inst, state):
-    # Return the items of the mapping object
-    inst_items = list(inst.items()); inst_items.sort()
-    state_items = list(state.items()) ; state_items.sort()
-    self.assertEqual(inst_items, state_items)
-
-def test___len__(self, inst, state):
-    # Return the number of items
-    self.assertEqual(len(inst), len(state))
-
-def testIEnumerableMapping(self, inst, state):
-    test_keys(self, inst, state)
-    test_items(self, inst, state)
-    test_values(self, inst, state)
-    test___len__(self, inst, state)
-
-
-class BaseTestIReadMapping(object):
-    def testIReadMapping(self):
-        inst = self._IReadMapping__sample()
-        state = self._IReadMapping__stateDict()
-        absent = self._IReadMapping__absentKeys()
-        testIReadMapping(self, inst, state, absent)
-
-
-class BaseTestIEnumerableMapping(BaseTestIReadMapping):
-    # Mapping objects whose items can be enumerated
-    def test_keys(self):
-        # Return the keys of the mapping object
-        inst = self._IEnumerableMapping__sample()
-        state = self._IEnumerableMapping__stateDict()
-        test_keys(self, inst, state)
-
-    def test_values(self):
-        # Return the values of the mapping object
-        inst = self._IEnumerableMapping__sample()
-        state = self._IEnumerableMapping__stateDict()
-        test_values(self, inst, state)
-
-    def test_iter(self):
-        # Return the keys of the mapping object
-        inst = self._IEnumerableMapping__sample()
-        state = self._IEnumerableMapping__stateDict()
-        test_iter(self, inst, state)
-
-    def test_items(self):
-        # Return the items of the mapping object
-        inst = self._IEnumerableMapping__sample()
-        state = self._IEnumerableMapping__stateDict()
-        test_items(self, inst, state)
-
-    def test___len__(self):
-        # Return the number of items
-        inst = self._IEnumerableMapping__sample()
-        state = self._IEnumerableMapping__stateDict()
-        test___len__(self, inst, state)
-
-    def _IReadMapping__stateDict(self):
-        return self._IEnumerableMapping__stateDict()
-
-    def _IReadMapping__sample(self):
-        return self._IEnumerableMapping__sample()
-
-    def _IReadMapping__absentKeys(self):
-        return self._IEnumerableMapping__absentKeys()
diff --git a/branches/bug1734/src/zope/interface/common/tests/test_idatetime.py b/branches/bug1734/src/zope/interface/common/tests/test_idatetime.py
deleted file mode 100644
index f84316a3..00000000
--- a/branches/bug1734/src/zope/interface/common/tests/test_idatetime.py
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test for datetime interfaces
-
-$Id$
-"""
-
-import unittest
-
-from zope.interface.verify import verifyObject, verifyClass
-from zope.interface.common.idatetime import ITimeDelta, ITimeDeltaClass
-from zope.interface.common.idatetime import IDate, IDateClass
-from zope.interface.common.idatetime import IDateTime, IDateTimeClass
-from zope.interface.common.idatetime import ITime, ITimeClass, ITZInfo
-from datetime import timedelta, date, datetime, time, tzinfo
-
-class TestDateTimeInterfaces(unittest.TestCase):
-
-    def test_interfaces(self):
-        verifyObject(ITimeDelta, timedelta(minutes=20))
-        verifyObject(IDate, date(2000, 1, 2))
-        verifyObject(IDateTime, datetime(2000, 1, 2, 10, 20))
-        verifyObject(ITime, time(20, 30, 15, 1234))
-        verifyObject(ITZInfo, tzinfo())
-        verifyClass(ITimeDeltaClass, timedelta)
-        verifyClass(IDateClass, date)
-        verifyClass(IDateTimeClass, datetime)
-        verifyClass(ITimeClass, time)
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(TestDateTimeInterfaces))
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main()
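Note the division of labor in the test above: verifyObject checks that an
instance provides an interface, while verifyClass checks the class itself
against the *Class interfaces. Both raise an exception from
zope.interface.exceptions on failure; a hedged sketch:

    from zope.interface.verify import verifyClass
    from zope.interface.exceptions import DoesNotImplement
    from zope.interface.common.idatetime import IDateClass

    try:
        verifyClass(IDateClass, dict)   # dict declares no such interface
    except DoesNotImplement:
        pass                            # reported interface by interface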
diff --git a/branches/bug1734/src/zope/interface/declarations.py b/branches/bug1734/src/zope/interface/declarations.py
deleted file mode 100644
index b773f8b0..00000000
--- a/branches/bug1734/src/zope/interface/declarations.py
+++ /dev/null
@@ -1,1388 +0,0 @@
-##############################################################################
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-##############################################################################
-"""Implementation of interface declarations
-
-There are three flavors of declarations:
-
-  - Declarations are used to simply name declared interfaces.
-
-  - ImplementsDeclarations are used to express the interfaces that a
-    class implements (that instances of the class provides).
-
-    Implements specifications support inheriting interfaces.
-
-  - ProvidesDeclarations are used to express interfaces directly
-    provided by objects.
-    
-
-$Id$
-"""
-__docformat__ = 'restructuredtext'
-import sys
-import weakref
-from zope.interface.interface import InterfaceClass, Specification
-from ro import mergeOrderings, ro
-import exceptions
-from types import ClassType
-from zope.interface.advice import addClassAdvisor
-
-# Registry of class-implementation specifications 
-BuiltinImplementationSpecifications = {}
-
-class Declaration(Specification):
-    """Interface declarations
-
-    """
-
-    def __init__(self, *interfaces):
-        Specification.__init__(self, _normalizeargs(interfaces))
-
-    def changed(self):
-        Specification.changed(self)
-        try:
-            del self._v_attrs
-        except AttributeError:
-            pass
-
-    def __contains__(self, interface):
-        """Test whether an interface is in the specification
-
-        for example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration(I2, I3)
-          >>> spec = Declaration(I4, spec)
-          >>> int(I1 in spec)
-          0
-          >>> int(I2 in spec)
-          1
-          >>> int(I3 in spec)
-          1
-          >>> int(I4 in spec)
-          1
-        """
-        return self.extends(interface) and interface in self.interfaces()
-
-    def __iter__(self):
-        """Return an iterator for the interfaces in the specification
-
-        for example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration(I2, I3)
-          >>> spec = Declaration(I4, spec)
-          >>> i = iter(spec)
-          >>> i.next().getName()
-          'I4'
-          >>> i.next().getName()
-          'I2'
-          >>> i.next().getName()
-          'I3'
-          >>> list(i)
-          []
-        """
-        return self.interfaces()
-
-    def flattened(self):
-        """Return an iterator of all included and extended interfaces
-
-        for example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration(I2, I3)
-          >>> spec = Declaration(I4, spec)
-          >>> i = spec.flattened()
-          >>> i.next().getName()
-          'I4'
-          >>> i.next().getName()
-          'I2'
-          >>> i.next().getName()
-          'I1'
-          >>> i.next().getName()
-          'I3'
-          >>> i.next().getName()
-          'Interface'
-          >>> list(i)
-          []
-
-        """
-        return iter(self.__iro__)
-
-    def __sub__(self, other):
-        """Remove interfaces from a specification
-
-        Examples::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration()
-          >>> [iface.getName() for iface in spec]
-          []
-          >>> spec -= I1
-          >>> [iface.getName() for iface in spec]
-          []
-          >>> spec -= Declaration(I1, I2)
-          >>> [iface.getName() for iface in spec]
-          []
-          >>> spec = Declaration(I2, I4)
-          >>> [iface.getName() for iface in spec]
-          ['I2', 'I4']
-          >>> [iface.getName() for iface in spec - I4]
-          ['I2']
-          >>> [iface.getName() for iface in spec - I1]
-          ['I4']
-          >>> [iface.getName() for iface
-          ...  in spec - Declaration(I3, I4)]
-          ['I2']
-
-        """
-
-        return Declaration(
-            *[i for i in self.interfaces()
-                if not [j for j in other.interfaces()
-                        if i.extends(j, 0)]
-                ]
-                )
-    
-    def __add__(self, other):
-        """Add two specifications or a specification and an interface
-
-
-        Examples::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration()
-          >>> [iface.getName() for iface in spec]
-          []
-          >>> [iface.getName() for iface in spec+I1]
-          ['I1']
-          >>> [iface.getName() for iface in I1+spec]
-          ['I1']
-          >>> spec2 = spec
-          >>> spec += I1
-          >>> [iface.getName() for iface in spec]
-          ['I1']
-          >>> [iface.getName() for iface in spec2]
-          []
-          >>> spec2 += Declaration(I3, I4)
-          >>> [iface.getName() for iface in spec2]
-          ['I3', 'I4']
-          >>> [iface.getName() for iface in spec+spec2]
-          ['I1', 'I3', 'I4']
-          >>> [iface.getName() for iface in spec2+spec]
-          ['I3', 'I4', 'I1']
-
-        """
-
-        seen = {}
-        result = []
-        for i in self.interfaces():
-            if i not in seen:
-                seen[i] = 1
-                result.append(i)
-        for i in other.interfaces():
-            if i not in seen:
-                seen[i] = 1
-                result.append(i)
-
-        return Declaration(*result)
-
-    __radd__ = __add__
-
-    def __nonzero__(self):
-        """Test whether there are any interfaces in a specification.
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface): pass
-        ...
-        >>> spec = Declaration(I1)
-        >>> int(bool(spec))
-        1
-        >>> spec = Declaration()
-        >>> int(bool(spec))
-        0
-        """
-        return bool(self.__iro__)
-
-
-##############################################################################
-#
-# Implementation specifications
-#
-# These specify interfaces implemented by instances of classes
-
-class Implements(Declaration):
-    inherit = None
-    declared = ()
-    __name__ = '?'
-
-    def __repr__(self):
-        return '<implementedBy %s>' % (self.__name__)
-    
-        
-
-def implementedByFallback(cls):
-    """Return the interfaces implemented for a class' instances
-
-      The value returned is an IDeclaration.
-
-      for example:
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface): pass
-        ...
-        >>> class I2(I1): pass
-        ...
-        >>> class I3(Interface): pass
-        ...
-        >>> class I4(I3): pass
-        ...
-        >>> class C1(object):
-        ...   implements(I2)
-        >>> class C2(C1):
-        ...   implements(I3)
-        >>> [i.getName() for i in implementedBy(C2)]
-        ['I3', 'I2']
-      """
-
-    # This also manages storage of implementation specifications
-
-    try:
-        spec = cls.__dict__.get('__implemented__')
-    except AttributeError:
-        
-        # we can't get the class dict. This is probably due to a
-        # security proxy.  If this is the case, then probably no
-        # descriptor was installed for the class.
-
-        # We don't want to depend directly on zope.security in
-        # zope.interface, but we'll try to make reasonable
-        # accommodations in an indirect way.
-
-        # We'll check to see if there's an implements:
-
-        spec = getattr(cls, '__implemented__', None)
-        if spec is None:
-            # There's no spec stored in the class. Maybe it's a builtin:
-            spec = BuiltinImplementationSpecifications.get(cls)
-            if spec is not None:
-                return spec
-            return _empty
-        
-        if spec.__class__ == Implements:
-            # we defaulted to _empty or there was a spec. Good enough.
-            # Return it.
-            return spec
-
-        # TODO: need old style __implements__ compatibility?
-        # Hm, there's an __implemented__, but it's not a spec. Must be
-        # an old-style declaration. Just compute a spec for it
-        return Declaration(*_normalizeargs((spec, )))
-        
-    if isinstance(spec, Implements):
-        return spec
-
-    if spec is None:
-        spec = BuiltinImplementationSpecifications.get(cls)
-        if spec is not None:
-            return spec
-
-    # TODO: need old style __implements__ compatibility?
-    if spec is not None:
-        # old-style __implemented__ = foo declaration
-        spec = (spec, ) # tuplefy, as it might be just an int
-        spec = Implements(*_normalizeargs(spec))
-        spec.inherit = None    # old-style implies no inherit
-        del cls.__implemented__ # get rid of the old-style declaration
-    else:
-        try:
-            bases = cls.__bases__
-        except AttributeError:
-            if not callable(cls):
-                raise TypeError("ImplementedBy called for non-factory", cls)
-            bases = ()
-
-        spec = Implements(*[implementedBy(c) for c in bases])
-        spec.inherit = cls
-
-    spec.__name__ = (getattr(cls, '__module__', '?') or '?') + \
-                    '.' + cls.__name__
-
-    try:
-        cls.__implemented__ = spec
-        if not hasattr(cls, '__providedBy__'):
-            cls.__providedBy__ = objectSpecificationDescriptor
-
-        if (isinstance(cls, DescriptorAwareMetaClasses)
-            and
-            '__provides__' not in cls.__dict__):
-            # Make sure we get a __provides__ descriptor
-            cls.__provides__ = ClassProvides(
-                cls,
-                getattr(cls, '__class__', type(cls)),
-                )
-                        
-    except TypeError:
-        if not isinstance(cls, type):
-            raise TypeError("ImplementedBy called for non-type", cls)
-        BuiltinImplementationSpecifications[cls] = spec
-
-    return spec
-
-implementedBy = implementedByFallback
-
-def classImplementsOnly(cls, *interfaces):
-    """Declare the only interfaces implemented by instances of a class
-
-      The arguments after the class are one or more interfaces or
-      interface specifications (IDeclaration objects).
-
-      The interfaces given (including the interfaces in the
-      specifications) replace any previous declarations.
-
-      Consider the following example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(Interface): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(Interface): pass
-          ...
-          >>> class A(object):
-          ...   implements(I3)
-          >>> class B(object):
-          ...   implements(I4)
-          >>> class C(A, B):
-          ...   pass
-          >>> classImplementsOnly(C, I1, I2)
-          >>> [i.getName() for i in implementedBy(C)]
-          ['I1', 'I2']
-
-      Instances of ``C`` provide only ``I1`` and ``I2``, regardless of
-      what interfaces instances of ``A`` and ``B`` implement.
-
-      """
-    spec = implementedBy(cls)
-    spec.__bases__ = tuple(_normalizeargs(interfaces))
-    spec.inherit = None
-
-def classImplements(cls, *interfaces):
-    """Declare additional interfaces implemented for instances of a class
-
-      The arguments after the class are one or more interfaces or
-      interface specifications (IDeclaration objects).
-
-      The interfaces given (including the interfaces in the
-      specifications) are added to any interfaces previously
-      declared.
-
-      Consider the following example::
-
-      >>> from zope.interface import Interface
-      >>> class I1(Interface): pass
-      ...
-      >>> class I2(Interface): pass
-      ...
-      >>> class I3(Interface): pass
-      ...
-      >>> class I4(Interface): pass
-      ...
-      >>> class I5(Interface): pass
-      ...
-      >>> class A(object):
-      ...   implements(I3)
-      >>> class B(object):
-      ...   implements(I4)
-      >>> class C(A, B):
-      ...   pass
-      >>> classImplements(C, I1, I2)
-      >>> [i.getName() for i in implementedBy(C)]
-      ['I1', 'I2', 'I3', 'I4']
-      >>> classImplements(C, I5)
-      >>> [i.getName() for i in implementedBy(C)]
-      ['I1', 'I2', 'I5', 'I3', 'I4']
-
-      Instances of ``C`` provide ``I1``, ``I2``, ``I5``, and whatever
-      interfaces instances of ``A`` and ``B`` provide.
-
-      """
-
-    spec = implementedBy(cls)
-    spec.declared += tuple(_normalizeargs(interfaces))
-
-    # compute the bases
-    bases = []
-    seen = {}
-    for b in spec.declared:
-        if b not in seen:
-            seen[b] = 1
-            bases.append(b)
-
-    if spec.inherit is not None:
-
-        for c in spec.inherit.__bases__:
-            b = implementedBy(c)
-            if b not in seen:
-                seen[b] = 1
-                bases.append(b)
-        
-    spec.__bases__ = tuple(bases)
-
-def _implements_advice(cls):
-    interfaces, classImplements = cls.__dict__['__implements_advice_data__']
-    del cls.__implements_advice_data__
-    classImplements(cls, *interfaces)
-    return cls
-
-
-class implementer:
-
-    def __init__(self, *interfaces):
-        self.interfaces = interfaces
-
-    def __call__(self, ob):
-        if isinstance(ob, DescriptorAwareMetaClasses):
-            raise TypeError("Can't use implementer with classes.  Use one of "
-                            "the class-declaration functions instead."
-                            )
-        spec = Implements(*self.interfaces)
-        try:
-            ob.__implemented__ = spec
-        except AttributeError:
-            raise TypeError("Can't declare implements", ob)
-        return ob
-
-def _implements(name, interfaces, classImplements):
-    frame = sys._getframe(2)
-    locals = frame.f_locals
-
-    # Try to make sure we were called from a class def. In 2.2.0 we can't
-    # check for __module__ since it doesn't seem to be added to the locals
-    # until later on.
-    if (locals is frame.f_globals) or (
-        ('__module__' not in locals) and sys.version_info[:3] > (2, 2, 0)):
-        raise TypeError(name+" can be used only from a class definition.")
-
-    if '__implements_advice_data__' in locals:
-        raise TypeError(name+" can be used only once in a class definition.")
-
-    locals['__implements_advice_data__'] = interfaces, classImplements
-    addClassAdvisor(_implements_advice, depth=3)
-
-def implements(*interfaces):
-    """Declare interfaces implemented by instances of a class
-
-      This function is called in a class definition.
-
-      The arguments are one or more interfaces or interface
-      specifications (IDeclaration objects).
-
-      The interfaces given (including the interfaces in the
-      specifications) are added to any interfaces previously
-      declared.
-
-      Previous declarations include declarations for base classes
-      unless implementsOnly was used.
-
-      This function is provided for convenience. It provides a more
-      convenient way to call classImplements. For example::
-
-        implements(I1)
-
-      is equivalent to calling::
-
-        classImplements(C, I1)
-
-      after the class has been created.
-
-      Consider the following example::
-
-
-        >>> from zope.interface import Interface
-        >>> class IA1(Interface): pass
-        ...
-        >>> class IA2(Interface): pass
-        ...
-        >>> class IB(Interface): pass
-        ...
-        >>> class IC(Interface): pass
-        ...
-        >>> class A(object): implements(IA1, IA2)
-        ...
-        >>> class B(object): implements(IB)
-        ...
-
-        >>> class C(A, B):
-        ...    implements(IC)
-
-        >>> ob = C()
-        >>> int(IA1 in providedBy(ob))
-        1
-        >>> int(IA2 in providedBy(ob))
-        1
-        >>> int(IB in providedBy(ob))
-        1
-        >>> int(IC in providedBy(ob))
-        1
-
-      Instances of ``C`` implement ``IC``, and whatever interfaces
-      instances of ``A`` and ``B`` implement.
-
-      """
-    _implements("implements", interfaces, classImplements)
-
-def implementsOnly(*interfaces):
-    """Declare the only interfaces implemented by instances of a class
-
-      This function is called in a class definition.
-
-      The arguments are one or more interfaces or interface
-      specifications (IDeclaration objects).
-
-      Previous declarations including declarations for base classes
-      are overridden.
-
-      This function is provided for convenience. It provides a more
-      convenient way to call classImplementsOnly. For example::
-
-        implementsOnly(I1)
-
-      is equivalent to calling::
-
-        classImplementsOnly(C, I1)
-
-      after the class has been created.
-
-      Consider the following example::
-
-        >>> from zope.interface import Interface
-        >>> class IA1(Interface): pass
-        ...
-        >>> class IA2(Interface): pass
-        ...
-        >>> class IB(Interface): pass
-        ...
-        >>> class IC(Interface): pass
-        ...
-        >>> class A(object): implements(IA1, IA2)
-        ...
-        >>> class B(object): implements(IB)
-        ...
-
-        >>> class C(A, B):
-        ...    implementsOnly(IC)
-
-        >>> ob = C()
-        >>> int(IA1 in providedBy(ob))
-        0
-        >>> int(IA2 in providedBy(ob))
-        0
-        >>> int(IB in providedBy(ob))
-        0
-        >>> int(IC in providedBy(ob))
-        1
-
-
-      Instances of ``C`` implement ``IC``, regardless of what
-      instances of ``A`` and ``B`` implement.
-
-      """
-    _implements("implementsOnly", interfaces, classImplementsOnly)
-
-##############################################################################
-#
-# Instance declarations
-
-class Provides(Declaration):  # Really named ProvidesClass
-    """Implement __provides__, the instance-specific specification
-
-    When an object is pickled, we pickle the interfaces that it implements.
-    """
-
-    def __init__(self, cls, *interfaces):
-        self.__args = (cls, ) + interfaces
-        self._cls = cls
-        Declaration.__init__(self, *(interfaces + (implementedBy(cls), )))
-
-    def __reduce__(self):
-        return Provides, self.__args
-
-    __module__ = 'zope.interface'
-
-    def __get__(self, inst, cls):
-        """Make sure that a class __provides__ doesn't leak to an instance
-
-        For example::
-
-          >>> from zope.interface import Interface
-          >>> class IFooFactory(Interface): pass
-          ...
-          
-          >>> class C(object):
-          ...   pass
-
-          >>> C.__provides__ = ProvidesClass(C, IFooFactory)
-          >>> [i.getName() for i in C.__provides__]
-          ['IFooFactory']
-          >>> getattr(C(), '__provides__', 0)
-          0
-
-        """
-        if inst is None and cls is self._cls:
-            # We were accessed through a class, so we are the class'
-            # provides spec. Just return this object, but only if we are
-            # being called on the same class that we were defined for:
-            return self
-
-        raise AttributeError, '__provides__'
-
-ProvidesClass = Provides
-
-# Registry of instance declarations
-# This is a memory optimization to allow objects to share specifications.
-InstanceDeclarations = weakref.WeakValueDictionary()
-
-def Provides(*interfaces):
-    """Cache instance declarations
-
-      Instance declarations are shared among instances that have the
-      same declaration.  The declarations are cached in a weak value
-      dictionary.
-
-      (Note that, in the examples below, we are going to make
-       assertions about the size of the weakvalue dictionary.  For the
-       assertions to be meaningful, we need to force garbage
-       collection to make sure garbage objects are, indeed, removed
-       from the system. Depending on how Python is run, we may need to
-       make multiple calls to be sure.  We provide a collect function
-       to help with this:
-
-       >>> import gc
-       >>> def collect():
-       ...     for i in range(4):
-       ...         gc.collect()
-
-      )
-      
-      >>> collect()
-      >>> before = len(InstanceDeclarations)
-
-      >>> class C(object):
-      ...    pass
-
-      >>> from zope.interface import Interface
-      >>> class I(Interface):
-      ...    pass
-      
-      >>> c1 = C()
-      >>> c2 = C()
-
-      >>> int(len(InstanceDeclarations) == before)
-      1
-
-      >>> directlyProvides(c1, I)
-      >>> int(len(InstanceDeclarations) == before + 1)
-      1
-
-      >>> directlyProvides(c2, I)
-      >>> int(len(InstanceDeclarations) == before + 1)
-      1
-
-      >>> del c1
-      >>> collect()
-      >>> int(len(InstanceDeclarations) == before + 1)
-      1
-
-      >>> del c2
-      >>> collect()
-      >>> int(len(InstanceDeclarations) == before)
-      1
-      
-      """
-    
-    spec = InstanceDeclarations.get(interfaces)
-    if spec is None:
-        spec = ProvidesClass(*interfaces)
-        InstanceDeclarations[interfaces] = spec
-
-    return spec
-Provides.__safe_for_unpickling__ = True
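-
-# A hedged sketch of what the cache buys us (reusing ``C`` and ``I`` from
-# the doctest above): objects with identical direct declarations share a
-# single Provides instance, and __reduce__ preserves that sharing across
-# pickling, because unpickling calls this caching factory again:
-#
-#   >>> c1, c2 = C(), C()
-#   >>> directlyProvides(c1, I)
-#   >>> directlyProvides(c2, I)
-#   >>> c1.__provides__ is c2.__provides__
-#   True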
-
-
-DescriptorAwareMetaClasses = ClassType, type
-def directlyProvides(object, *interfaces):
-    """Declare interfaces declared directly for an object
-
-      The arguments after the object are one or more interfaces or
-      interface specifications (IDeclaration objects).
-
-      The interfaces given (including the interfaces in the
-      specifications) replace interfaces previously
-      declared for the object.
-
-      Consider the following example::
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface): pass
-        ...
-        >>> class I2(Interface): pass
-        ...
-        >>> class IA1(Interface): pass
-        ...
-        >>> class IA2(Interface): pass
-        ...
-        >>> class IB(Interface): pass
-        ...
-        >>> class IC(Interface): pass
-        ...
-        >>> class A(object): implements(IA1, IA2)
-        ...
-        >>> class B(object): implements(IB)
-        ...
-
-        >>> class C(A, B):
-        ...    implements(IC)
-
-        >>> ob = C()
-        >>> directlyProvides(ob, I1, I2)
-        >>> int(I1 in providedBy(ob))
-        1
-        >>> int(I2 in providedBy(ob))
-        1
-        >>> int(IA1 in providedBy(ob))
-        1
-        >>> int(IA2 in providedBy(ob))
-        1
-        >>> int(IB in providedBy(ob))
-        1
-        >>> int(IC in providedBy(ob))
-        1
-
-      The object, ``ob``, provides ``I1``, ``I2``, and whatever interfaces
-      have been declared for instances of ``C``.
-
-      To remove directly provided interfaces, use ``directlyProvidedBy`` and
-      subtract the unwanted interfaces. For example::
-
-        >>> directlyProvides(ob, directlyProvidedBy(ob)-I2)
-        >>> int(I1 in providedBy(ob))
-        1
-        >>> int(I2 in providedBy(ob))
-        0
-
-      removes ``I2`` from the interfaces directly provided by
-      ``ob``. The object, ``ob``, no longer directly provides ``I2``,
-      although it might still provide ``I2`` if its class
-      implements ``I2``.
-
-      To add directly provided interfaces, use ``directlyProvidedBy`` and
-      include additional interfaces.  For example::
-
-        >>> int(I2 in providedBy(ob))
-        0
-        >>> directlyProvides(ob, directlyProvidedBy(ob), I2)
-
-      adds ``I2`` to the interfaces directly provided by ``ob``::
-
-        >>> int(I2 in providedBy(ob))
-        1
-
-      """
-
-    # We need to avoid setting this attribute on meta classes that
-    # don't support descriptors.
-    # We can do away with this check when we get rid of the old EC
-    cls = getattr(object, '__class__', None)
-    if cls is not None and getattr(cls, '__class__', None) is cls:
-        # It's a meta class (well, at least it could be an extension class)
-        if not isinstance(object, DescriptorAwareMetaClasses):
-            raise TypeError("Attempt to make an interface declaration on a "
-                            "non-descriptor-aware class")
-
-    interfaces = _normalizeargs(interfaces)
-    if cls is None:
-        cls = type(object)
-
-    issub = False
-    for damc in DescriptorAwareMetaClasses:
-        if issubclass(cls, damc):
-            issub = True
-            break
-    if issub:
-        # we have a class or type.  We'll use a special descriptor
-        # that provides some extra caching
-        object.__provides__ = ClassProvides(object, cls, *interfaces)
-    else:
-        object.__provides__ = Provides(cls, *interfaces)
-        
-    
-def alsoProvides(object, *interfaces):
-    """Declare interfaces declared directly for an object
-
-      The arguments after the object are one or more interfaces or
-      interface specifications (IDeclaration objects).
-
-      The interfaces given (including the interfaces in the
-      specifications) are added to the interfaces previously
-      declared for the object.
-      
-      Consider the following example::
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface): pass
-        ...
-        >>> class I2(Interface): pass
-        ...
-        >>> class IA1(Interface): pass
-        ...
-        >>> class IA2(Interface): pass
-        ...
-        >>> class IB(Interface): pass
-        ...
-        >>> class IC(Interface): pass
-        ...
-        >>> class A(object): implements(IA1, IA2)
-        ...
-        >>> class B(object): implements(IB)
-        ...
-
-        >>> class C(A, B):
-        ...    implements(IC)
-
-        >>> ob = C()
-        >>> directlyProvides(ob, I1)
-        >>> int(I1 in providedBy(ob))
-        1
-        >>> int(I2 in providedBy(ob))
-        0
-        >>> int(IA1 in providedBy(ob))
-        1
-        >>> int(IA2 in providedBy(ob))
-        1
-        >>> int(IB in providedBy(ob))
-        1
-        >>> int(IC in providedBy(ob))
-        1
-        
-        >>> alsoProvides(ob, I2)
-        >>> int(I1 in providedBy(ob))
-        1
-        >>> int(I2 in providedBy(ob))
-        1
-        >>> int(IA1 in providedBy(ob))
-        1
-        >>> int(IA2 in providedBy(ob))
-        1
-        >>> int(IB in providedBy(ob))
-        1
-        >>> int(IC in providedBy(ob))
-        1
-        
-      The object, ``ob``, provides ``I1``, ``I2``, and whatever interfaces
-      have been declared for instances of ``C``. Notice that
-      alsoProvides just extends the set of directly provided interfaces.
-    """
-    directlyProvides(object, directlyProvidedBy(object), *interfaces)
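-
-# A hedged converse sketch: there is no removal helper here, but the same
-# primitives spell one.  ``ob`` and ``I2`` stand for any object and any
-# directly provided interface:
-#
-#   directlyProvides(ob, directlyProvidedBy(ob) - I2)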
-
-class ClassProvidesBasePy(object):
-
-    def __get__(self, inst, cls):
-        if cls is self._cls:
-            # We only work if called on the class we were defined for
-            
-            if inst is None:
-                # We were accessed through a class, so we are the class'
-                # provides spec. Just return this object as is:
-                return self
-
-            return self._implements
-
-        raise AttributeError, '__provides__'
-
-ClassProvidesBase = ClassProvidesBasePy
-
-# Try to get C base:
-try:
-    import _zope_interface_coptimizations
-except ImportError:
-    pass
-else:
-    from _zope_interface_coptimizations import ClassProvidesBase
-
-
-class ClassProvides(Declaration, ClassProvidesBase):
-    """Special descriptor for class __provides__
-
-    The descriptor caches the implementedBy info, so that
-    we can get declarations for objects without instance-specific
-    interfaces a bit quicker.
-
-        For example::
-
-          >>> from zope.interface import Interface
-          >>> class IFooFactory(Interface):
-          ...     pass
-          >>> class IFoo(Interface):
-          ...     pass
-          >>> class C(object):
-          ...     implements(IFoo)
-          ...     classProvides(IFooFactory)
-          >>> [i.getName() for i in C.__provides__]
-          ['IFooFactory']
-
-          >>> [i.getName() for i in C().__provides__]
-          ['IFoo']
-
-    
-    """
-
-    def __init__(self, cls, metacls, *interfaces):
-        self._cls = cls
-        self._implements = implementedBy(cls)
-        self.__args = (cls, metacls, ) + interfaces
-        Declaration.__init__(self, *(interfaces + (implementedBy(metacls), )))
-
-    def __reduce__(self):
-        return self.__class__, self.__args
-
-    # Copy base-class method for speed
-    __get__ = ClassProvidesBase.__get__
-
-def directlyProvidedBy(object):
-    """Return the interfaces directly provided by the given object
-
-    The value returned is an IDeclaration.
-
-    """
-    provides = getattr(object, "__provides__", None)
-    if (provides is None # no spec
-        or
-        # We might have gotten the implements spec, as an
-        # optimization. If so, it's like having only one base, that we
-        # lop off to exclude class-supplied declarations:
-        isinstance(provides, Implements)
-        ):
-        return _empty
-
-    # Strip off the class part of the spec:
-    return Declaration(provides.__bases__[:-1])
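-
-# Descriptive sketch of the slicing above: Provides/ClassProvides specs put
-# the class's implements spec last in their bases, so dropping the last
-# base leaves exactly the directly provided interfaces.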
-
-def classProvides(*interfaces):
-    """Declare interfaces provided directly by a class
-
-      This function is called in a class definition.
-
-      The arguments are one or more interfaces or interface
-      specifications (IDeclaration objects).
-
-      The given interfaces (including the interfaces in the
-      specifications) are used to create the class's direct-object
-      interface specification.  An error will be raised if the class
-      already has a direct interface specification.  In other words, it is
-      an error to call this function more than once in a class
-      definition.
-
-      Note that the given interfaces have nothing to do with the
-      interfaces implemented by instances of the class.
-
-      This function is a convenience: it simply arranges for
-      directlyProvides to be called on the class. For
-      example::
-
-        classProvides(I1)
-
-      is equivalent to calling::
-
-        directlyProvides(theclass, I1)
-
-      after the class has been created.
-
-      For example::
-
-            >>> from zope.interface import Interface
-            >>> class IFoo(Interface): pass
-            ...
-            >>> class IFooFactory(Interface): pass
-            ...
-            >>> class C(object):
-            ...   implements(IFoo)
-            ...   classProvides(IFooFactory)
-            >>> [i.getName() for i in C.__providedBy__]
-            ['IFooFactory']
-            >>> [i.getName() for i in C().__providedBy__]
-            ['IFoo']
-
-      is equivalent to::
-
-            >>> from zope.interface import Interface
-            >>> class IFoo(Interface): pass
-            ...
-            >>> class IFooFactory(Interface): pass
-            ...
-            >>> class C(object):
-            ...   implements(IFoo)
-            >>> directlyProvides(C, IFooFactory)
-            >>> [i.getName() for i in C.__providedBy__]
-            ['IFooFactory']
-            >>> [i.getName() for i in C().__providedBy__]
-            ['IFoo']
-
-
-      """
-    frame = sys._getframe(1)
-    locals = frame.f_locals
-
-    # Try to make sure we were called from a class def
-    if (locals is frame.f_globals) or ('__module__' not in locals):
-        raise TypeError(
-            "classProvides can be used only from a class definition.")
-
-    if '__provides__' in locals:
-        raise TypeError(
-            "classProvides can only be used once in a class definition.")
-
-    locals["__provides__"] = _normalizeargs(interfaces)
-
-    addClassAdvisor(_classProvides_advice, depth=2)
-
-def _classProvides_advice(cls):
-    interfaces = cls.__dict__['__provides__']
-    del cls.__provides__
-    directlyProvides(cls, *interfaces)
-    return cls
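-
-# How classProvides and its advisor fit together (a descriptive sketch):
-# classProvides stashes the normalized interfaces in the class body's
-# __provides__, and once the class object exists the advisor swaps that
-# stash for a real descriptor via directlyProvides(cls, *interfaces).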
-
-def moduleProvides(*interfaces):
-    """Declare interfaces provided by a module
-
-    This function is used in a module definition.
-
-    The arguments are one or more interfaces or interface
-    specifications (IDeclaration objects).
-
-    The given interfaces (including the interfaces in the
-    specifications) are used to create the module's direct-object
-    interface specification.  An error will be raised if the module
-    already has an interface specification.  In other words, it is
-    an error to call this function more than once in a module
-    definition.
-
-    This function is a convenience: it simply arranges for
-    directlyProvides to be called on the module. For example::
-
-      moduleProvides(I1)
-
-    is equivalent to::
-
-      directlyProvides(sys.modules[__name__], I1)
-
-    """
-    frame = sys._getframe(1)
-    locals = frame.f_locals
-
-    # Try to make sure we were called from a module body
-    if (locals is not frame.f_globals) or ('__name__' not in locals):
-        raise TypeError(
-            "moduleProvides can only be used from a module definition.")
-
-    if '__provides__' in locals:
-        raise TypeError(
-            "moduleProvides can only be used once in a module definition.")
-
-    # Any module will do for obtaining the module type; this one is handy:
-    module = sys.modules[__name__]
-
-    locals["__provides__"] = Provides(type(module),
-                                      *_normalizeargs(interfaces))
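-
-# Hedged usage sketch, at the top level of a module (ISomething stands for
-# any interface):
-#
-#   from zope.interface import moduleProvides
-#   moduleProvides(ISomething)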
-
-##############################################################################
-#
-# Declaration querying support
-
-def ObjectSpecification(direct, cls):
-    """Provide object specifications
-
-    These combine information for the object and for its classes.
-
-    For example::
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface): pass
-        ...
-        >>> class I2(Interface): pass
-        ...
-        >>> class I3(Interface): pass
-        ...
-        >>> class I31(I3): pass
-        ...
-        >>> class I4(Interface): pass
-        ...
-        >>> class I5(Interface): pass
-        ...
-        >>> class A(object): implements(I1)
-        ...
-        >>> class B(object): __implemented__ = I2
-        ...
-        >>> class C(A, B): implements(I31)
-        ...
-        >>> c = C()
-        >>> directlyProvides(c, I4)
-        >>> [i.getName() for i in providedBy(c)]
-        ['I4', 'I31', 'I1', 'I2']
-        >>> [i.getName() for i in providedBy(c).flattened()]
-        ['I4', 'I31', 'I3', 'I1', 'I2', 'Interface']
-        >>> int(I1 in providedBy(c))
-        1
-        >>> int(I3 in providedBy(c))
-        0
-        >>> int(providedBy(c).extends(I3))
-        1
-        >>> int(providedBy(c).extends(I31))
-        1
-        >>> int(providedBy(c).extends(I5))
-        0
-        >>> class COnly(A, B): implementsOnly(I31)
-        ...
-        >>> class D(COnly): implements(I5)
-        ...
-        >>> c = D()
-        >>> directlyProvides(c, I4)
-        >>> [i.getName() for i in providedBy(c)]
-        ['I4', 'I5', 'I31']
-        >>> [i.getName() for i in providedBy(c).flattened()]
-        ['I4', 'I5', 'I31', 'I3', 'Interface']
-        >>> int(I1 in providedBy(c))
-        0
-        >>> int(I3 in providedBy(c))
-        0
-        >>> int(providedBy(c).extends(I3))
-        1
-        >>> int(providedBy(c).extends(I1))
-        0
-        >>> int(providedBy(c).extends(I31))
-        1
-        >>> int(providedBy(c).extends(I5))
-        1
-
-
-        Truth-value (nonzero) behavior:
-
-        >>> from zope.interface import Interface
-        >>> class I1(Interface):
-        ...     pass
-        >>> class I2(Interface):
-        ...     pass
-        >>> class C(object):
-        ...     implements(I1)
-        >>> c = C()
-        >>> int(bool(providedBy(c)))
-        1
-        >>> directlyProvides(c, I2)
-        >>> int(bool(providedBy(c)))
-        1
-        >>> class C(object):
-        ...     pass
-        >>> c = C()
-        >>> int(bool(providedBy(c)))
-        0
-        >>> directlyProvides(c, I2)
-        >>> int(bool(providedBy(c)))
-        1
-
-
-    """
-
-    return Provides(cls, direct)
-
-def getObjectSpecification(ob):
-
-    provides = getattr(ob, '__provides__', None)
-    if provides is not None:
-        return provides
-    
-    try:
-        cls = ob.__class__
-    except AttributeError:
-        # We can't get the class, so just consider provides
-        return _empty
-
-    return implementedBy(cls)
-
-def providedBy(ob):
-
-    # Here we have either a special object, an old-style declaration
-    # or a descriptor
-
-    # Try to get __providedBy__
-    try:
-        r = ob.__providedBy__
-    except AttributeError:
-        # Not set yet. Fall back to lower-level thing that computes it
-        return getObjectSpecification(ob)
-    
-
-    try:
-        # We might have gotten a descriptor from an instance of a
-        # class (like an ExtensionClass) that doesn't support
-        # descriptors.  We'll make sure we got a spec by trying to get
-        # an attribute that all specs have.
-        r.extends
-
-    except AttributeError:
-
-        # The object's class doesn't understand descriptors.
-        # Sigh. We need to get an object descriptor, but we have to be
-        # careful.  We want to use the instance's __provides__, if
-        # there is one, but only if it didn't come from the class.
-
-        try:
-            r = ob.__provides__
-        except AttributeError:
-            # No __provides__, so just fall back to implementedBy
-            return implementedBy(ob.__class__)
-
-        # We need to make sure we got the __provides__ from the
-        # instance. We'll do this by making sure we don't get the same
-        # thing from the class:
-
-        try:
-            cp = ob.__class__.__provides__
-        except AttributeError:
-            # The ob doesn't have a class or the class has no
-            # provides, assume we're done:
-            return r
-
-        if r is cp:
-            # Oops, we got the provides from the class. This means
-            # the object doesn't have its own. We should use implementedBy
-            return implementedBy(ob.__class__)
-
-    return r
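-
-# Summary of providedBy's fallback chain above (descriptive, no new
-# behavior):
-#   1. ob.__providedBy__ (normally our descriptor);
-#   2. if that value isn't really a spec, ob.__provides__, unless it
-#      leaked from the class;
-#   3. otherwise implementedBy(ob.__class__).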
-
-class ObjectSpecificationDescriptorPy(object):
-    """Implement the __providedBy__ attribute
-
-    The __providedBy__ attribute computes the interfaces provided by
-    an object.
-    """
-
-    def __get__(self, inst, cls):
-        """Get an object specification for an object
-
-        For example::
-
-          >>> from zope.interface import Interface
-          >>> class IFoo(Interface): pass
-          ...
-          >>> class IFooFactory(Interface): pass
-          ...
-          >>> class C(object):
-          ...   implements(IFoo)
-          ...   classProvides(IFooFactory)
-          >>> [i.getName() for i in C.__providedBy__]
-          ['IFooFactory']
-          >>> [i.getName() for i in C().__providedBy__]
-          ['IFoo']
-
-        """
-
-        # Get an ObjectSpecification bound to either an instance or a class,
-        # depending on how we were accessed.
-        
-        if inst is None:
-            return getObjectSpecification(cls)
-
-        provides = getattr(inst, '__provides__', None)
-        if provides is not None:
-            return provides
-
-        return implementedBy(cls)
-
-ObjectSpecificationDescriptor = ObjectSpecificationDescriptorPy
-
-##############################################################################
-
-def _normalizeargs(sequence, output = None):
-    """Normalize declaration arguments
-
-    The arguments may contain Declarations, tuples, or single
-    interfaces.
-
-    Anything but individual interfaces or implements specs will be expanded.
-    """
-    if output is None:
-        output = []
-
-    cls = sequence.__class__
-    if InterfaceClass in cls.__mro__ or Implements in cls.__mro__:
-        output.append(sequence)
-    else:
-        for v in sequence:
-            _normalizeargs(v, output)
-            
-    return output
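-
-# For example (a sketch; I1..I3 stand for any interfaces):
-#
-#   _normalizeargs((I1, (I2, I3)))  =>  [I1, I2, I3]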
-
-_empty = Declaration()
-
-try:
-    import _zope_interface_coptimizations
-except ImportError:
-    pass
-else:
-    from _zope_interface_coptimizations import implementedBy, providedBy
-    from _zope_interface_coptimizations import getObjectSpecification
-    from _zope_interface_coptimizations import ObjectSpecificationDescriptor
-
-objectSpecificationDescriptor = ObjectSpecificationDescriptor()
-    
diff --git a/branches/bug1734/src/zope/interface/document.py b/branches/bug1734/src/zope/interface/document.py
deleted file mode 100644
index 6fe0d017..00000000
--- a/branches/bug1734/src/zope/interface/document.py
+++ /dev/null
@@ -1,121 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-""" Pretty-Print an Interface object as structured text (Yum)
-
-This module provides a function, asStructuredText, for rendering an
-interface as structured text.
-
-$Id$
-"""
-from string import maketrans
-import zope.interface
-
-def asStructuredText(I, munge=0):
-    """ Output structured text format.  Note, this will wack any existing
-    'structured' format of the text.  """
-
-
-    r = ["%s\n\n" % I.getName()]
-    outp = r.append
-    level = 1
-
-    if I.getDoc():
-        outp(_justify_and_indent(_trim_doc_string(I.getDoc()), level)+ "\n\n")
-
-    bases = [base
-             for base in I.__bases__
-             if base is not zope.interface.Interface
-             ]
-    if bases:
-        outp((" " * level) + "This interface extends:\n\n")
-        level = level + 1
-        for b in bases:
-            item = "o %s" % b.getName()
-            outp(_justify_and_indent(_trim_doc_string(item), level, munge)
-                 + "\n\n")
-
-        level = level - 1
-
-    outp(_justify_and_indent("Attributes:", level, munge)+'\n\n')
-    level = level + 1
-
-    namesAndDescriptions = I.namesAndDescriptions()
-    namesAndDescriptions.sort()
-
-    for name, desc in namesAndDescriptions:
-        if not hasattr(desc, 'getSignatureString'):   # ugh...
-            item = "%s -- %s" % (desc.getName(),
-                                 desc.getDoc() or 'no documentation')
-            outp(_justify_and_indent(_trim_doc_string(item), level, munge)
-                 + "\n\n")
-    level = level - 1
-
-    outp(_justify_and_indent("Methods:", level, munge)+'\n\n')
-    level = level + 1
-    for name, desc in namesAndDescriptions:
-        if hasattr(desc, 'getSignatureString'):   # ugh...
-            item = "%s%s -- %s" % (desc.getName(),
-                                   desc.getSignatureString(),
-                                   desc.getDoc() or 'no documentation')
-            outp(_justify_and_indent(_trim_doc_string(item), level, munge)
-                 + "\n\n")
-
-    return "".join(r)
-
-def _trim_doc_string(text):
-    """
-    Trims a doc string to make it format
-    correctly with structured text.
-    """
-    text = text.strip().replace('\r\n', '\n')
-    lines = text.split('\n')
-    nlines = [lines[0]]
-    if len(lines) > 1:
-        min_indent=None
-        for line in lines[1:]:
-            indent=len(line) - len(line.lstrip())
-            if min_indent is None or indent < min_indent:
-                min_indent=indent
-        for line in lines[1:]:
-            nlines.append(line[min_indent:])
-    return '\n'.join(nlines)
-
-
-_trans = maketrans("\r\n", "  ")
-def _justify_and_indent(text, level, munge=0, width=72):
-    """ indent and justify text, rejustify (munge) if specified """
-
-    lines = []
-
-    if munge:
-        line = " " * level
-        # str.translate takes the translation table as its first argument:
-        text = text.translate(_trans).strip().split()
-
-        for word in text:
-            line = ' '.join([line, word])
-            if len(line) > width:
-                lines.append(line)
-                line = " " * level
-        else:
-            lines.append(line)
-
-        return "\n".join(lines)
-
-    else:
-        text = text.replace("\r\n", "\n").split("\n")
-
-        for line in text:
-            lines.append((" " * level) + line)
-
-        return '\n'.join(lines)
diff --git a/branches/bug1734/src/zope/interface/exceptions.py b/branches/bug1734/src/zope/interface/exceptions.py
deleted file mode 100644
index da0cf790..00000000
--- a/branches/bug1734/src/zope/interface/exceptions.py
+++ /dev/null
@@ -1,69 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interface-specific exceptions
-
-$Id$
-"""
-
-class Invalid(Exception):
-    """An specification is violated
-    """
-
-class DoesNotImplement(Invalid):
-    """ This object does not implement """
-    def __init__(self, interface):
-        self.interface = interface
-
-    def __str__(self):
-        return """An object does not implement interface %(interface)s
-
-        """ % self.__dict__
-
-class BrokenImplementation(Invalid):
-    """An attribute is not completely implemented.
-    """
-
-    def __init__(self, interface, name):
-        self.interface=interface
-        self.name=name
-
-    def __str__(self):
-        return """An object has failed to implement interface %(interface)s
-
-        The %(name)s attribute was not provided.
-        """ % self.__dict__
-
-class BrokenMethodImplementation(Invalid):
-    """An method is not completely implemented.
-    """
-
-    def __init__(self, method, mess):
-        self.method=method
-        self.mess=mess
-
-    def __str__(self):
-        return """The implementation of %(method)s violates its contract
-        because %(mess)s.
-        """ % self.__dict__
-
-class InvalidInterface(Exception):
-    """The interface has invalid contents
-    """
-
-class BadImplements(TypeError):
-    """An implementation assertion is invalid
-
-    because it doesn't contain an interface or a sequence of valid
-    implementation assertions.
-    """
diff --git a/branches/bug1734/src/zope/interface/human.txt b/branches/bug1734/src/zope/interface/human.txt
deleted file mode 100644
index 0a75595d..00000000
--- a/branches/bug1734/src/zope/interface/human.txt
+++ /dev/null
@@ -1,152 +0,0 @@
-==========================
-Using the Adapter Registry
-==========================
-
-This is a small demonstration of the zope.interface package including its
-adapter registry. It is intended to provide a concrete but narrow example of
-how to use interfaces and adapters outside of Zope 3.
-
-First we have to import the interface package.
-
-  >>> import zope.interface
-
-We now develop an interface for our object, which is a simple file in this
-case. For now we simply support one attribute, the body, which contains the
-actual file contents.
-
-  >>> class IFile(zope.interface.Interface):
-  ...
-  ...     body = zope.interface.Attribute('Contents of the file.')
-  ...
-
-For statistical reasons we often want to know the size of a file. However, it
-would be clumsy to implement the size directly in the file object, since the
-size really represents meta-data. Thus we create another interface that
-provides the size of something.
-
-  >>> class ISize(zope.interface.Interface):
-  ...
-  ...     def getSize():
-  ...         'Return the size of an object.'
-  ...
-
-Now we need to implement the file. The class must declare that its
-instances implement the `IFile` interface. We also provide a default
-body value (just to make things simpler for this example).
-
-  >>> class File(object):
-  ...
-  ...      zope.interface.implements(IFile)
-  ...      body = 'foo bar'
-  ...
-
-Next we implement an adapter that can provide the `ISize` interface given any
-object providing `IFile`. By convention we use `__used_for__` to specify the
-interface that we expect the adapted object to provide, in our case
-`IFile`. However, this attribute is purely advisory; the machinery does not
-use it. If an adapter adapts objects providing multiple interfaces, just
-specify the interfaces via a tuple.
-
-Again by convention, the constructor of an adapter takes one argument, the
-context. The context in this case is an instance of `File` (providing `IFile`)
-from which the size will be extracted. Also by convention the context is
-stored in an attribute named `context` on the adapter. The Twisted community
-refers to the context as the `original` object. However, you may feel free to
-use a specific argument name, such as `file`.
-
-  >>> class FileSize(object):
-  ...
-  ...      zope.interface.implements(ISize)
-  ...      __used_for__ = IFile
-  ...
-  ...      def __init__(self, context):
-  ...          self.context = context
-  ...
-  ...      def getSize(self):
-  ...          return len(self.context.body)
-  ...
-
-Now that we have written our adapter, we have to register it with an adapter
-registry, so that it can be looked up when needed. There is no such thing as a
-global registry; thus we have to instantiate one for our example manually.
-
-  >>> from zope.interface.adapter import AdapterRegistry
-  >>> registry = AdapterRegistry()
-
-
-The registry keeps a map from the interfaces an object already provides to
-the adapters that can be built on top of them. Therefore, we next have to
-register an adapter that adapts from `IFile` to `ISize`. The first argument to
-the registry's `register()` method is a list of original interfaces. In our
-case we have only one original interface, `IFile`. A list makes sense, since
-the interface package has the concept of multi-adapters, which are adapters
-that require multiple objects to adapt to a new interface. In these
-situations, your adapter constructor will require an argument for each
-specified interface.
-
-The second argument is the interface the adapter provides, in our case
-`ISize`. The third argument is the name of the adapter. Since we do not care
-about names, we simply leave it as an empty string. Names are commonly useful
-if you have several adapters for the same set of interfaces that are used in
-different situations. The last argument is simply the adapter class.
-
-  >>> registry.register([IFile], ISize, '', FileSize)
-
-You can now use the registry to look up the adapter.
-
-  >>> registry.lookup1(IFile, ISize, '')
-  <class '__main__.FileSize'>
-
-Let's get a little bit more practical. Let's create a `File` instance and
-create the adapter using a registry lookup. Then we see whether the adapter
-returns the correct size by calling `getSize()`.
-
-  >>> file = File()
-  >>> size = registry.lookup1(IFile, ISize, '')(file)
-  >>> size.getSize()
-  7
-
-However, this is not very practical, since we have to manually pass in the
-arguments to the lookup method. There is some syntactic sugar that will allow
-us to get an adapter instance by simply calling `ISize(file)`. To make use of
-this functionality, we need to add our registry to the `adapter_hooks` list,
-which lives in the `zope.interface.interface` module. This list stores a
-collection of
-callables that are automatically invoked when IFoo(obj) is called; their
-purpose is to locate adapters that implement an interface for a certain
-context instance.
-
-You are required to implement your own adapter hook; this example covers one
-of the simplest hooks that use the registry, but you could implement one that
-used an adapter cache or persistent adapters, for instance. The hook must
-accept the desired output interface (for us `ISize`) as its first argument
-and the context to adapt (here `file`) as its second. It returns an adapter,
-i.e. a `FileSize` instance.
-
-  >>> def hook(provided, object):
-  ...     adapter = registry.lookup1(zope.interface.providedBy(object),
-  ...                                provided, '')
-  ...     return adapter(object)
-  ...
-
-We now just add the hook to the `adapter_hooks` list.
-
-  >>> from zope.interface.interface import adapter_hooks
-  >>> adapter_hooks.append(hook)
-
-Once the hook is registered, you can use the desired syntax.
-
-  >>> size = ISize(file)
-  >>> size.getSize()
-  7
-
-Now we have to cleanup after ourselves, so that others after us have a clean
-`adapter_hooks` list.
-
-  >>> adapter_hooks.remove(hook)
-
-That's it. I have intentionally left out a discussion of named adapters and
-multi-adapters, since this text is intended as a practical and simple
-introduction to Zope 3 interfaces and adapters. You might want to read the
-`adapter.txt` in the `zope.interface` package for a more formal, referential,
-and complete treatment of the package. Warning: people have reported that
-`adapter.txt` makes their brain feel soft!
diff --git a/branches/bug1734/src/zope/interface/interface.py b/branches/bug1734/src/zope/interface/interface.py
deleted file mode 100644
index 5680fb65..00000000
--- a/branches/bug1734/src/zope/interface/interface.py
+++ /dev/null
@@ -1,942 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interface object implementation
-
-$Id$
-"""
-
-from __future__ import generators
-
-import sys
-import warnings
-import weakref
-from types import FunctionType
-from ro import ro
-from zope.interface.exceptions import Invalid, InvalidInterface
-
-CO_VARARGS = 4
-CO_VARKEYWORDS = 8
-TAGGED_DATA = '__interface_tagged_values__'
-
-def invariant(call):
-    f_locals = sys._getframe(1).f_locals
-    tags = f_locals.get(TAGGED_DATA)
-    if tags is None:
-        tags = f_locals[TAGGED_DATA] = {}
-    invariants = tags.get('invariants')
-    if invariants is None:
-        invariants = tags['invariants'] = []
-    invariants.append(call)
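-
-# Hedged usage sketch (IRange and range_check are hypothetical): inside an
-# interface body,
-#
-#   def range_check(ob):
-#       if ob.min > ob.max:
-#           raise Invalid("min must not exceed max")
-#
-#   class IRange(Interface):
-#       min = Attribute("Lower bound")
-#       max = Attribute("Upper bound")
-#       invariant(range_check)
-#
-# InterfaceClass.validateInvariants (below) then runs range_check against
-# candidate objects.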
-
-class Element(object):
-
-    # We can't say this yet because we don't have enough
-    # infrastructure in place.
-    #
-    #implements(IElement)
-
-    def __init__(self, __name__, __doc__=''):
-        """Create an 'attribute' description
-        """
-        if not __doc__ and __name__.find(' ') >= 0:
-            __doc__ = __name__
-            __name__ = None
-
-        self.__name__=__name__
-        self.__doc__=__doc__
-        self.__tagged_values = {}
-
-    def getName(self):
-        """ Returns the name of the object. """
-        return self.__name__
-
-    def getDoc(self):
-        """ Returns the documentation for the object. """
-        return self.__doc__
-
-    def getTaggedValue(self, tag):
-        """ Returns the value associated with 'tag'. """
-        return self.__tagged_values[tag]
-
-    def queryTaggedValue(self, tag, default=None):
-        """ Returns the value associated with 'tag'. """
-        return self.__tagged_values.get(tag, default)
-
-    def getTaggedValueTags(self):
-        """ Returns a list of all tags. """
-        return self.__tagged_values.keys()
-
-    def setTaggedValue(self, tag, value):
-        """ Associates 'value' with 'key'. """
-        self.__tagged_values[tag] = value
-
-class SpecificationBasePy(object):
-
-    def providedBy(self, ob):
-        """Is the interface implemented by an object
-
-          >>> from zope.interface import *
-          >>> class I1(Interface):
-          ...     pass
-          >>> class C(object):
-          ...     implements(I1)
-          >>> c = C()
-          >>> class X(object):
-          ...     pass
-          >>> x = X()
-          >>> I1.providedBy(x)
-          False
-          >>> I1.providedBy(C)
-          False
-          >>> I1.providedBy(c)
-          True
-          >>> directlyProvides(x, I1)
-          >>> I1.providedBy(x)
-          True
-          >>> directlyProvides(C, I1)
-          >>> I1.providedBy(C)
-          True
-        
-        """
-        spec = providedBy(ob)
-        return self in spec._implied
-
-    def implementedBy(self, cls):
-        """Do instances of the given class implement the interface?"""
-        spec = implementedBy(cls)
-        return self in spec._implied
-
-    def isOrExtends(self, interface):
-        """Is the interface the same as or extend the given interface
-
-        Examples::
-
-          >>> from zope.interface import Interface
-          >>> from zope.interface.declarations import Declaration
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration()
-          >>> int(spec.extends(Interface))
-          0
-          >>> spec = Declaration(I2)
-          >>> int(spec.extends(Interface))
-          1
-          >>> int(spec.extends(I1))
-          1
-          >>> int(spec.extends(I2))
-          1
-          >>> int(spec.extends(I3))
-          0
-          >>> int(spec.extends(I4))
-          0
-
-        """
-        return interface in self._implied
-
-SpecificationBase = SpecificationBasePy
-
-try:
-    from _zope_interface_coptimizations import SpecificationBase
-except ImportError:
-    pass
-
-class Specification(SpecificationBase):
-    """Specifications
-
-    An interface specification is used to track interface declarations
-    and component registrations.
-
-    This class is a base class for both interfaces themselves and for
-    interface specifications (declarations).
-
-    Specifications are mutable.  If you reassign their bases, their
-    relations with other specifications are adjusted accordingly.
-
-    For example:
-
-    >>> from zope.interface import Interface
-    >>> class I1(Interface):
-    ...     pass
-    >>> class I2(I1):
-    ...     pass
-    >>> class I3(I2):
-    ...     pass
-
-    >>> [i.__name__ for i in I1.__bases__]
-    ['Interface']
-
-    >>> [i.__name__ for i in I2.__bases__]
-    ['I1']
-
-    >>> int(I3.extends(I1))
-    1
-
-    >>> I2.__bases__ = (Interface, )
-
-    >>> [i.__name__ for i in I2.__bases__]
-    ['Interface']
-
-    >>> int(I3.extends(I1))
-    0
-        
-    """
-
-    # Copy some base class methods for speed
-    isOrExtends = SpecificationBase.isOrExtends
-    providedBy = SpecificationBase.providedBy
-
-    #########################################################################
-    # BBB 2004-07-13: Backward compatibility.  These methods have been
-    # deprecated in favour of providedBy and implementedBy.
-
-    def isImplementedByInstancesOf(self, cls):
-        warnings.warn(
-            "isImplementedByInstancesOf has been renamed to implementedBy",
-            DeprecationWarning, stacklevel=2,
-            )
-        return self.implementedBy(cls)
-
-    def isImplementedBy(self, ob):
-        warnings.warn(
-            "isImplementedBy has been renamed to providedBy",
-            DeprecationWarning, stacklevel=2,
-            )
-        return self.providedBy(ob)
-    #
-    #########################################################################
-
-    def __init__(self, bases=()):
-        self._implied = {}
-        self.dependents = weakref.WeakKeyDictionary()
-        self.__bases__ = tuple(bases)
-
-    def subscribe(self, dependent):
-        self.dependents[dependent] = 1
-
-    def unsubscribe(self, dependent):
-        del self.dependents[dependent]
-
-    def __setBases(self, bases):
-        # Register ourselves as a dependent of our old bases
-        for b in self.__bases__:
-            b.unsubscribe(self)
-        
-        # Register ourselves as a dependent of our bases
-        self.__dict__['__bases__'] = bases
-        for b in bases:
-            b.subscribe(self)
-        
-        self.changed()
-
-    __bases__ = property(
-        
-        lambda self: self.__dict__.get('__bases__', ()),
-        __setBases,
-        )
-
-    def changed(self):
-        """We, or something we depend on, have changed
-        """
-
-        implied = self._implied
-        implied.clear()
-
-        ancestors = ro(self)
-        self.__sro__ = tuple(ancestors)
-        self.__iro__ = tuple([ancestor for ancestor in ancestors
-                              if isinstance(ancestor, InterfaceClass)
-                             ])
-
-        for ancestor in ancestors:
-            # We directly imply our ancestors:
-            implied[ancestor] = ()
-
-        # Now, advise our dependents of change:
-        for dependent in self.dependents.keys():
-            dependent.changed()
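-
-    # Descriptive sketch: reassigning __bases__ (as in the class docstring
-    # above) funnels through __setBases into changed(), which rebuilds
-    # __sro__ and __iro__ and then cascades to every dependent
-    # specification.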
-
-
-    def interfaces(self):
-        """Return an iterator for the interfaces in the specification
-
-        for example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Specification((I2, I3))
-          >>> spec = Specification((I4, spec))
-          >>> i = spec.interfaces()
-          >>> i.next().getName()
-          'I4'
-          >>> i.next().getName()
-          'I2'
-          >>> i.next().getName()
-          'I3'
-          >>> list(i)
-          []
-        """
-        seen = {}
-        for base in self.__bases__:
-            for interface in base.interfaces():
-                if interface not in seen:
-                    seen[interface] = 1
-                    yield interface
-        
-
-    def extends(self, interface, strict=True):
-        """Does the specification extend the given interface?
-
-        Test whether an interface in the specification extends the
-        given interface
-
-        Examples::
-
-          >>> from zope.interface import Interface
-          >>> from zope.interface.declarations import Declaration
-          >>> class I1(Interface): pass
-          ...
-          >>> class I2(I1): pass
-          ...
-          >>> class I3(Interface): pass
-          ...
-          >>> class I4(I3): pass
-          ...
-          >>> spec = Declaration()
-          >>> int(spec.extends(Interface))
-          0
-          >>> spec = Declaration(I2)
-          >>> int(spec.extends(Interface))
-          1
-          >>> int(spec.extends(I1))
-          1
-          >>> int(spec.extends(I2))
-          1
-          >>> int(spec.extends(I3))
-          0
-          >>> int(spec.extends(I4))
-          0
-          >>> int(I2.extends(I2))
-          0
-          >>> int(I2.extends(I2, False))
-          1
-          >>> int(I2.extends(I2, strict=False))
-          1
-
-        """
-        return ((interface in self._implied)
-                and
-                ((not strict) or (self != interface))
-                )
-
-    def weakref(self, callback=None):
-        if callback is None:
-            return weakref.ref(self)
-        else:
-            return weakref.ref(self, callback)
-
-
-    def get(self, name, default=None):
-        """Query for an attribute description
-        """
-        try:
-            attrs = self._v_attrs
-        except AttributeError:
-            attrs = self._v_attrs = {}
-        attr = attrs.get(name)
-        if attr is None:
-            for iface in self.__iro__:
-                attr = iface.direct(name)
-                if attr is not None:
-                    attrs[name] = attr
-                    break
-            
-        if attr is None:
-            return default
-        else:
-            return attr
-
-class InterfaceClass(Element, Specification):
-    """Prototype (scarecrow) Interfaces Implementation."""
-
-    # We can't say this yet because we don't have enough
-    # infrastructure in place.
-    #
-    #implements(IInterface)
-
-    def __init__(self, name, bases=(), attrs=None, __doc__=None,
-                 __module__=None):
-
-        if __module__ is None:
-            if (attrs is not None and
-                ('__module__' in attrs) and
-                isinstance(attrs['__module__'], str)
-                ):
-                __module__ = attrs['__module__']
-                del attrs['__module__']
-            else:
-
-                try:
-                    # Figure out what module defined the interface.
-                    # This is how cPython figures out the module of
-                    # a class, but of course it does it in C. :-/
-                    __module__ = sys._getframe(1).f_globals['__name__']
-                except (AttributeError, KeyError):
-                    pass
-
-        self.__module__ = __module__
-
-        if attrs is None:
-            attrs = {}
-
-        d = attrs.get('__doc__')
-        if d is not None:
-            if not isinstance(d, Attribute):
-                if __doc__ is None:
-                    __doc__ = d
-                del attrs['__doc__']
-
-        if __doc__ is None:
-            __doc__ = ''
-
-        Element.__init__(self, name, __doc__)
-
-        if attrs.has_key(TAGGED_DATA):
-            tagged_data = attrs[TAGGED_DATA]
-            del attrs[TAGGED_DATA]
-        else:
-            tagged_data = None
-        if tagged_data is not None:
-            for key, val in tagged_data.items():
-                self.setTaggedValue(key, val)
-
-        for base in bases:
-            if not isinstance(base, InterfaceClass):
-                raise TypeError, 'Expected base interfaces'
-
-        Specification.__init__(self, bases)
-
-        # Make sure that all recorded attributes (and methods) are of type
-        # `Attribute` and `Method`
-        for name, attr in attrs.items():
-            if isinstance(attr, Attribute):
-                attr.interface = self
-                if not attr.__name__:
-                    attr.__name__ = name
-            elif isinstance(attr, FunctionType):
-                attrs[name] = fromFunction(attr, self, name=name)
-            else:
-                raise InvalidInterface("Concrete attribute, %s" % name)
-
-        self.__attrs = attrs
-
-        self.__identifier__ = "%s.%s" % (self.__module__, self.__name__)
-
-    def interfaces(self):
-        """Return an iterator for the interfaces in the specification
-
-        for example::
-
-          >>> from zope.interface import Interface
-          >>> class I1(Interface): pass
-          ...
-          >>> i = I1.interfaces()
-          >>> i.next().getName()
-          'I1'
-          >>> list(i)
-          []
-        """
-        yield self
-
-
-
-    def getBases(self):
-        return self.__bases__
-
-    def isEqualOrExtendedBy(self, other):
-        """Same interface or extends?"""
-        if self == other:
-            return True
-        return other.extends(self)
-
-    def names(self, all=False):
-        """Return the attribute names defined by the interface."""
-        if not all:
-            return self.__attrs.keys()
-
-        r = {}
-        for name in self.__attrs.keys():
-            r[name] = 1
-        for base in self.__bases__:
-            for name in base.names(all):
-                r[name] = 1
-        return r.keys()
-
-    def __iter__(self):
-        return iter(self.names(all=True))
-
-    def namesAndDescriptions(self, all=False):
-        """Return attribute names and descriptions defined by interface."""
-        if not all:
-            return self.__attrs.items()
-
-        r = {}
-        for name, d in self.__attrs.items():
-            r[name] = d
-
-        for base in self.__bases__:
-            for name, d in base.namesAndDescriptions(all):
-                if name not in r:
-                    r[name] = d
-
-        return r.items()
-
-    def getDescriptionFor(self, name):
-        """Return the attribute description for the given name."""
-        r = self.get(name)
-        if r is not None:
-            return r
-
-        raise KeyError, name
-
-    __getitem__ = getDescriptionFor
-
-    def __contains__(self, name):
-        return self.get(name) is not None
-
-    def direct(self, name):
-        return self.__attrs.get(name)
-
-    def queryDescriptionFor(self, name, default=None):
-        return self.get(name, default)
-
-    def deferred(self):
-        """Return a defered class corresponding to the interface."""
-        if hasattr(self, "_deferred"): return self._deferred
-
-        klass={}
-        exec "class %s: pass" % self.__name__ in klass
-        klass=klass[self.__name__]
-
-        self.__d(klass.__dict__)
-
-        self._deferred=klass
-
-        return klass
-
-    def validateInvariants(self, obj, errors=None):
-        """validate object to defined invariants."""
-        for call in self.queryTaggedValue('invariants', []):
-            try:
-                call(obj)
-            except Invalid, e:
-                if errors is None:
-                    raise
-                else:
-                    errors.append(e)
-        for base in self.__bases__:
-            try:
-                base.validateInvariants(obj, errors)
-            except Invalid:
-                if errors is None:
-                    raise
-                pass
-        if errors:
-            raise Invalid(errors)
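-
-    # Hedged usage sketch for validateInvariants, reusing the hypothetical
-    # IRange/range_check example near the ``invariant`` helper above:
-    #
-    #   errors = []
-    #   IRange.validateInvariants(candidate, errors)
-    #
-    # This collects Invalid exceptions in ``errors`` rather than raising
-    # the first one; the aggregate Invalid(errors) above is still raised
-    # at the end when anything was collected.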
-
-    def _getInterface(self, ob, name):
-        """Retrieve a named interface."""
-        return None
-
-    def __d(self, dict):
-
-        for k, v in self.__attrs.items():
-            if isinstance(v, Method) and not (k in dict):
-                dict[k]=v
-
-        for b in self.__bases__: b.__d(dict)
-
-    def __repr__(self):
-        r = getattr(self, '_v_repr', self)
-        if r is self:
-            name = self.__name__
-            m = self.__module__
-            if m:
-                name = '%s.%s' % (m, name)
-            r = "<%s %s>" % (self.__class__.__name__, name)
-            self._v_repr = r
-        return r
-
-    def __call__():
-        # TRICK! Create the call method
-        #
-        # An embedded function is used to allow an optional argument to
-        # __call__ without resorting to a global marker.
-        #
-        # The evility of this trick is a reflection of the underlying
-        # evility of "optional" arguments, arguments whose presence or
-        # absence changes the behavior of the method.
-        #
-        # I think the evil is necessary, and perhaps desirable, to
-        # provide some consistency with the PEP 246 adapt method.
-
-        marker = object()
-        
-        def __call__(self, obj, alternate=marker):
-            """Adapt an object to the interface
-
-               The semantics are based on those of the PEP 246 adapt function.
-
-               If an object cannot be adapted, then a TypeError is raised::
-
-                 >>> import zope.interface
-                 >>> class I(zope.interface.Interface):
-                 ...     pass
-
-                 >>> I(0)
-                 Traceback (most recent call last):
-                 ...
-                 TypeError: ('Could not adapt', 0, """ \
-                      """<InterfaceClass zope.interface.interface.I>)
-
-               unless an alternate value is provided as a second
-               positional argument::
-
-                 >>> I(0, 'bob')
-                 'bob'
-
-               If an object already implements the interface, then it will be
-               returned::
-
-                 >>> class C(object):
-                 ...     zope.interface.implements(I)
-
-                 >>> obj = C()
-                 >>> I(obj) is obj
-                 True
-
-               If an object implements __conform__, then it will be used::
-
-                 >>> class C(object):
-                 ...     zope.interface.implements(I)
-                 ...     def __conform__(self, proto):
-                 ...          return 0
-
-                 >>> I(C())
-                 0
-
-               Adapter hooks (see __adapt__) will also be used, if present:
-
-                 >>> from zope.interface.interface import adapter_hooks
-                 >>> def adapt_0_to_42(iface, obj):
-                 ...     if obj == 0:
-                 ...         return 42
-
-                 >>> adapter_hooks.append(adapt_0_to_42)
-                 >>> I(0)
-                 42
-
-                 >>> adapter_hooks.remove(adapt_0_to_42)
-                 >>> I(0)
-                 Traceback (most recent call last):
-                 ...
-                 TypeError: ('Could not adapt', 0, """ \
-                      """<InterfaceClass zope.interface.interface.I>)
-
-            """
-            conform = getattr(obj, '__conform__', None)
-            if conform is not None:
-                try:
-                    adapter = conform(self)
-                except TypeError:
-                    # We got a TypeError. It might be an error raised by
-                    # the __conform__ implementation, or *we* may have
-                    # made the TypeError by calling an unbound method
-                    # (object is a class).  In the latter case, we behave
-                    # as though there is no __conform__ method. We can
-                    # detect this case by checking whether there is more
-                    # than one traceback object in the traceback chain:
-                    if sys.exc_info()[2].tb_next is not None:
-                        # There is more than one entry in the chain, so
-                        # reraise the error:
-                        raise
-                    # This clever trick is from Phillip Eby
-                else:
-                    if adapter is not None:
-                        return adapter
-
-            adapter = self.__adapt__(obj)
-
-            if adapter is None:
-                if alternate is not marker:
-                    return alternate
-                
-                raise TypeError("Could not adapt", obj, self)
-
-            return adapter
-
-        return __call__
-
-    __call__ = __call__() # TRICK! Make the *real* __call__ method
-
-    def __adapt__(self, obj):
-        """Adapt an object to the reciever
-
-           This method is normally not called directly. It is called by
-           the PEP 246 adapt framework and by the interface __call__
-           operator. 
-
-           The adapt method is responsible for adapting an object to
-           the receiver.
-
-           The default version returns None::
-
-             >>> import zope.interface
-             >>> class I(zope.interface.Interface):
-             ...     pass
-
-             >>> I.__adapt__(0)
-
-           unless the object given provides the interface::
-
-             >>> class C(object):
-             ...     zope.interface.implements(I)
-
-             >>> obj = C()
-             >>> I.__adapt__(obj) is obj
-             True
-
-           Adapter hooks can be provided (or removed) to provide custom
-           adaptation. We'll install a silly hook that adapts 0 to 42.
-           We install a hook by simply adding it to the adapter_hooks
-           list::
-
-             >>> from zope.interface.interface import adapter_hooks
-             >>> def adapt_0_to_42(iface, obj):
-             ...     if obj == 0:
-             ...         return 42
-
-             >>> adapter_hooks.append(adapt_0_to_42)
-             >>> I.__adapt__(0)
-             42
-
-           Hooks must either return an adapter, or None if no adapter can
-           be found.
-
-           Hooks can be uninstalled by removing them from the list::
-
-             >>> adapter_hooks.remove(adapt_0_to_42)
-             >>> I.__adapt__(0)
-
-           """
-        if self.providedBy(obj):
-            return obj
-
-        for hook in adapter_hooks:
-            adapter = hook(self, obj)
-            if adapter is not None:
-                return adapter
-
-    def __reduce__(self):
-        return self.__name__
-
-    def __cmp(self, o1, o2):
-        # Yes, I did mean to name this __cmp, rather than __cmp__.
-        # It is a private method used by __lt__ and __gt__.
-        # I don't want to override __eq__ because I want the default
-        # __eq__, which is really fast.
-        """Make interfaces sortable
-
-        TODO: It would be nice if:
-
-           More specific interfaces should sort before less specific ones.
-           Otherwise, sort on name and module.
-
-           But this is too complicated, and we're going to punt on it
-           for now.
-
-        For now, sort on interface and module name.
-
-        None is treated as a pseudo interface that implies the loosest
-        contract possible: no contract at all. For that reason, all
-        interfaces sort before None.
-
-        """
-        if o1 == o2:
-            return 0
-
-        if o1 is None:
-            return 1
-        if o2 is None:
-            return -1
-
-
-        n1 = (getattr(o1, '__name__', ''),
-              getattr(getattr(o1,  '__module__', None), '__name__', ''))
-        n2 = (getattr(o2, '__name__', ''),
-              getattr(getattr(o2,  '__module__', None), '__name__', ''))
-
-        return cmp(n1, n2)
-
-    def __lt__(self, other):
-        c = self.__cmp(self, other)
-        #print '<', self, other, c < 0, c
-        return c < 0
-
-    def __gt__(self, other):
-        c = self.__cmp(self, other)
-        #print '>', self, other, c > 0, c
-        return c > 0
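-
-    # Illustration (editor's sketch; IA and IB are hypothetical):
-    # interfaces compare primarily by name, and every interface sorts
-    # before None:
-    #
-    #   >>> class IA(Interface): pass
-    #   >>> class IB(Interface): pass
-    #   >>> IA < IB, IB < IA
-    #   (True, False)
-    #   >>> IA < None
-    #   True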
-
-
-adapter_hooks = []
-
-Interface = InterfaceClass("Interface", __module__ = 'zope.interface')
-
-class Attribute(Element):
-    """Attribute descriptions
-    """
-
-    # We can't say this yet because we don't have enough
-    # infrastructure in place.
-    #
-    # implements(IAttribute)
-
-    interface = None
-
-
-class Method(Attribute):
-    """Method interfaces
-
-    The idea here is that you have objects that describe methods.
-    This provides an opportunity for rich meta-data.
-    """
-
-    # We can't say this yet because we don't have enough
-    # infrastructure in place.
-    #
-    # implements(IMethod)
-
-    def __call__(self, *args, **kw):
-        raise BrokenImplementation(self.interface, self.__name__)
-
-    def getSignatureInfo(self):
-        return {'positional': self.positional,
-                'required': self.required,
-                'optional': self.optional,
-                'varargs': self.varargs,
-                'kwargs': self.kwargs,
-                }
-
-    def getSignatureString(self):
-        sig = "("
-        for v in self.positional:
-            sig = sig + v
-            if v in self.optional.keys():
-                sig = sig + "=%s" % `self.optional[v]`
-            sig = sig + ", "
-        if self.varargs:
-            sig = sig + ("*%s, " % self.varargs)
-        if self.kwargs:
-            sig = sig + ("**%s, " % self.kwargs)
-
-        # slice off the last comma and space
-        if self.positional or self.varargs or self.kwargs:
-            sig = sig[:-2]
-
-        sig = sig + ")"
-        return sig
-
-
-def fromFunction(func, interface=None, imlevel=0, name=None):
-    name = name or func.__name__
-    method = Method(name, func.__doc__)
-    defaults = func.func_defaults or ()
-    code = func.func_code
-    # Number of positional arguments
-    na = code.co_argcount-imlevel
-    names = code.co_varnames[imlevel:]
-    opt = {}
-    # Number of required arguments
-    nr = na-len(defaults)
-    if nr < 0:
-        defaults=defaults[-nr:]
-        nr = 0
-
-    # Determine the optional arguments.
-    for i in range(len(defaults)):
-        opt[names[i+nr]] = defaults[i]
-
-    method.positional = names[:na]
-    method.required = names[:nr]
-    method.optional = opt
-
-    argno = na
-
-    # Determine the function's variable argument's name (i.e. *args)
-    if code.co_flags & CO_VARARGS:
-        method.varargs = names[argno]
-        argno = argno + 1
-    else:
-        method.varargs = None
-
-    # Determine the function's keyword argument's name (i.e. **kw)
-    if code.co_flags & CO_VARKEYWORDS:
-        method.kwargs = names[argno]
-    else:
-        method.kwargs = None
-
-    method.interface = interface
-
-    for key, value in func.__dict__.items():
-        method.setTaggedValue(key, value)
-
-    return method
-
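-# Editor's illustration (hedged; `sample` is a hypothetical function):
-#
-#   >>> def sample(a, b=1, *args, **kw): pass
-#   >>> m = fromFunction(sample)
-#   >>> m.getSignatureString()
-#   '(a, b=1, *args, **kw)'
-#   >>> m.positional, m.required, m.optional
-#   (('a', 'b'), ('a',), {'b': 1})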
-
-def fromMethod(meth, interface=None, name=None):
-    func = meth.im_func
-    return fromFunction(func, interface, imlevel=1, name=name)
-
-
-# Now we can create the interesting interfaces and wire them up:
-def _wire():
-    from zope.interface.declarations import classImplements
-
-    from zope.interface.interfaces import IAttribute
-    classImplements(Attribute, IAttribute)
-
-    from zope.interface.interfaces import IMethod
-    classImplements(Method, IMethod)
-
-    from zope.interface.interfaces import IInterface
-    classImplements(InterfaceClass, IInterface)
-
-# We import this here to deal with module dependencies.
-from zope.interface.declarations import providedBy, implementedBy
-from zope.interface.exceptions import InvalidInterface
-from zope.interface.exceptions import BrokenImplementation
diff --git a/branches/bug1734/src/zope/interface/interfaces.py b/branches/bug1734/src/zope/interface/interfaces.py
deleted file mode 100644
index f40e682f..00000000
--- a/branches/bug1734/src/zope/interface/interfaces.py
+++ /dev/null
@@ -1,681 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interface Package Interfaces
-
-$Id$
-"""
-__docformat__ = 'restructuredtext'
-
-
-from zope.interface import Interface
-from zope.interface.interface import Attribute
-
-class IElement(Interface):
-    """Objects that have basic documentation and tagged values.
-    """
-
-    __name__ = Attribute('__name__', 'The object name')
-    __doc__  = Attribute('__doc__', 'The object doc string')
-
-    def getTaggedValue(tag):
-        """Returns the value associated with 'tag'.
-
-        Raise a KeyError if the tag isn't set.
-        """
-
-    def queryTaggedValue(tag, default=None):
-        """Returns the value associated with 'tag'.
-
-        Return the default value if the tag isn't set.
-        """
-
-    def getTaggedValueTags():
-        """Returns a list of all tags."""
-
-    def setTaggedValue(tag, value):
-        """Associates 'value' with 'key'."""
-
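-    # Editor's sketch (IExample is hypothetical): tagged values attach
-    # arbitrary application data to an element, e.g.
-    #
-    #   >>> class IExample(Interface):
-    #   ...     pass
-    #   >>> IExample.setTaggedValue('author', 'alice')
-    #   >>> IExample.getTaggedValue('author')
-    #   'alice'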
-
-class IAttribute(IElement):
-    """Attribute descriptors"""
-
-    interface = Attribute('interface',
-                          'Stores the interface instance in which the '
-                          'attribute is located.')
-
-
-class IMethod(IAttribute):
-    """Method attributes
-    """
-
-    def getSignatureInfo():
-        """Returns the signature information.
-
-        This method returns a dictionary with the following keys:
-
-        o `positional` - All positional arguments.
-
-        o `required` - A list of all required arguments.
-
-        o `optional` - A list of all optional arguments.
-
-        o `varargs` - The name of the varargs argument.
-
-        o `kwargs` - The name of the kwargs argument.
-        """
-
-    def getSignatureString():
-        """Return a signature string suitable for inclusion in documentation.
-
-        This method returns the function signature string. For example, if you
-        have `func(a, b, c=1, d='f')`, then the signature string is `(a, b,
-        c=1, d='f')`.
-        """
-
-class ISpecification(Interface):
-    """Object Behavioral specifications
-    """
-
-    def extends(other, strict=True):
-        """Test whether a specification extends another
-
-        The specification extends other if it has other as a base
-        interface or if one of its bases extends other.
-
-        If strict is false, then the specification extends itself.
-        
-        """
-
-    def isOrExtends(other):
-        """Test whether the specification is or extends another
-        """
-
-    def weakref(callback=None):
-        """Return a weakref to the specification
-
-        This method is, regrettably, needed to allow weakrefs to be
-        computed to security-proxied specifications.  While the
-        zope.interface package does not require zope.security or
-        zope.proxy, it has to be able to coexist with it.
-
-        """
-
-    __bases__ = Attribute("""Base specifications
-
-    A tuple of specifications from which this specification is
-    directly derived.
-
-    """)
-
-    __sro__ = Attribute("""Specification-resolution order
-
-    A tuple of the specification and all of its ancestor
-    specifications from most specific to least specific.
-
-    (This is similar to the method-resolution order for new-style classes.)
-    """)
-
-    def get(name, default=None):
-        """Look up the description for a name
-
-        If the named attribute is not defined, the default is
-        returned.
-        """
-
-        
-class IInterface(ISpecification, IElement):
-    """Interface objects
-
-    Interface objects describe the behavior of an object by containing
-    useful information about the object.  This information includes:
-
-      o Prose documentation about the object.  In Python terms, this
-        is called the "doc string" of the interface.  In this element,
-        you describe how the object works in prose language and any
-        other useful information about the object.
-
-      o Descriptions of attributes.  Attribute descriptions include
-        the name of the attribute and prose documentation describing
-        the attribute's usage.
-
-      o Descriptions of methods.  Method descriptions can include:
-
-        o Prose "doc string" documentation about the method and its
-          usage.
-
-        o A description of the method's arguments; how many arguments
-          are expected, optional arguments and their default values,
-          the position of arguments in the signature, whether the
-          method accepts arbitrary arguments and whether the method
-          accepts arbitrary keyword arguments.
-
-      o Optional tagged data.  Interface objects (and their attributes and
-        methods) can have optional, application specific tagged data
-        associated with them.  Example uses include code examples,
-        security assertions, pre/post conditions, and other possible
-        information you may want to associate with an Interface or its
-        attributes.
-
-    Not all of this information is mandatory.  For example, you may
-    only want the methods of your interface to have prose
-    documentation and not describe the arguments of the method in
-    exact detail.  Interface objects are flexible and let you give or
-    take any of these components.
-
-    Interfaces are created with the Python class statement using
-    either Interface.Interface or another interface, as in::
-
-      from zope.interface import Interface
-
-      class IMyInterface(Interface):
-        '''Interface documentation
-        '''
-
-        def meth(arg1, arg2):
-            '''Documentation for meth
-            '''
-
-        # Note that there is no self argument
-
-      class IMySubInterface(IMyInterface):
-        '''Interface documentation
-        '''
-
-        def meth2():
-            '''Documentation for meth2
-            '''
-
-    You use interfaces in two ways:
-
-    o You assert that your object implements the interfaces.
-
-      There are several ways that you can assert that an object
-      implements an interface::
-
-      1. Call zope.interface.implements in your class definition.
-
-      2. Call zope.interface.directlyProvides on your object.
-
-      3. Call 'zope.interface.classImplements' to assert that instances
-         of a class implement an interface.
-
-         For example::
-
-           from zope.interface import classImplements
-
-           classImplements(some_class, some_interface)
-
-         This approach is useful when it is not an option to modify
-         the class source.  Note that this doesn't affect what the
-         class itself implements, but only what its instances
-         implement.
-
-    o You query interface meta-data. See the IInterface methods and
-      attributes for details.
-
-    """
-
-    def providedBy(object):
-        """Test whether the interface is implemented by the object
-
-        Return true if the object asserts that it implements the
-        interface, including asserting that it implements an extended
-        interface.
-        """
-
-    def implementedBy(class_):
-        """Test whether the interface is implemented by instances of the class
-
-        Return true if the class asserts that its instances implement the
-        interface, including asserting that they implement an extended
-        interface.
-        """
-
-    def names(all=False):
-        """Get the interface attribute names
-
-        Return a sequence of the names of the attributes, including
-        methods, included in the interface definition.
-
-        Normally, only directly defined attributes are included. If
-        a true positional or keyword argument is given, then
-        attributes defined by base classes will be included.
-        """
-
-    def namesAndDescriptions(all=False):
-        """Get the interface attribute names and descriptions
-
-        Return a sequence of the names and descriptions of the
-        attributes, including methods, as name-value pairs, included
-        in the interface definition.
-
-        Normally, only directly defined attributes are included. If
-        a true positional or keyword argument is given, then
-        attributes defined by base classes will be included.
-        """
-
-    def __getitem__(name):
-        """Get the description for a name
-
-        If the named attribute is not defined, a KeyError is raised.
-        """
-
-    def direct(name):
-        """Get the description for the name if it was defined by the interface
-
-        If the interface doesn't define the name, returns None.
-        """
-    
-    def validateInvariants(obj, errors=None):
-        """Validate invariants
-
-        Validate the object against its invariants.  If errors is None,
-        raises first Invalid error; if errors is a list, appends all errors
-        to list, then raises Invalid with the errors as the first element
-        of the "args" tuple."""
-
-    def __contains__(name):
-        """Test whether the name is defined by the interface"""
-
-    def __iter__():
-        """Return an iterator over the names defined by the interface
-
-        The names iterated include all of the names defined by the
-        interface directly and indirectly by base interfaces.
-        """
-
-    __module__ = Attribute("""The name of the module defining the interface""")
-
-class IDeclaration(ISpecification):
-    """Interface declaration
-
-    Declarations are used to express the interfaces implemented by
-    classes or provided by objects.
-    
-    """
-
-    def __contains__(interface):
-        """Test whether an interface is in the specification
-
-        Return true if the given interface is one of the interfaces in
-        the specification and false otherwise.
-        """
-
-    def __iter__():
-        """Return an iterator for the interfaces in the specification
-        """
-
-    def flattened():
-        """Return an iterator of all included and extended interfaces
-
-        An iterator is returned for all interfaces either included in
-        or extended by interfaces included in the specifications
-        without duplicates. The interfaces are in "interface
-        resolution order". The interface resolution order is such that
-        base interfaces are listed after interfaces that extend them
-        and, otherwise, interfaces are included in the order that they
-        were defined in the specification.
-        """
-
-    def __sub__(interfaces):
-        """Create an interface specification with some interfaces excluded
-
-        The argument can be an interface or an interface
-        specification.  The interface or interfaces given in a
-        specification are subtracted from the interface specification.
-
-        Removing an interface that is not in the specification does
-        not raise an error. Doing so has no effect.
-
-        Removing an interface also removes sub-interfaces of the interface.
-
-        """
-
-    def __add__(interfaces):
-        """Create an interface specification with some interfaces added
-
-        The argument can be an interface or an interface
-        specification.  The interface or interfaces given in a
-        specification are added to the interface specification.
-
-        Adding an interface that is already in the specification does
-        not raise an error. Doing so has no effect.
-        """
-
-    def __nonzero__():
-        """Return a true value of the interface specification is non-empty
-        """
-
-class IInterfaceDeclaration(Interface):
-    """Declare and check the interfaces of objects
-
-    The functions defined in this interface are used to declare the
-    interfaces that objects provide and to query the interfaces that have
-    been declared.
-
-    Interfaces can be declared for objects in two ways:
-
-    - Interfaces are declared for instances of the object's class
-
-    - Interfaces are declared for the object directly.
-
-    The interfaces declared for an object are, therefore, the union of
-    interfaces declared for the object directly and the interfaces
-    declared for instances of the object's class.
-
-    Note that we say that a class implements the interfaces provided
-    by its instances.  An instance can also provide interfaces
-    directly.  The interfaces provided by an object are the union of
-    the interfaces provided directly and the interfaces implemented by
-    the class.
-    """
-
-    def providedBy(ob):
-        """Return the interfaces provided by an object
-
-        This is the union of the interfaces directly provided by an
-        object and interfaces implemented by its class.
-
-        The value returned is an IDeclaration.
-        """
-
-    def implementedBy(class_):
-        """Return the interfaces implemented for a class' instances
-
-        The value returned is an IDeclaration.
-        """
-
-    def classImplements(class_, *interfaces):
-        """Declare additional interfaces implemented for instances of a class
-
-        The arguments after the class are one or more interfaces or
-        interface specifications (IDeclaration objects).
-
-        The interfaces given (including the interfaces in the
-        specifications) are added to any interfaces previously
-        declared.
-
-        Consider the following example::
-
-          class C(A, B):
-             ...
-
-          classImplements(C, I1, I2)
-
-
-        Instances of ``C`` provide ``I1``, ``I2``, and whatever interfaces
-        instances of ``A`` and ``B`` provide.
-        """
-
-    def implementer(*interfaces):
-        """Create a decorator for declaring interfaces implemented by a facory
-
-        A callable is returned that makes an implements declaration on
-        objects passed to it.
-        
-        """
-
-    def classImplementsOnly(class_, *interfaces):
-        """Declare the only interfaces implemented by instances of a class
-
-        The arguments after the class are one or more interfaces or
-        interface specifications (IDeclaration objects).
-
-        The interfaces given (including the interfaces in the
-        specifications) replace any previous declarations.
-
-        Consider the following example::
-
-          class C(A, B):
-             ...
-
-          classImplements(C, IA, IB, IC)
-          classImplementsOnly(C, I1, I2)
-
-        Instances of ``C`` provide only ``I1`` and ``I2``, regardless of
-        what interfaces instances of ``A`` and ``B`` implement.
-        """
-
-    def directlyProvidedBy(object):
-        """Return the interfaces directly provided by the given object
-
-        The value returned is an IDeclaration.
-        """
-
-    def directlyProvides(object, *interfaces):
-        """Declare interfaces declared directly for an object
-
-        The arguments after the object are one or more interfaces or
-        interface specifications (IDeclaration objects).
-
-        The interfaces given (including the interfaces in the
-        specifications) replace interfaces previously
-        declared for the object.
-
-        Consider the following example::
-
-          class C(A, B):
-             ...
-
-          ob = C()
-          directlyProvides(ob, I1, I2)
-
-        The object, ``ob``, provides ``I1``, ``I2``, and whatever interfaces
-        have been declared for instances of ``C``.
-
-        To remove directly provided interfaces, use ``directlyProvidedBy`` and
-        subtract the unwanted interfaces. For example::
-
-          directlyProvides(ob, directlyProvidedBy(ob)-I2)
-
-        removes ``I2`` from the interfaces directly provided by
-        ``ob``. The object, ``ob``, no longer directly provides ``I2``,
-        although it might still provide ``I2`` if its class
-        implements ``I2``.
-
-        To add directly provided interfaces, use ``directlyProvidedBy`` and
-        include additional interfaces.  For example::
-
-          directlyProvides(ob, directlyProvidedBy(ob), I2)
-
-        adds ``I2`` to the interfaces directly provided by ``ob``.
-        """
-
-    def implements(*interfaces):
-        """Declare interfaces implemented by instances of a class
-
-        This function is called in a class definition.
-
-        The arguments are one or more interfaces or interface
-        specifications (IDeclaration objects).
-
-        The interfaces given (including the interfaces in the
-        specifications) are added to any interfaces previously
-        declared.
-
-        Previous declarations include declarations for base classes
-        unless implementsOnly was used.
-
-        This function is provided for convenience. It provides a more
-        convenient way to call classImplements. For example::
-
-          implements(I1)
-
-        is equivalent to calling::
-
-          classImplements(C, I1)
-
-        after the class has been created.
-
-        Consider the following example::
-
-          class C(A, B):
-            implements(I1, I2)
-
-
-        Instances of ``C`` implement ``I1``, ``I2``, and whatever interfaces
-        instances of ``A`` and ``B`` implement.
-        """
-
-    def implementsOnly(*interfaces):
-        """Declare the only interfaces implemented by instances of a class
-
-        This function is called in a class definition.
-
-        The arguments are one or more interfaces or interface
-        specifications (IDeclaration objects).
-
-        Previous declarations including declarations for base classes
-        are overridden.
-
-        This function is provided for convenience. It provides a more
-        convenient way to call classImplementsOnly. For example::
-
-          implementsOnly(I1)
-
-        is equivalent to calling::
-
-          classImplementsOnly(C, I1)
-
-        after the class has been created.
-
-        Consider the following example::
-
-          class C(A, B):
-            implementsOnly(I1, I2)
-
-
-        Instances of ``C`` implement ``I1``, ``I2``, regardless of what
-        instances of ``A`` and ``B`` implement.
-        """
-
-    def classProvides(*interfaces):
-        """Declare interfaces provided directly by a class
-
-        This function is called in a class definition.
-
-        The arguments are one or more interfaces or interface
-        specifications (IDeclaration objects).
-
-        The given interfaces (including the interfaces in the
-        specifications) are used to create the class's direct-object
-        interface specification.  An error will be raised if the class
-        already has a direct interface specification.  In other words, it is
-        an error to call this function more than once in a class
-        definition.
-
-        Note that the given interfaces have nothing to do with the
-        interfaces implemented by instances of the class.
-
-        This function is provided for convenience. It provides a more
-        convenient way to call directlyProvides for a class. For example::
-
-          classProvides(I1)
-
-        is equivalent to calling::
-
-          directlyProvides(theclass, I1)
-
-        after the class has been created.
-        """
-
-    def moduleProvides(*interfaces):
-        """Declare interfaces provided by a module
-
-        This function is used in a module definition.
-
-        The arguments are one or more interfaces or interface
-        specifications (IDeclaration objects).
-
-        The given interfaces (including the interfaces in the
-        specifications) are used to create the module's direct-object
-        interface specification.  An error will be raised if the module
-        already has an interface specification.  In other words, it is
-        an error to call this function more than once in a module
-        definition.
-
-        This function is provided for convenience. It provides a more
-        convenient way to call directlyProvides for a module. For example::
-
-          moduleProvides(I1)
-
-        is equivalent to::
-
-          directlyProvides(sys.modules[__name__], I1)
-        """
-
-    def Declaration(*interfaces):
-        """Create an interface specification
-
-        The arguments are one or more interfaces or interface
-        specifications (IDeclaration objects).
-
-        A new interface specification (IDeclaration) with
-        the given interfaces is returned.
-        """
-
-class IAdapterRegistry(Interface):
-    """Provide an interface-based registry for adapters
-
-    This registry registers objects that are in some sense "from" a
-    sequence of specifications to an interface and a name.
-
-    No specific semantics are assumed for the registered objects,
-    however, the most common application will be to register factories
-    that adapt objects providing required specifications to a provided
-    interface. 
-    
-    """
-
-    def register(required, provided, name, value):
-        """Register a value
-
-        A value is registered for a *sequence* of required specifications, a
-        provided interface, and a name.
-        """
-
-    def lookup(required, provided, name, default=None):
-        """Lookup a value
-
-        A value is looked up based on a *sequence* of required
-        specifications, a provided interface, and a name.
-        """
-
-    def lookupAll(required, provided):
-        """Find all adapters from the required to the provided interfaces
-
-        An iterable object is returned that provides name-value two-tuples.
-        """
-
-    def names(required, provided):
-        """Return the names for which there are registered objects
-        """
-
-    def subscribe(required, provided, subscriber):
-        """Register a subscriber
-
-        A subscriber is registered for a *sequence* of required
-        specifications, a provided interface, and a name.
-
-        Multiple subscribers may be registered for the same (or
-        equivalent) interfaces.
-        """
-
-    def subscriptions(required, provided):
-        """Get a sequence of subscribers
-
-        Subscribers for a *sequence* of required interfaces and a provided
-        interface are returned.
-        """
-    
diff --git a/branches/bug1734/src/zope/interface/ro.py b/branches/bug1734/src/zope/interface/ro.py
deleted file mode 100644
index 01526bfa..00000000
--- a/branches/bug1734/src/zope/interface/ro.py
+++ /dev/null
@@ -1,63 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Compute a resolution order for an object and it's bases
-
-$Id$
-"""
-
-def ro(object):
-    """Compute a "resolution order" for an object
-    """
-    return mergeOrderings([_flatten(object, [])])
-
-def mergeOrderings(orderings, seen=None):
-    """Merge multiple orderings so that within-ordering order is preserved
-
-    Orderings are constrained in such a way that if an object appears
-    in two or more orderings, then the suffix that begins with the
-    object must be in both orderings.
-
-    For example:
-
-    >>> mergeOrderings([
-    ... ['x', 'y', 'z'],
-    ... ['q', 'z'],
-    ... [1, 3, 5],
-    ... ['z']
-    ... ])
-    ['x', 'y', 'q', 1, 3, 5, 'z']
-
-    """
-
-    if seen is None:
-        seen = {}
-    result = []
-    orderings.reverse()
-    for ordering in orderings:
-        ordering = list(ordering)
-        ordering.reverse()
-        for o in ordering:
-            if o not in seen:
-                seen[o] = 1
-                result.append(o)
-
-    result.reverse()
-    return result
-
-def _flatten(ob, result):
-    result.append(ob)
-    for base in ob.__bases__:
-        _flatten(base, result)
-
-    return result
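-
-# Editor's illustration (hedged): ro() works on anything with __bases__,
-# including plain new-style classes:
-#
-#   >>> class A(object): pass
-#   >>> class B(A): pass
-#   >>> [c.__name__ for c in ro(B)]
-#   ['B', 'A', 'object']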
diff --git a/branches/bug1734/src/zope/interface/tests/__init__.py b/branches/bug1734/src/zope/interface/tests/__init__.py
deleted file mode 100644
index b711d360..00000000
--- a/branches/bug1734/src/zope/interface/tests/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-#
-# This file is necessary to make this directory a package.
diff --git a/branches/bug1734/src/zope/interface/tests/dummy.py b/branches/bug1734/src/zope/interface/tests/dummy.py
deleted file mode 100644
index f4a4f9d6..00000000
--- a/branches/bug1734/src/zope/interface/tests/dummy.py
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Dummy Module
-
-$Id$
-"""
-from zope.interface import moduleProvides
-from zope.interface.tests.ifoo import IFoo
-
-moduleProvides(IFoo)
-
-def bar(baz):
-    pass
diff --git a/branches/bug1734/src/zope/interface/tests/foodforthought.txt b/branches/bug1734/src/zope/interface/tests/foodforthought.txt
deleted file mode 100644
index 45d961be..00000000
--- a/branches/bug1734/src/zope/interface/tests/foodforthought.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-================================
-Food-based subscription examples
-================================
-
-
-This file gives more subscription examples using a cooking-based example::
-
-    >>> from zope.interface.adapter import AdapterRegistry
-    >>> registry = AdapterRegistry()
-
-    >>> import zope.interface
-    >>> class IAnimal(zope.interface.Interface):
-    ...     pass
-    >>> class IPoultry(IAnimal):
-    ...     pass
-    >>> class IChicken(IPoultry):
-    ...     pass
-    >>> class ISeafood(IAnimal):
-    ...     pass
-
-Adapting to some other interface for which there is no
-subscription adapter returns an empty sequence::
-
-    >>> class IRecipe(zope.interface.Interface):
-    ...     pass
-    >>> class ISausages(IRecipe):
-    ...     pass
-    >>> class INoodles(IRecipe):
-    ...     pass
-    >>> class IKFC(IRecipe):
-    ...     pass
-
-    >>> list(registry.subscriptions([IPoultry], IRecipe))
-    []
-
-unless we define a subscription::
-
-    >>> registry.subscribe([IAnimal], ISausages, 'sausages')
-    >>> list(registry.subscriptions([IPoultry], ISausages))
-    ['sausages']
-
-And define another subscription adapter::
-
-    >>> registry.subscribe([IPoultry], INoodles, 'noodles')
-    >>> meals = list(registry.subscriptions([IPoultry], IRecipe))
-    >>> meals.sort()
-    >>> meals
-    ['noodles', 'sausages']
-
-    >>> registry.subscribe([IChicken], IKFC, 'kfc')
-    >>> meals = list(registry.subscriptions([IChicken], IRecipe))
-    >>> meals.sort()
-    >>> meals
-    ['kfc', 'noodles', 'sausages']
-
-And the answer for poultry hasn't changed::
-
-    >>> meals = list(registry.subscriptions([IPoultry], IRecipe))
-    >>> meals.sort()
-    >>> meals
-    ['noodles', 'sausages']
diff --git a/branches/bug1734/src/zope/interface/tests/ifoo.py b/branches/bug1734/src/zope/interface/tests/ifoo.py
deleted file mode 100644
index 6ae22318..00000000
--- a/branches/bug1734/src/zope/interface/tests/ifoo.py
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""IFoo test module
-
-$Id$
-"""
-from zope.interface import Interface
-
-class IFoo(Interface):
-    """
-        Dummy interface for unit tests.
-    """
-
-    def bar(baz):
-        """
-            Just a note.
-        """
diff --git a/branches/bug1734/src/zope/interface/tests/m1.py b/branches/bug1734/src/zope/interface/tests/m1.py
deleted file mode 100644
index 86adad2e..00000000
--- a/branches/bug1734/src/zope/interface/tests/m1.py
+++ /dev/null
@@ -1,23 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test module that declares an interface
-
-$Id$
-"""
-from zope.interface import Interface, moduleProvides
-
-class I1(Interface): pass
-class I2(Interface): pass
-
-moduleProvides(I1, I2)
diff --git a/branches/bug1734/src/zope/interface/tests/m2.py b/branches/bug1734/src/zope/interface/tests/m2.py
deleted file mode 100644
index 16762dd4..00000000
--- a/branches/bug1734/src/zope/interface/tests/m2.py
+++ /dev/null
@@ -1,17 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test module that doesn't declare an interface
-
-$Id$
-"""
diff --git a/branches/bug1734/src/zope/interface/tests/odd.py b/branches/bug1734/src/zope/interface/tests/odd.py
deleted file mode 100644
index db79da27..00000000
--- a/branches/bug1734/src/zope/interface/tests/odd.py
+++ /dev/null
@@ -1,129 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Odd meta class that doesn't subclass type.
-
-This is used for testing support for ExtensionClass in new interfaces.
-
-  >>> class A(object):
-  ...     __metaclass__ = MetaClass
-  ...     a = 1
-  ...
-  >>> A.__name__
-  'A'
-  >>> A.__bases__
-  ()
-  >>> class B(object):
-  ...     __metaclass__ = MetaClass
-  ...     b = 1
-  ...
-  >>> class C(A, B): pass
-  ...
-  >>> C.__name__
-  'C'
-  >>> int(C.__bases__ == (A, B))
-  1
-  >>> a = A()
-  >>> aa = A()
-  >>> a.a
-  1
-  >>> aa.a
-  1
-  >>> aa.a = 2
-  >>> a.a
-  1
-  >>> aa.a
-  2
-  >>> c = C()
-  >>> c.a
-  1
-  >>> c.b
-  1
-  >>> c.b = 2
-  >>> c.b
-  2
-  >>> C.c = 1
-  >>> c.c
-  1
-  >>> from types import ClassType
-  >>> int(isinstance(C, (type, ClassType)))
-  0
-  >>> int(C.__class__.__class__ is C.__class__)
-  1
-
-$Id$
-"""
-
-# class OddClass is an odd meta class
-
-class MetaMetaClass(type):
-
-    def __getattribute__(self, name):
-        if name == '__class__':
-            return self
-        return type.__getattribute__(self, name)
-    
-
-class MetaClass(object):
-    """Odd classes
-    """
-    __metaclass__ = MetaMetaClass
-
-    def __init__(self, name, bases, dict):
-        self.__name__ = name
-        self.__bases__ = bases
-        self.__dict__.update(dict)
-
-    def __call__(self):
-        return OddInstance(self)
-
-    def __getattr__(self, name):
-        for b in self.__bases__:
-            v = getattr(b, name, self)
-            if v is not self:
-                return v
-        raise AttributeError, name
-
-    def __repr__(self):
-        return "<odd class %s at %s>" % (self.__name__, hex(id(self)))
-
-class OddInstance(object):
-
-    def __init__(self, cls):
-        self.__dict__['__class__'] = cls
-
-    def __getattribute__(self, name):
-        dict = object.__getattribute__(self, '__dict__')
-        if name == '__dict__':
-            return dict
-        v = dict.get(name, self)
-        if v is not self:
-            return v
-        return getattr(dict['__class__'], name)
-
-    def __setattr__(self, name, v):
-        self.__dict__[name] = v
-
-    def __delattr__(self, name):
-        del self.__dict__[name]
-
-    def __repr__(self):
-        return "<odd %s instance at %s>" % (
-            self.__class__.__name__, hex(id(self)))
-        
-
-
-# DocTest:
-if __name__ == "__main__":
-    import doctest, __main__
-    doctest.testmod(__main__, isprivate=lambda *a: False)
diff --git a/branches/bug1734/src/zope/interface/tests/test_adapter.py b/branches/bug1734/src/zope/interface/tests/test_adapter.py
deleted file mode 100644
index f6328a4b..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_adapter.py
+++ /dev/null
@@ -1,297 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Adapter registry tests
-
-$Id$
-"""
-import unittest
-import zope.interface
-from zope.interface.adapter import AdapterRegistry
-import zope.interface
-
-class IF0(zope.interface.Interface):
-    pass
-class IF1(IF0):
-    pass
-
-class IB0(zope.interface.Interface):
-    pass
-class IB1(IB0):
-    pass
-
-class IR0(zope.interface.Interface):
-    pass
-class IR1(IR0):
-    pass
-
-
-def test_orderwith():
-    """
-    >>> Interface = zope.interface.Interface
-    >>> bywith = {(Interface, Interface): 'A0',
-    ...           (IF0,       Interface): 'A1', 
-    ...           (Interface, IB0):       'A2', 
-    ...           (IF0,       IB0):       'A3', 
-    ...           (IF1,       IB0):       'A4', 
-    ...           (IF0,       IB1):       'A5', 
-    ...           (IF1,       IB1):       'A6', 
-    ...          }
-
-    >>> [value for spec, value in zope.interface.adapter.orderwith(bywith)]
-    ['A6', 'A4', 'A5', 'A3', 'A1', 'A2', 'A0']
-    """
-
-
-def test_multi_adapter_get_best_match():
-    """
-    >>> registry = AdapterRegistry()
-
-    >>> class IB2(IB0):
-    ...     pass
-    >>> class IB3(IB2, IB1):
-    ...     pass
-    >>> class IB4(IB1, IB2):
-    ...     pass
-
-    >>> registry.register([None, IB1], IR0, '', 'A1')
-    >>> registry.register([None, IB0], IR0, '', 'A0')
-    >>> registry.register([None, IB2], IR0, '', 'A2')
-
-    >>> registry.lookup((IF1, IB1), IR0, '')
-    'A1'
-    >>> registry.lookup((IF1, IB2), IR0, '')
-    'A2'
-    >>> registry.lookup((IF1, IB0), IR0, '')
-    'A0'
-    >>> registry.lookup((IF1, IB3), IR0, '')
-    'A2'
-    >>> registry.lookup((IF1, IB4), IR0, '')
-    'A1'
-    """
-
-def test_multi_adapter_lookupAll_get_best_matches():
-    """
-    >>> registry = AdapterRegistry()
-
-    >>> class IB2(IB0):
-    ...     pass
-    >>> class IB3(IB2, IB1):
-    ...     pass
-    >>> class IB4(IB1, IB2):
-    ...     pass
-
-    >>> registry.register([None, IB1], IR0, '', 'A1')
-    >>> registry.register([None, IB0], IR0, '', 'A0')
-    >>> registry.register([None, IB2], IR0, '', 'A2')
-
-    >>> registry.lookupAll((IF1, IB1), IR0).next()[1]
-    'A1'
-    >>> registry.lookupAll((IF1, IB2), IR0).next()[1]
-    'A2'
-    >>> registry.lookupAll((IF1, IB0), IR0).next()[1]
-    'A0'
-    >>> registry.lookupAll((IF1, IB3), IR0).next()[1]
-    'A2'
-    >>> registry.lookupAll((IF1, IB4), IR0).next()[1]
-    'A1'
-    """
-
-
-def test_multi_adapter_w_default():
-    """
-    >>> registry = AdapterRegistry()
-    
-    >>> registry.register([None, None], IB1, 'bob', 'A0')
-
-    >>> registry.lookup((IF1, IR1), IB0, 'bob')
-    'A0'
-    
-    >>> registry.register([None, IR0], IB1, 'bob', 'A1')
-
-    >>> registry.lookup((IF1, IR1), IB0, 'bob')
-    'A1'
-    
-    >>> registry.lookup((IF1, IR1), IB0, 'bruce')
-
-    >>> registry.register([None, IR1], IB1, 'bob', 'A2')
-    >>> registry.lookup((IF1, IR1), IB0, 'bob')
-    'A2'
-    """
-
-def test_multi_adapter_w_inherited_and_multiple_registrations():
-    """
-    >>> registry = AdapterRegistry()
-
-    >>> class IX(zope.interface.Interface):
-    ...    pass
-
-    >>> registry.register([IF0, IR0], IB1, 'bob', 'A1')
-    >>> registry.register([IF1, IX], IB1, 'bob', 'AX')
-
-    >>> registry.lookup((IF1, IR1), IB0, 'bob')
-    'A1'
-    """
-
-def test_named_adapter_with_default():
-    """Query a named simple adapter
-
-    >>> registry = AdapterRegistry()
-
-    If we ask for a named adapter, we won't get a result unless there
-    is a named adapter, even if the object implements the interface:
-
-    >>> registry.lookup([IF1], IF0, 'bob')
-
-    >>> registry.register([None], IB1, 'bob', 'A1')
-    >>> registry.lookup([IF1], IB0, 'bob')
-    'A1'
-
-    >>> registry.lookup([IF1], IB0, 'bruce')
-
-    >>> registry.register([None], IB0, 'bob', 'A2')
-    >>> registry.lookup([IF1], IB0, 'bob')
-    'A2'
-    """
-
-def test_multi_adapter_gets_closest_provided():
-    """
-    >>> registry = AdapterRegistry()
-    >>> registry.register([IF1, IR0], IB0, 'bob', 'A1')
-    >>> registry.register((IF1, IR0), IB1, 'bob', 'A2')
-    >>> registry.lookup((IF1, IR1), IB0, 'bob')
-    'A1'
-
-    >>> registry = AdapterRegistry()
-    >>> registry.register([IF1, IR0], IB1, 'bob', 'A2')
-    >>> registry.register([IF1, IR0], IB0, 'bob', 'A1')
-    >>> registry.lookup([IF1, IR0], IB0, 'bob')
-    'A1'
-
-    >>> registry = AdapterRegistry()
-    >>> registry.register([IF1, IR0], IB0, 'bob', 'A1')
-    >>> registry.register([IF1, IR1], IB1, 'bob', 'A2')
-    >>> registry.lookup([IF1, IR1], IB0, 'bob')
-    'A2'
-
-    >>> registry = AdapterRegistry()
-    >>> registry.register([IF1, IR1], IB1, 'bob', 2)
-    >>> registry.register([IF1, IR0], IB0, 'bob', 1)
-    >>> registry.lookup([IF1, IR1], IB0, 'bob')
-    2
-    """
-
-def test_multi_adapter_check_non_default_dont_hide_default():
-    """
-    >>> registry = AdapterRegistry()
-
-    >>> class IX(zope.interface.Interface):
-    ...     pass
-
-    
-    >>> registry.register([None, IR0], IB0, 'bob', 1)
-    >>> registry.register([IF1,   IX], IB0, 'bob', 2)
-    >>> registry.lookup([IF1, IR1], IB0, 'bob')
-    1
-    """
-
-def test_adapter_hook_with_factory_producing_None():
-    """
-    >>> registry = AdapterRegistry()
-    >>> default = object()
-    
-    >>> class Object1(object):
-    ...     zope.interface.implements(IF0)
-    >>> class Object2(object):
-    ...     zope.interface.implements(IF0)
-
-    >>> def factory(context):
-    ...     if isinstance(context, Object1):
-    ...         return 'adapter'
-    ...     return None
-
-    >>> registry.register([IF0], IB0, '', factory)
-
-    >>> registry.adapter_hook(IB0, Object1())
-    'adapter'
-    >>> registry.adapter_hook(IB0, Object2()) is None
-    True
-    >>> registry.adapter_hook(IB0, Object2(), default=default) is default
-    True
-    """
-
-def test_adapter_registry_update_upon_interface_bases_change():
-    """
-    Let's first create a adapter registry and a simple adaptation hook:
-
-    >>> globalRegistry = AdapterRegistry()
-
-    >>> def _hook(iface, ob, lookup=globalRegistry.lookup1):
-    ...     factory = lookup(zope.interface.providedBy(ob), iface)
-    ...     if factory is None:
-    ...         return None
-    ...     else:
-    ...         return factory(ob)
-
-    >>> zope.interface.interface.adapter_hooks.append(_hook)
-
-    Now we create some interfaces and an implementation:
-    
-    >>> class IX(zope.interface.Interface):
-    ...   pass
-
-    >>> class IY(zope.interface.Interface):
-    ...   pass
-
-    >>> class X(object):
-    ...  pass
-
-    >>> class Y(object):
-    ...  zope.interface.implements(IY)
-    ...  def __init__(self, original):
-    ...   self.original=original
-
-    and register an adapter:
-    
-    >>> globalRegistry.register((IX,), IY, '', Y)
-
-    at first, we still expect the adapter lookup from `X` to `IY` to fail:
-    
-    >>> IY(X()) #doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
-    Traceback (most recent call last):
-    ...
-    TypeError: ('Could not adapt',
-                <zope.interface.tests.test_adapter.X object at ...>,
-                <InterfaceClass zope.interface.tests.test_adapter.IY>)
-
-    But after we declare an interface on the class `X`, it should pass:
-
-    >>> zope.interface.classImplementsOnly(X, IX)
-
-    >>> IY(X()) #doctest: +ELLIPSIS
-    <zope.interface.tests.test_adapter.Y object at ...>
-
-    >>> hook = zope.interface.interface.adapter_hooks.pop()
-    """
-
-def test_suite():
-    from zope.testing import doctest, doctestunit
-    return unittest.TestSuite((
-        doctestunit.DocFileSuite('../adapter.txt', '../human.txt',
-                                 'foodforthought.txt',
-                                 globs={'__name__': '__main__'}),
-        doctest.DocTestSuite(),
-        ))
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zope/interface/tests/test_advice.py b/branches/bug1734/src/zope/interface/tests/test_advice.py
deleted file mode 100644
index b964aa7b..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_advice.py
+++ /dev/null
@@ -1,177 +0,0 @@
-
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for advice
-
-This module was adapted from 'protocols.tests.advice', part of the Python
-Enterprise Application Kit (PEAK).  Please notify the PEAK authors
-(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or
-Zope-specific changes are required, so that the PEAK version of this module
-can be kept in sync.
-
-PEAK is a Python application framework that interoperates with (but does
-not require) Zope 3 and Twisted.  It provides tools for manipulating UML
-models, object-relational persistence, aspect-oriented programming, and more.
-Visit the PEAK home page at http://peak.telecommunity.com for more information.
-
-$Id$
-"""
-
-import unittest
-from unittest import TestCase, makeSuite, TestSuite
-from zope.interface.advice import *
-from types import ClassType
-import sys
-
-def ping(log, value):
-
-    def pong(klass):
-        log.append((value,klass))
-        return [klass]
-
-    addClassAdvisor(pong)
-
-class ClassicClass:
-    __metaclass__ = ClassType
-    classLevelFrameInfo = getFrameInfo(sys._getframe())
-
-class NewStyleClass:
-    __metaclass__ = type
-    classLevelFrameInfo = getFrameInfo(sys._getframe())
-
-moduleLevelFrameInfo = getFrameInfo(sys._getframe())
-
-class FrameInfoTest(TestCase):
-
-    classLevelFrameInfo = getFrameInfo(sys._getframe())
-
-    def checkModuleInfo(self):
-        kind, module, f_locals, f_globals = moduleLevelFrameInfo
-        self.assertEquals(kind, "module")
-        for d in module.__dict__, f_locals, f_globals:
-            self.assert_(d is globals())
-
-    def checkClassicClassInfo(self):
-        kind, module, f_locals, f_globals = ClassicClass.classLevelFrameInfo
-        self.assertEquals(kind, "class")
-
-        self.assert_(f_locals is ClassicClass.__dict__)  # ???
-        for d in module.__dict__, f_globals:
-            self.assert_(d is globals())
-
-    def checkNewStyleClassInfo(self):
-        kind, module, f_locals, f_globals = NewStyleClass.classLevelFrameInfo
-        self.assertEquals(kind, "class")
-
-        for d in module.__dict__, f_globals:
-            self.assert_(d is globals())
-
-    def checkCallInfo(self):
-        kind, module, f_locals, f_globals = getFrameInfo(sys._getframe())
-        self.assertEquals(kind, "function call")
-        self.assert_(f_locals is locals()) # ???
-        for d in module.__dict__, f_globals:
-            self.assert_(d is globals())
-
-
-class AdviceTests(TestCase):
-
-    def checkOrder(self):
-        log = []
-        class Foo(object):
-            ping(log, 1)
-            ping(log, 2)
-            ping(log, 3)
-
-        # Strip the list nesting
-        for i in 1,2,3:
-            self.assert_(isinstance(Foo, list))
-            Foo, = Foo
-
-        self.assertEquals(log, [(1, Foo), (2, [Foo]), (3, [[Foo]])])
-
-    def XXXcheckOutside(self):
-        # Disabled because the check does not work with doctest tests.
-        try:
-            ping([], 1)
-        except SyntaxError:
-            pass
-        else:
-            raise AssertionError(
-                "Should have detected advice outside class body"
-            )
-
-    def checkDoubleType(self):
-        if sys.hexversion >= 0x02030000:
-            return  # you can't duplicate bases in 2.3
-        class aType(type,type):
-            ping([],1)
-        aType, = aType
-        self.assert_(aType.__class__ is type)
-
-    def checkSingleExplicitMeta(self):
-
-        class M(type):
-            pass
-
-        class C(M):
-            __metaclass__ = M
-            ping([],1)
-
-        C, = C
-        self.assert_(C.__class__ is M)
-
-
-    def checkMixedMetas(self):
-
-        class M1(type): pass
-        class M2(type): pass
-
-        class B1: __metaclass__ = M1
-        class B2: __metaclass__ = M2
-
-        try:
-            class C(B1,B2):
-                ping([],1)
-        except TypeError:
-            pass
-        else:
-            raise AssertionError("Should have gotten incompatibility error")
-
-        class M3(M1,M2): pass
-
-        class C(B1,B2):
-            __metaclass__ = M3
-            ping([],1)
-
-        self.assert_(isinstance(C,list))
-        C, = C
-        self.assert_(isinstance(C,M3))
-
-    def checkMetaOfClass(self):
-
-        class metameta(type):
-            pass
-
-        class meta(type):
-            __metaclass__ = metameta
-
-        self.assertEquals(determineMetaclass((meta, type)), metameta)
-
-TestClasses = (AdviceTests, FrameInfoTest)
-
-def test_suite():
-    return TestSuite([makeSuite(t,'check') for t in TestClasses])
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zope/interface/tests/test_declarations.py b/branches/bug1734/src/zope/interface/tests/test_declarations.py
deleted file mode 100644
index 38003249..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_declarations.py
+++ /dev/null
@@ -1,366 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test the new API for making and checking interface declarations
-
-$Id$
-"""
-import unittest
-from zope.interface import *
-from zope.testing.doctestunit import DocTestSuite
-from zope.interface import Interface
-
-class I1(Interface): pass
-class I2(Interface): pass
-class I3(Interface): pass
-class I4(Interface): pass
-class I5(Interface): pass
-
-class A(object):
-    implements(I1)
-class B(object):
-    implements(I2)
-class C(A, B):
-    implements(I3)
-
-class COnly(A, B):
-    implementsOnly(I3)
-
-class COnly_old(A, B):
-    __implemented__ = I3
-    
-class D(COnly):
-    implements(I5)
-    
-def test_ObjectSpecification_Simple():
-    """
-    >>> c = C()
-    >>> directlyProvides(c, I4)
-    >>> [i.__name__ for i in providedBy(c)]
-    ['I4', 'I3', 'I1', 'I2']
-    """
-
-def test_ObjectSpecification_Simple_w_only():
-    """
-    >>> c = COnly()
-    >>> directlyProvides(c, I4)
-    >>> [i.__name__ for i in providedBy(c)]
-    ['I4', 'I3']
-    """
-
-def test_ObjectSpecification_Simple_old_style():
-    """
-    >>> c = COnly_old()
-    >>> directlyProvides(c, I4)
-    >>> [i.__name__ for i in providedBy(c)]
-    ['I4', 'I3']
-    """
-
-
-class Test(unittest.TestCase):
-
-    # Note that most of the tests are in the doc strings of the
-    # declarations module.
-
-    def test_backward_compat(self):
-
-        class C1(object): __implemented__ = I1
-        class C2(C1): __implemented__ = I2, I5
-        class C3(C2): __implemented__ = I3, C2.__implemented__
-
-        self.assert_(C3.__implemented__.__class__ is tuple)
-
-        self.assertEqual(
-            [i.getName() for i in providedBy(C3())],
-            ['I3', 'I2', 'I5'],
-            )
-
-        class C4(C3):
-            implements(I4)
-
-        self.assertEqual(
-            [i.getName() for i in providedBy(C4())],
-            ['I4', 'I3', 'I2', 'I5'],
-            )
-
-        self.assertEqual(
-            [i.getName() for i in C4.__implemented__],
-            ['I4', 'I3', 'I2', 'I5'],
-            )
-
-        # Note that C3.__implemented__ should now be a sequence of interfaces
-        self.assertEqual(
-            [i.getName() for i in C3.__implemented__],
-            ['I3', 'I2', 'I5'],
-            )
-        self.failIf(C3.__implemented__.__class__ is tuple)
-
-    def test_module(self):
-        import zope.interface.tests.m1
-        import zope.interface.tests.m2
-        directlyProvides(zope.interface.tests.m2,
-                         zope.interface.tests.m1.I1,
-                         zope.interface.tests.m1.I2,
-                         )
-        self.assertEqual(list(providedBy(zope.interface.tests.m1)),
-                         list(providedBy(zope.interface.tests.m2)),
-                         )
-
-    def test_builtins(self):
-        # Setup
-
-        intspec = implementedBy(int)
-        olddeclared = intspec.declared
-                
-        classImplements(int, I1)
-        class myint(int):
-            implements(I2)
-
-        x = 42
-        self.assertEqual([i.getName() for i in providedBy(x)],
-                         ['I1'])
-
-        x = myint(42)
-        directlyProvides(x, I3)
-        self.assertEqual([i.getName() for i in providedBy(x)],
-                         ['I3', 'I2', 'I1'])
-
-        # cleanup
-        intspec.declared = olddeclared
-        classImplements(int)
-
-        x = 42
-        self.assertEqual([i.getName() for i in providedBy(x)],
-                         [])
-        
-
-def test_signature_w_no_class_interfaces():
-    """
-    >>> from zope.interface import *
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> list(providedBy(c))
-    []
-    
-    >>> class I(Interface):
-    ...    pass
-    >>> directlyProvides(c, I)
-    >>> list(providedBy(c))  == list(directlyProvidedBy(c))
-    1
-    """
-
-def test_classImplement_on_deeply_nested_classes():
-    """This test is in response to a bug found, which is why it's a bit
-    contrived
-
-    >>> from zope.interface import *
-    >>> class B1(object):
-    ...     pass
-    >>> class B2(B1):
-    ...     pass
-    >>> class B3(B2):
-    ...     pass
-    >>> class D(object):
-    ...     implements()
-    >>> class S(B3, D):
-    ...     implements()
-
-    This failed due to a bug in the code for finding __providedBy__
-    descriptors for old-style classes.
-
-    """
-
-def test_pickle_provides_specs():
-    """
-    >>> from pickle import dumps, loads
-    >>> a = A()
-    >>> I2.providedBy(a)
-    0
-    >>> directlyProvides(a, I2)
-    >>> I2.providedBy(a)
-    1
-    >>> a2 = loads(dumps(a))
-    >>> I2.providedBy(a2)
-    1
-    
-    """
-
-def test_that_we_dont_inherit_class_provides():
-    """
-    >>> class X(object):
-    ...     classProvides(I1)
-    >>> class Y(X):
-    ...     pass
-    >>> [i.__name__ for i in X.__provides__]
-    ['I1']
-    >>> Y.__provides__
-    Traceback (most recent call last):
-    ...
-    AttributeError: __provides__
-    
-    """
-
-def test_that_we_dont_inherit_provides_optimizations():
-    """
-
-    When we make a declaration for a class, we install a __provides__
-    descriptor that provides a default for instances that don't have
-    instance-specific declarations:
-    
-    >>> class A(object):
-    ...     implements(I1)
-
-    >>> class B(object):
-    ...     implements(I2)
-
-    >>> [i.__name__ for i in A().__provides__]
-    ['I1']
-    >>> [i.__name__ for i in B().__provides__]
-    ['I2']
-
-    But it's important that we don't use this for subclasses without
-    declarations.  This would cause incorrect results:
-
-    >>> class X(A, B):
-    ...     pass
-
-    >>> X().__provides__
-    Traceback (most recent call last):
-    ...
-    AttributeError: __provides__
-
-    However, if we "induce" a declaration, by calling implementedBy
-    (even indirectly through providedBy):
-
-    >>> [i.__name__ for i in providedBy(X())]
-    ['I1', 'I2']
-
-
-    then the optimization will work:
-    
-    >>> [i.__name__ for i in X().__provides__]
-    ['I1', 'I2']
-    
-    """
-
-def test_classProvides_before_implements():
-    """Special descriptor for class __provides__
-
-    The descriptor caches the implementedBy info, so that
-    we can get declarations for objects without instance-specific
-    interfaces a bit quicker.
-
-        For example::
-
-          >>> from zope.interface import Interface
-          >>> class IFooFactory(Interface):
-          ...     pass
-          >>> class IFoo(Interface):
-          ...     pass
-          >>> class C(object):
-          ...     classProvides(IFooFactory)
-          ...     implements(IFoo)
-          >>> [i.getName() for i in C.__provides__]
-          ['IFooFactory']
-
-          >>> [i.getName() for i in C().__provides__]
-          ['IFoo']
-    """
-
-def test_getting_spec_for_proxied_builtin_class():
-    """
-
-    In general, we should be able to get a spec
-    for a proxied class if someone has declared or
-    asked for a spec before.
-
-    We don't want to depend on proxies in this (zope.interface)
-    package, but we do want to work with proxies.  Proxies have the
-    effect that a class's __dict__ cannot be retrieved. Further, for
-    built-in classes, we can't store, and thus cannot retrieve, any class
-    attributes.  We'll emulate this by treating a plain object as a class:
-
-      >>> cls = object()
-
-    We'll create an implements specification:
-
-      >>> import zope.interface.declarations
-      >>> impl = zope.interface.declarations.Implements(I1, I2)
-
-    Now, we'll emulate a declaration for a built-in type by putting
-    it in BuiltinImplementationSpecifications:
-
-      >>> zope.interface.declarations.BuiltinImplementationSpecifications[
-      ...   cls] = impl
-
-    Now, we should be able to get it back:
-
-      >>> implementedBy(cls) is impl
-      True
-    
-    """
-
-def test_declaration_get():
-    """
-    We can get definitions from a declaration:
-
-        >>> import zope.interface
-        >>> class I1(zope.interface.Interface):
-        ...    a11 = zope.interface.Attribute('a11')
-        ...    a12 = zope.interface.Attribute('a12')
-        >>> class I2(zope.interface.Interface):
-        ...    a21 = zope.interface.Attribute('a21')
-        ...    a22 = zope.interface.Attribute('a22')
-        ...    a12 = zope.interface.Attribute('a212')
-        >>> class I11(I1):
-        ...    a11 = zope.interface.Attribute('a111')
-
-        >>> decl = Declaration(I11, I2)
-        >>> decl.get('a11') is I11.get('a11')
-        True
-        >>> decl.get('a12') is I1.get('a12')
-        True
-        >>> decl.get('a21') is I2.get('a21')
-        True
-        >>> decl.get('a22') is I2.get('a22')
-        True
-        >>> decl.get('a')
-        >>> decl.get('a', 42)
-        42
-
-    We get None even with no interfaces:
-
-        >>> decl = Declaration()
-        >>> decl.get('a11')
-        >>> decl.get('a11', 42)
-        42
-
-    We get new data if we change the interface bases:
-
-        >>> decl.__bases__ = I11, I2
-        >>> decl.get('a11') is I11.get('a11')
-        True
-    """
-
-def test_suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(Test))
-    suite.addTest(DocTestSuite("zope.interface.declarations"))
-    suite.addTest(DocTestSuite())
-    
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/zope/interface/tests/test_document.py b/branches/bug1734/src/zope/interface/tests/test_document.py
deleted file mode 100644
index 67648e3f..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_document.py
+++ /dev/null
@@ -1,71 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Documentation tests.
-
-$Id$
-"""
-from unittest import TestCase, main, makeSuite
-
-from zope.interface import Interface, Attribute
-
-class Test(TestCase):
-
-    def testBlech(self):
-        from zope.interface.document import asStructuredText
-
-        self.assertEqual(asStructuredText(I2), '''\
-I2
-
- I2 doc
-
- This interface extends:
-
-  o _I1
-
- Attributes:
-
-  a1 -- no documentation
-
-  a2 -- a2 doc
-
- Methods:
-
-  f21() -- f21 doc
-
-  f22() -- no documentation
-
-  f23() -- f23 doc
-
-''')
-
-
-def test_suite():
-    return makeSuite(Test)
-
-class _I1(Interface):
-    def f11(): pass
-    def f12(): pass
-
-class I2(_I1):
-    "I2 doc"
-
-    a1 = Attribute('a1')
-    a2 = Attribute('a2', 'a2 doc')
-
-    def f21(): "f21 doc"
-    def f22(): pass
-    def f23(): "f23 doc"
-
-if __name__=='__main__':
-    main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zope/interface/tests/test_element.py b/branches/bug1734/src/zope/interface/tests/test_element.py
deleted file mode 100644
index 2567e5fb..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_element.py
+++ /dev/null
@@ -1,44 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test Element meta-class.
-
-$Id$
-"""
-
-import unittest
-from zope.interface.interface import Element
-
-class TestElement(unittest.TestCase):
-
-    def test_taggedValues(self):
-        """Test that we can update tagged values of more than one element
-        """
-        
-        e1 = Element("foo")
-        e2 = Element("bar")
-        e1.setTaggedValue("x", 1)
-        e2.setTaggedValue("x", 2)
-        self.assertEqual(e1.getTaggedValue("x"), 1)
-        self.assertEqual(e2.getTaggedValue("x"), 2)
-        
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(TestElement))
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main(defaultTest=test_suite)
diff --git a/branches/bug1734/src/zope/interface/tests/test_interface.py b/branches/bug1734/src/zope/interface/tests/test_interface.py
deleted file mode 100644
index 271defd1..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_interface.py
+++ /dev/null
@@ -1,296 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test Interface implementation
-
-$Id$
-"""
-import unittest
-from zope.testing.doctestunit import DocTestSuite
-from zope.interface.tests.unitfixtures import *  # hehehe
-from zope.interface.exceptions import BrokenImplementation, Invalid
-from zope.interface import implementedBy, providedBy
-from zope.interface import Interface, directlyProvides, Attribute
-
-class InterfaceTests(unittest.TestCase):
-
-    def testInterfaceSetOnAttributes(self):
-        self.assertEqual(FooInterface['foobar'].interface,
-                         FooInterface)
-        self.assertEqual(FooInterface['aMethod'].interface,
-                         FooInterface)
-
-    def testClassImplements(self):
-        self.assert_(IC.implementedBy(C))
-
-        self.assert_(I1.implementedBy(A))
-        self.assert_(I1.implementedBy(B))
-        self.assert_(not I1.implementedBy(C))
-        self.assert_(I1.implementedBy(D))
-        self.assert_(I1.implementedBy(E))
-
-        self.assert_(not I2.implementedBy(A))
-        self.assert_(I2.implementedBy(B))
-        self.assert_(not I2.implementedBy(C))
-
-        # No longer after interfacegeddon
-        # self.assert_(not I2.implementedBy(D))
-
-        self.assert_(not I2.implementedBy(E))
-
-    def testUtil(self):
-        self.assert_(IC in implementedBy(C))
-        self.assert_(I1 in implementedBy(A))
-        self.assert_(not I1 in implementedBy(C))
-        self.assert_(I2 in implementedBy(B))
-        self.assert_(not I2 in implementedBy(C))
-
-        self.assert_(IC in providedBy(C()))
-        self.assert_(I1 in providedBy(A()))
-        self.assert_(not I1 in providedBy(C()))
-        self.assert_(I2 in providedBy(B()))
-        self.assert_(not I2 in providedBy(C()))
-
-
-    def testObjectImplements(self):
-        self.assert_(IC.providedBy(C()))
-
-        self.assert_(I1.providedBy(A()))
-        self.assert_(I1.providedBy(B()))
-        self.assert_(not I1.providedBy(C()))
-        self.assert_(I1.providedBy(D()))
-        self.assert_(I1.providedBy(E()))
-
-        self.assert_(not I2.providedBy(A()))
-        self.assert_(I2.providedBy(B()))
-        self.assert_(not I2.providedBy(C()))
-
-        # No longer after interfacegeddon
-        # self.assert_(not I2.providedBy(D()))
-
-        self.assert_(not I2.providedBy(E()))
-
-    def testDeferredClass(self):
-        a = A()
-        self.assertRaises(BrokenImplementation, a.ma)
-
-
-    def testInterfaceExtendsInterface(self):
-        self.assert_(BazInterface.extends(BobInterface))
-        self.assert_(BazInterface.extends(BarInterface))
-        self.assert_(BazInterface.extends(FunInterface))
-        self.assert_(not BobInterface.extends(FunInterface))
-        self.assert_(not BobInterface.extends(BarInterface))
-        self.assert_(BarInterface.extends(FunInterface))
-        self.assert_(not BarInterface.extends(BazInterface))
-
-    def testVerifyImplementation(self):
-        from zope.interface.verify import verifyClass
-        self.assert_(verifyClass(FooInterface, Foo))
-        self.assert_(Interface.providedBy(I1))
-
-    def test_names(self):
-        names = list(_I2.names()); names.sort()
-        self.assertEqual(names, ['f21', 'f22', 'f23'])
-        names = list(_I2.names(all=True)); names.sort()
-        self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
-
-    def test_namesAndDescriptions(self):
-        names = [nd[0] for nd in _I2.namesAndDescriptions()]; names.sort()
-        self.assertEqual(names, ['f21', 'f22', 'f23'])
-        names = [nd[0] for nd in _I2.namesAndDescriptions(1)]; names.sort()
-        self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
-
-        for name, d in _I2.namesAndDescriptions(1):
-            self.assertEqual(name, d.__name__)
-
-    def test_getDescriptionFor(self):
-        self.assertEqual(_I2.getDescriptionFor('f11').__name__, 'f11')
-        self.assertEqual(_I2.getDescriptionFor('f22').__name__, 'f22')
-        self.assertEqual(_I2.queryDescriptionFor('f33', self), self)
-        self.assertRaises(KeyError, _I2.getDescriptionFor, 'f33')
-
-    def test___getitem__(self):
-        self.assertEqual(_I2['f11'].__name__, 'f11')
-        self.assertEqual(_I2['f22'].__name__, 'f22')
-        self.assertEqual(_I2.get('f33', self), self)
-        self.assertRaises(KeyError, _I2.__getitem__, 'f33')
-
-    def test___contains__(self):
-        self.failUnless('f11' in _I2)
-        self.failIf('f33' in _I2)
-
-    def test___iter__(self):
-        names = list(iter(_I2))
-        names.sort()
-        self.assertEqual(names, ['a1', 'f11', 'f12', 'f21', 'f22', 'f23'])
-
-    def testAttr(self):
-        description = _I2.getDescriptionFor('a1')
-        self.assertEqual(description.__name__, 'a1')
-        self.assertEqual(description.__doc__, 'This is an attribute')
-
-    def testFunctionAttributes(self):
-        # Make sure function attributes become tagged values.
-        meth = _I1['f12']
-        self.assertEqual(meth.getTaggedValue('optional'), 1)
-    
-    def testInvariant(self):
-        # set up
-        o = InvariantC()
-        directlyProvides(o, IInvariant)
-        # a helper
-        def errorsEqual(self, o, error_len, error_msgs, interface=None):
-            if interface is None:
-                interface = IInvariant
-            self.assertRaises(Invalid, interface.validateInvariants, o)
-            e = []
-            try:
-                interface.validateInvariants(o, e)
-            except Invalid, error:
-                self.assertEquals(error.args[0], e)
-            else:
-                self.fail('validateInvariants should always raise Invalid')
-            self.assertEquals(len(e), error_len)
-            msgs = [error.args[0] for error in e]
-            msgs.sort()
-            for msg in msgs:
-                self.assertEquals(msg, error_msgs.pop(0))
-        # the tests
-        self.assertEquals(IInvariant.getTaggedValue('invariants'), 
-                          [ifFooThenBar])
-        self.assertEquals(IInvariant.validateInvariants(o), None)
-        o.bar = 27
-        self.assertEquals(IInvariant.validateInvariants(o), None)
-        o.foo = 42
-        self.assertEquals(IInvariant.validateInvariants(o), None)
-        del o.bar
-        errorsEqual(self, o, 1, ['If Foo, then Bar!'])
-        # nested interfaces with invariants:
-        self.assertEquals(ISubInvariant.getTaggedValue('invariants'), 
-                          [BarGreaterThanFoo])
-        o = InvariantC()
-        directlyProvides(o, ISubInvariant)
-        o.foo = 42
-        # even though the interface has changed, we should still only have one 
-        # error.
-        errorsEqual(self, o, 1, ['If Foo, then Bar!'], ISubInvariant)
-        # however, if we set bar to a value that is not greater than foo,
-        # then we'll get the new error
-        o.foo = 2
-        o.bar = 1
-        errorsEqual(self, o, 1, ['Please, Bar MUST be greater than Foo!'],
-                    ISubInvariant)
-        # and if we set foo to a positive number and bar to 0, we'll
-        # get both errors!
-        o.foo = 1
-        o.bar = 0
-        errorsEqual(self, o, 2, ['If Foo, then Bar!',
-                                 'Please, Bar MUST be greater than Foo!'],
-                    ISubInvariant)
-        # for a happy ending, we'll make the invariants happy
-        o.foo = 1
-        o.bar = 2
-        self.assertEquals(IInvariant.validateInvariants(o), None) # woohoo
-        # now we'll do two invariants on the same interface, 
-        # just to make sure that a small
-        # multi-invariant interface is at least minimally tested.
-        o = InvariantC()
-        directlyProvides(o, IInvariant)
-        o.foo = 42
-        old_invariants = IInvariant.getTaggedValue('invariants')
-        invariants = old_invariants[:]
-        invariants.append(BarGreaterThanFoo) # if you really need to mutate,
-        # then this would be the way to do it.  Probably a bad idea, though. :-)
-        IInvariant.setTaggedValue('invariants', invariants)
-        #
-        # even though the interface has changed, we should still only have one 
-        # error.
-        errorsEqual(self, o, 1, ['If Foo, then Bar!'])
-        # however, if we set bar to a value that is not greater than foo,
-        # then we'll get the new error
-        o.foo = 2
-        o.bar = 1
-        errorsEqual(self, o, 1, ['Please, Bar MUST be greater than Foo!'])
-        # and if we set foo to a positive number and bar to 0, we'll
-        # get both errors!
-        o.foo = 1
-        o.bar = 0
-        errorsEqual(self, o, 2, ['If Foo, then Bar!',
-                                 'Please, Bar MUST be greater than Foo!'])
-        # for another happy ending, we'll make the invariants happy again
-        o.foo = 1
-        o.bar = 2
-        self.assertEquals(IInvariant.validateInvariants(o), None) # bliss
-        # clean up
-        IInvariant.setTaggedValue('invariants', old_invariants)
-
-    def test___doc___element(self):
-        class I(Interface):
-            "xxx"
-
-        self.assertEqual(I.__doc__, "xxx")
-        self.assertEqual(list(I), [])
-
-        class I(Interface):
-            "xxx"
-
-            __doc__ = Attribute('the doc')
-
-        self.assertEqual(I.__doc__, "")
-        self.assertEqual(list(I), ['__doc__'])
-
-    def testIssue228(self):
-        # Test for http://collector.zope.org/Zope3-dev/228
-        class I(Interface):
-            "xxx"
-        class Bad:
-            __providedBy__ = None
-        # Old style classes don't have a '__class__' attribute
-        self.failUnlessRaises(AttributeError, I.providedBy, Bad)
-
-
-class _I1(Interface):
-
-    a1 = Attribute("This is an attribute")
-
-    def f11(): pass
-    def f12(): pass
-    f12.optional = 1
-
-class _I1_(_I1): pass
-class _I1__(_I1_): pass
-
-class _I2(_I1__):
-    def f21(): pass
-    def f22(): pass
-    f23 = f22
-
-
-def test_suite():
-    from zope.testing import doctest
-    suite = unittest.makeSuite(InterfaceTests)
-    suite.addTest(doctest.DocTestSuite("zope.interface.interface"))
-    suite.addTest(doctest.DocFileSuite(
-        '../README.txt',
-        globs={'__name__': '__main__'},
-        optionflags=doctest.NORMALIZE_WHITESPACE,
-        ))
-    return suite
-
-def main():
-    unittest.TextTestRunner().run(test_suite())
-
-if __name__=="__main__":
-    main()
diff --git a/branches/bug1734/src/zope/interface/tests/test_odd_declarations.py b/branches/bug1734/src/zope/interface/tests/test_odd_declarations.py
deleted file mode 100644
index 391afb1c..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_odd_declarations.py
+++ /dev/null
@@ -1,204 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test interface declarations against ExtensionClass-like classes.
-
-These tests are to make sure we do something sane in the presence of
-classic ExtensionClass classes and instances.
-
-$Id$
-"""
-import unittest, odd
-from zope.interface import Interface, implements, implementsOnly
-from zope.interface import directlyProvides, providedBy, directlyProvidedBy
-from zope.interface import classImplements, classImplementsOnly, implementedBy
-
-class I1(Interface): pass
-class I2(Interface): pass
-class I3(Interface): pass
-class I31(I3): pass
-class I4(Interface): pass
-class I5(Interface): pass
-
-class Odd(object): __metaclass__ = odd.MetaClass
-
-class B(Odd): __implemented__ = I2
-
-
-# TODO: We are going to need more magic to make classProvides work with odd
-#       classes. This will work in the next iteration. For now, we'll use
-#       a different mechanism.
-
-# from zope.interface import classProvides
-
-class A(Odd):
-    implements(I1)
-
-class C(A, B):
-    implements(I31)
-
-
-class Test(unittest.TestCase):
-
-    def test_ObjectSpecification(self):
-        c = C()
-        directlyProvides(c, I4)
-        self.assertEqual([i.getName() for i in providedBy(c)],
-                         ['I4', 'I31', 'I1', 'I2']
-                         )
-        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
-                         ['I4', 'I31', 'I3', 'I1', 'I2', 'Interface']
-                         )
-        self.assert_(I1 in providedBy(c))
-        self.failIf(I3 in providedBy(c))
-        self.assert_(providedBy(c).extends(I3))
-        self.assert_(providedBy(c).extends(I31))
-        self.failIf(providedBy(c).extends(I5))
-
-        class COnly(A, B):
-            implementsOnly(I31)
-
-        class D(COnly):
-            implements(I5)
-
-        classImplements(D, I5)
-
-        c = D()
-        directlyProvides(c, I4)
-        self.assertEqual([i.getName() for i in providedBy(c)],
-                         ['I4', 'I5', 'I31'])
-        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
-                         ['I4', 'I5', 'I31', 'I3', 'Interface'])
-        self.failIf(I1 in providedBy(c))
-        self.failIf(I3 in providedBy(c))
-        self.assert_(providedBy(c).extends(I3))
-        self.failIf(providedBy(c).extends(I1))
-        self.assert_(providedBy(c).extends(I31))
-        self.assert_(providedBy(c).extends(I5))
-
-        class COnly(A, B): __implemented__ = I31
-        class D(COnly):
-            implements(I5)
-
-        classImplements(D, I5)
-        c = D()
-        directlyProvides(c, I4)
-        self.assertEqual([i.getName() for i in providedBy(c)],
-                         ['I4', 'I5', 'I31'])
-        self.assertEqual([i.getName() for i in providedBy(c).flattened()],
-                         ['I4', 'I5', 'I31', 'I3', 'Interface'])
-        self.failIf(I1 in providedBy(c))
-        self.failIf(I3 in providedBy(c))
-        self.assert_(providedBy(c).extends(I3))
-        self.failIf(providedBy(c).extends(I1))
-        self.assert_(providedBy(c).extends(I31))
-        self.assert_(providedBy(c).extends(I5))
-
-    def test_classImplements(self):
-        class A(Odd):
-          implements(I3)
-
-        class B(Odd):
-          implements(I4)
-
-        class C(A, B):
-          pass
-        classImplements(C, I1, I2)
-        self.assertEqual([i.getName() for i in implementedBy(C)],
-                         ['I1', 'I2', 'I3', 'I4'])
-        classImplements(C, I5)
-        self.assertEqual([i.getName() for i in implementedBy(C)],
-                         ['I1', 'I2', 'I5', 'I3', 'I4'])
-
-    def test_classImplementsOnly(self):
-        class A(Odd):
-            implements(I3)
-
-        class B(Odd):
-            implements(I4)
-
-        class C(A, B):
-          pass
-        classImplementsOnly(C, I1, I2)
-        self.assertEqual([i.__name__ for i in implementedBy(C)],
-                         ['I1', 'I2'])
-
-
-    def test_directlyProvides(self):
-        class IA1(Interface): pass
-        class IA2(Interface): pass
-        class IB(Interface): pass
-        class IC(Interface): pass
-        class A(Odd):
-            implements(IA1, IA2)
-
-        class B(Odd):
-            implements(IB)
-
-        class C(A, B):
-            implements(IC)
-
-
-        ob = C()
-        directlyProvides(ob, I1, I2)
-        self.assert_(I1 in providedBy(ob))
-        self.assert_(I2 in providedBy(ob))
-        self.assert_(IA1 in providedBy(ob))
-        self.assert_(IA2 in providedBy(ob))
-        self.assert_(IB in providedBy(ob))
-        self.assert_(IC in providedBy(ob))
-
-        directlyProvides(ob, directlyProvidedBy(ob)-I2)
-        self.assert_(I1 in providedBy(ob))
-        self.failIf(I2 in providedBy(ob))
-        self.failIf(I2 in providedBy(ob))
-        directlyProvides(ob, directlyProvidedBy(ob), I2)
-        self.assert_(I2 in providedBy(ob))
-
-    def test_directlyProvides_fails_for_odd_class(self):
-        self.assertRaises(TypeError, directlyProvides, C, I5)
-
-    # XXX see above
-    def XXX_test_classProvides_fails_for_odd_class(self):
-        try:
-            class A(Odd):
-                classProvides(I1)
-        except TypeError:
-            return # Success
-        self.assert_(False,
-                     "Shouldn't be able to use classProvides on an odd class."
-                     )
-
-    def test_implementedBy(self):
-        class I2(I1): pass
-
-        class C1(Odd):
-          implements(I2)
-
-        class C2(C1):
-          implements(I3)
-
-        self.assertEqual([i.getName() for i in implementedBy(C2)],
-                         ['I3', 'I2'])
-
-
-
-
-def test_suite():
-    suite = unittest.TestSuite()
-    suite.addTest(unittest.makeSuite(Test))
-    return suite
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/branches/bug1734/src/zope/interface/tests/test_sorting.py b/branches/bug1734/src/zope/interface/tests/test_sorting.py
deleted file mode 100644
index 31f575d3..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_sorting.py
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test interface sorting
-
-$Id$
-"""
-
-from unittest import TestCase, TestSuite, main, makeSuite
-
-from zope.interface import Interface
-
-class I1(Interface): pass
-class I2(I1): pass
-class I3(I1): pass
-class I4(Interface): pass
-class I5(I4): pass
-class I6(I2): pass
-
-
-class Test(TestCase):
-
-    def test(self):
-        l = [I1, I3, I5, I6, I4, I2]
-        l.sort()
-        self.assertEqual(l, [I1, I2, I3, I4, I5, I6])
-
-    def test_w_None(self):
-        l = [I1, None, I3, I5, None, I6, I4, I2]
-        l.sort()
-        self.assertEqual(l, [I1, I2, I3, I4, I5, I6, None, None])
-
-def test_suite():
-    return TestSuite((
-        makeSuite(Test),
-        ))
-
-if __name__=='__main__':
-    main(defaultTest='test_suite')
diff --git a/branches/bug1734/src/zope/interface/tests/test_verify.py b/branches/bug1734/src/zope/interface/tests/test_verify.py
deleted file mode 100644
index d7f583bc..00000000
--- a/branches/bug1734/src/zope/interface/tests/test_verify.py
+++ /dev/null
@@ -1,196 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Interface Verify tests
-
-$Id$
-"""
-from zope.interface import Interface, implements, classImplements, Attribute
-from zope.interface.verify import verifyClass, verifyObject
-from zope.interface.exceptions import DoesNotImplement, BrokenImplementation
-from zope.interface.exceptions import BrokenMethodImplementation
-
-import unittest
-
-class Test(unittest.TestCase):
-
-    def testNotImplemented(self):
-
-        class C(object): pass
-
-        class I(Interface): pass
-
-        self.assertRaises(DoesNotImplement, verifyClass, I, C)
-
-        classImplements(C, I)
-
-        verifyClass(I, C)
-
-    def testMissingAttr(self):
-
-        class I(Interface):
-            def f(): pass
-
-        class C(object):
-            implements(I)
-
-        self.assertRaises(BrokenImplementation, verifyClass, I, C)
-
-        C.f=lambda self: None
-
-        verifyClass(I, C)
-
-    def testMissingAttr_with_Extended_Interface(self):
-
-        class II(Interface):
-            def f():
-                pass
-
-        class I(II):
-            pass
-
-        class C(object):
-            implements(I)
-
-        self.assertRaises(BrokenImplementation, verifyClass, I, C)
-
-        C.f=lambda self: None
-
-        verifyClass(I, C)
-
-    def testWrongArgs(self):
-
-        class I(Interface):
-            def f(a): pass
-
-        class C(object):
-            def f(self, b): pass
-
-            implements(I)
-
-        # We no longer require names to match.
-        #self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
-
-        C.f=lambda self, a: None
-
-        verifyClass(I, C)
-
-        C.f=lambda self, **kw: None
-
-        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
-
-        C.f=lambda self, a, *args: None
-
-        verifyClass(I, C)
-
-        C.f=lambda self, a, *args, **kw: None
-
-        verifyClass(I, C)
-
-        C.f=lambda self, *args: None
-
-        verifyClass(I, C)
-
-    def testExtraArgs(self):
-
-        class I(Interface):
-            def f(a): pass
-
-        class C(object):
-            def f(self, a, b): pass
-
-            implements(I)
-
-        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
-
-        C.f=lambda self, a: None
-
-        verifyClass(I, C)
-
-        C.f=lambda self, a, b=None: None
-
-        verifyClass(I, C)
-
-    def testNoVar(self):
-
-        class I(Interface):
-            def f(a, *args): pass
-
-        class C(object):
-            def f(self, a): pass
-
-            implements(I)
-
-        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
-
-        C.f=lambda self, a, *foo: None
-
-        verifyClass(I, C)
-
-    def testNoKW(self):
-
-        class I(Interface):
-            def f(a, **args): pass
-
-        class C(object):
-            def f(self, a): pass
-
-            implements(I)
-
-        self.assertRaises(BrokenMethodImplementation, verifyClass, I, C)
-
-        C.f=lambda self, a, **foo: None
-
-        verifyClass(I, C)
-
-    def testModule(self):
-
-        from zope.interface.tests.ifoo import IFoo
-        from zope.interface.tests import dummy
-
-        verifyObject(IFoo, dummy)
-
-    def testMethodForAttr(self):
-        
-        class IFoo(Interface):
-             foo = Attribute("The foo Attribute")
-
-
-        class Foo:
-             implements(IFoo)
-
-             def foo(self):
-                 pass
-
-        verifyClass(IFoo, Foo)
-
-    def testNonMethodForMethod(self):
-
-        class IBar(Interface):
-             def foo():
-                 pass
-
-        class Bar:
-            implements(IBar)
-
-            foo = 1
-
-        self.assertRaises(BrokenMethodImplementation, verifyClass, IBar, Bar)
-        
-
-def test_suite():
-    loader=unittest.TestLoader()
-    return loader.loadTestsFromTestCase(Test)
-
-if __name__=='__main__':
-    unittest.TextTestRunner().run(test_suite())
diff --git a/branches/bug1734/src/zope/interface/tests/unitfixtures.py b/branches/bug1734/src/zope/interface/tests/unitfixtures.py
deleted file mode 100644
index bdd697c7..00000000
--- a/branches/bug1734/src/zope/interface/tests/unitfixtures.py
+++ /dev/null
@@ -1,142 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Unit Test Fixtures
-
-$Id$
-"""
-from zope.interface import Interface, invariant
-from zope.interface.interface import Attribute
-from zope.interface.exceptions import Invalid
-
-class mytest(Interface):
-    pass
-
-class C(object):
-    def m1(self, a, b):
-        "return 1"
-        return 1
-
-    def m2(self, a, b):
-        "return 2"
-        return 2
-
-# testInstancesOfClassImplements
-
-#  YAGNI IC=Interface.impliedInterface(C)
-class IC(Interface):
-    def m1(a, b):
-        "return 1"
-
-    def m2(a, b):
-        "return 2"
-
-
-
-C.__implemented__=IC
-
-class I1(Interface):
-    def ma():
-        "blah"
-
-class I2(I1): pass
-
-class I3(Interface): pass
-
-class I4(Interface): pass
-
-class A(I1.deferred()):
-    __implemented__=I1
-
-class B(object):
-    __implemented__=I2, I3
-
-class D(A, B): pass
-
-class E(A, B):
-    __implemented__ = A.__implemented__, C.__implemented__
-
-
-class FooInterface(Interface):
-    """ This is an Abstract Base Class """
-
-    foobar = Attribute("fuzzed over beyond all recognition")
-
-    def aMethod(foo, bar, bingo):
-        """ This is aMethod """
-
-    def anotherMethod(foo=6, bar="where you get sloshed", bingo=(1,3,)):
-        """ This is anotherMethod """
-
-    def wammy(zip, *argues):
-        """ yadda yadda """
-
-    def useless(**keywords):
-        """ useless code is fun! """
-
-class Foo(object):
-    """ A concrete class """
-
-    __implemented__ = FooInterface,
-
-    foobar = "yeah"
-
-    def aMethod(self, foo, bar, bingo):
-        """ This is aMethod """
-        return "barf!"
-
-    def anotherMethod(self, foo=6, bar="where you get sloshed", bingo=(1,3,)):
-        """ This is anotherMethod """
-        return "barf!"
-
-    def wammy(self, zip, *argues):
-        """ yadda yadda """
-        return "barf!"
-
-    def useless(self, **keywords):
-        """ useless code is fun! """
-        return "barf!"
-
-foo_instance = Foo()
-
-class Blah(object):
-    pass
-
-new = Interface.__class__
-FunInterface = new('FunInterface')
-BarInterface = new('BarInterface', [FunInterface])
-BobInterface = new('BobInterface')
-BazInterface = new('BazInterface', [BobInterface, BarInterface])
-
-# fixtures for invariant tests
-def ifFooThenBar(obj):
-    if getattr(obj, 'foo', None) and not getattr(obj, 'bar', None):
-        raise Invalid('If Foo, then Bar!')
-class IInvariant(Interface):
-    foo = Attribute('foo')
-    bar = Attribute('bar; must eval to Boolean True if foo does')
-    invariant(ifFooThenBar)
-def BarGreaterThanFoo(obj):
-    foo = getattr(obj, 'foo', None)
-    bar = getattr(obj, 'bar', None)
-    if foo is not None and isinstance(foo, type(bar)):
-        # type checking should be handled elsewhere (like, say, 
-        # schema); these invariants should be intra-interface 
-        # constraints.  This is a hacky way to do it, maybe, but you
-        # get the idea
-        if not bar > foo:
-            raise Invalid('Please, Bar MUST be greater than Foo!')
-class ISubInvariant(IInvariant):
-    invariant(BarGreaterThanFoo)
-class InvariantC(object):
-    pass
diff --git a/branches/bug1734/src/zope/interface/verify.py b/branches/bug1734/src/zope/interface/verify.py
deleted file mode 100644
index 34dba0fe..00000000
--- a/branches/bug1734/src/zope/interface/verify.py
+++ /dev/null
@@ -1,111 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Verify interface implementations
-
-$Id$
-"""
-from zope.interface.exceptions import BrokenImplementation, DoesNotImplement
-from zope.interface.exceptions import BrokenMethodImplementation
-from types import FunctionType, MethodType
-from zope.interface.interface import fromMethod, fromFunction, Method
-
-# This will be monkey-patched when running under Zope 2, so leave this
-# here:
-MethodTypes = (MethodType, )
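-# For example, a hypothetical integration layer could extend the tuple so
-# _verify recognizes additional bound-method types (sketch only; the
-# concrete type Zope 2 registers is not shown here):
-#
-#   import zope.interface.verify
-#   zope.interface.verify.MethodTypes += (SomeExtensionMethodType,)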
-
-
-def _verify(iface, candidate, tentative=0, vtype=None):
-    """Verify that 'candidate' might correctly implements 'iface'.
-
-    This involves:
-
-      o Making sure the candidate defines all the necessary methods
-
-      o Making sure the methods have the correct signature
-
-      o Making sure the candidate asserts that it implements the interface
-
-    Note that this isn't the same as verifying that the class does
-    implement the interface.
-
-    If optional tentative is true, suppress the "is implemented by" test.
-    """
-
-    if vtype == 'c':
-        tester = iface.implementedBy
-    else:
-        tester = iface.providedBy
-
-    if not tentative and not tester(candidate):
-        raise DoesNotImplement(iface)
-
-    # Here the `desc` is either an `Attribute` or `Method` instance
-    for name, desc in iface.namesAndDescriptions(1):
-        if not hasattr(candidate, name):
-            if (not isinstance(desc, Method)) and vtype == 'c':
-                # We can't verify non-methods on classes, since the
-                # class may provide attrs in its __init__.
-                continue
-            
-            raise BrokenImplementation(iface, name)
-
-        attr = getattr(candidate, name)
-        if not isinstance(desc, Method):
-            # If it's not a method, there's nothing else we can test
-            continue
-        
-        if isinstance(attr, FunctionType):
-            # should never get here, since classes should not provide functions
-            meth = fromFunction(attr, iface, name=name)
-        elif (isinstance(attr, MethodTypes)
-              and type(attr.im_func) is FunctionType):
-            meth = fromMethod(attr, iface, name)
-        else:
-            if not callable(attr):
-                raise BrokenMethodImplementation(name, "Not a method")
-            # sigh, it's callable, but we don't know how to introspect it, so
-            # we have to give it a pass.
-            continue
-
-        # Make sure that the required and implemented method signatures are
-        # the same.
-        desc = desc.getSignatureInfo()
-        meth = meth.getSignatureInfo()
-
-        mess = _incompat(desc, meth)
-        if mess:
-            raise BrokenMethodImplementation(name, mess)
-
-    return True
-
-def verifyClass(iface, candidate, tentative=0):
-    return _verify(iface, candidate, tentative, vtype='c')
-
-def verifyObject(iface, candidate, tentative=0):
-    return _verify(iface, candidate, tentative, vtype='o')
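-# Example usage (an illustrative sketch mirroring tests/test_verify.py):
-#
-#   from zope.interface import Interface, implements
-#
-#   class IFoo(Interface):
-#       def f(a): "a method the implementation must provide"
-#
-#   class Foo(object):
-#       implements(IFoo)
-#       def f(self, a): return a
-#
-#   verifyClass(IFoo, Foo)    # checks the declaration and method signatures
-#   verifyObject(IFoo, Foo()) # additionally checks non-method attributes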
-
-def _incompat(required, implemented):
-    #if (required['positional'] !=
-    #    implemented['positional'][:len(required['positional'])]
-    #    and implemented['kwargs'] is None):
-    #    return 'implementation has different argument names'
-    if len(implemented['required']) > len(required['required']):
-        return 'implementation requires too many arguments'
-    if ((len(implemented['positional']) < len(required['positional']))
-        and not implemented['varargs']):
-        return "implementation doesn't allow enough arguments"
-    if required['kwargs'] and not implemented['kwargs']:
-        return "implementation doesn't support keyword arguments"
-    if required['varargs'] and not implemented['varargs']:
-        return "implementation doesn't support variable arguments"
diff --git a/branches/bug1734/src/zope/proxy/DEPENDENCIES.cfg b/branches/bug1734/src/zope/proxy/DEPENDENCIES.cfg
deleted file mode 100644
index 3c3868d6..00000000
--- a/branches/bug1734/src/zope/proxy/DEPENDENCIES.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-zope.interface
-zope.testing
diff --git a/branches/bug1734/src/zope/proxy/SETUP.cfg b/branches/bug1734/src/zope/proxy/SETUP.cfg
deleted file mode 100644
index 1c9f5cf0..00000000
--- a/branches/bug1734/src/zope/proxy/SETUP.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-# Packaging information for zpkg.
-
-header proxy.h
-
-<extension _zope_proxy_proxy>
-  source      _zope_proxy_proxy.c
-  depends-on  proxy.h
-</extension>
diff --git a/branches/bug1734/src/zope/proxy/__init__.py b/branches/bug1734/src/zope/proxy/__init__.py
deleted file mode 100644
index 9556fe26..00000000
--- a/branches/bug1734/src/zope/proxy/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""More convenience functions for dealing with proxies.
-
-$Id$
-"""
-from zope.interface import moduleProvides
-from zope.proxy.interfaces import IProxyIntrospection
-from types import ClassType
-from zope.proxy._zope_proxy_proxy import *
-from zope.proxy._zope_proxy_proxy import _CAPI
-
-moduleProvides(IProxyIntrospection)
-__all__ = tuple(IProxyIntrospection)
-
-def ProxyIterator(p):
-    yield p
-    while isProxy(p):
-        p = getProxiedObject(p)
-        yield p
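-# ProxyIterator yields the proxy itself, then each successively unwrapped
-# object, ending with the bare target.  Illustrative use (sketch):
-#
-#   p = ProxyBase(ProxyBase([1, 2, 3]))
-#   list(ProxyIterator(p))[-1]   # -> the underlying list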
diff --git a/branches/bug1734/src/zope/proxy/_zope_proxy_proxy.c b/branches/bug1734/src/zope/proxy/_zope_proxy_proxy.c
deleted file mode 100644
index 3a66486b..00000000
--- a/branches/bug1734/src/zope/proxy/_zope_proxy_proxy.c
+++ /dev/null
@@ -1,1098 +0,0 @@
-/*############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-############################################################################*/
-
-/*
- *   This file is also used as a really extensive macro in
- *   ../app/container/_zope_app_container_contained.c.  If you need to
- *   change this file, you need to "svn copy" it to ../app/container/.
- *
- *   This approach is taken to allow the sources for the two packages
- *   to be compilable when the relative locations of these aren't
- *   related in the same way as they are in a checkout.
- *
- *   This will be revisited in the future, but works for now.
- */
-
-#include "Python.h"
-#include "modsupport.h"
-
-#define PROXY_MODULE
-#include "proxy.h"
-
-static PyTypeObject ProxyType;
-
-#define Proxy_Check(wrapper)   (PyObject_TypeCheck((wrapper), &ProxyType))
-
-static PyObject *
-empty_tuple = NULL;
-
-
-/*
- *   Slot methods.
- */
-
-static PyObject *
-wrap_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
-{
-    PyObject *result = NULL;
-    PyObject *object;
-
-    if (PyArg_UnpackTuple(args, "__new__", 1, 1, &object)) {
-        if (kwds != NULL && PyDict_Size(kwds) != 0) {
-            PyErr_SetString(PyExc_TypeError,
-                            "proxy.__new__ does not accept keyword args");
-            return NULL;
-        }
-        result = PyType_GenericNew(type, args, kwds);
-        if (result != NULL) {
-            ProxyObject *wrapper = (ProxyObject *) result;
-            Py_INCREF(object);
-            wrapper->proxy_object = object;
-        }
-    }
-    return result;
-}
-
-static int
-wrap_init(PyObject *self, PyObject *args, PyObject *kwds)
-{
-    int result = -1;
-    PyObject *object;
-
-    if (PyArg_UnpackTuple(args, "__init__", 1, 1, &object)) {
-        ProxyObject *wrapper = (ProxyObject *)self;
-        if (kwds != NULL && PyDict_Size(kwds) != 0) {
-            PyErr_SetString(PyExc_TypeError,
-                            "proxy.__init__ does not accept keyword args");
-            return -1;
-        }
-        /* If the object in this proxy is not the one we
-         * received in args, replace it with the new one.
-         */
-        if (wrapper->proxy_object != object) {
-            PyObject *temp = wrapper->proxy_object;
-            Py_INCREF(object);
-            wrapper->proxy_object = object;
-            Py_DECREF(temp);
-        }
-        result = 0;
-    }
-    return result;
-}
-
-static int
-wrap_traverse(PyObject *self, visitproc visit, void *arg)
-{
-    PyObject *ob = Proxy_GET_OBJECT(self);
-    if (ob != NULL)
-        return visit(ob, arg);
-    else
-        return 0;
-}
-
-static int
-wrap_clear(PyObject *self)
-{
-    ProxyObject *proxy = (ProxyObject *)self;
-    PyObject *temp = proxy->proxy_object;
-
-    if (temp != NULL) {
-        proxy->proxy_object = NULL;
-        Py_DECREF(temp);
-    }
-    return 0;
-}
-
-static PyObject *
-wrap_richcompare(PyObject* self, PyObject* other, int op)
-{
-    if (Proxy_Check(self)) {
-        self = Proxy_GET_OBJECT(self);
-    }
-    else {
-        other = Proxy_GET_OBJECT(other);
-    }
-    return PyObject_RichCompare(self, other, op);
-}
-
-static PyObject *
-wrap_iter(PyObject *self)
-{
-    return PyObject_GetIter(Proxy_GET_OBJECT(self));
-}
-
-static PyObject *
-wrap_iternext(PyObject *self)
-{
-    return PyIter_Next(Proxy_GET_OBJECT(self));
-}
-
-static void
-wrap_dealloc(PyObject *self)
-{
-    (void) wrap_clear(self);
-    self->ob_type->tp_free(self);
-}
-
-/* A variant of _PyType_Lookup that skips ProxyType itself while walking
- * the MRO, so only attributes defined on proxy subclasses are found by
- * this lookup.
- */
-PyObject *
-WrapperType_Lookup(PyTypeObject *type, PyObject *name)
-{
-    int i, n;
-    PyObject *mro, *res, *base, *dict;
-
-    /* Look in tp_dict of types in MRO */
-    mro = type->tp_mro;
-
-    /* If mro is NULL, the type is either not yet initialized
-       by PyType_Ready(), or already cleared by type_clear().
-       Either way the safest thing to do is to return NULL. */
-    if (mro == NULL)
-        return NULL;
-
-    assert(PyTuple_Check(mro));
-
-    /* We don't want to look at the last item, which is object. */
-    n = PyTuple_GET_SIZE(mro) - 1;
-
-    for (i = 0; i < n; i++) {
-        base = PyTuple_GET_ITEM(mro, i);
-
-        if (((PyTypeObject *)base) != &ProxyType) {
-            if (PyClass_Check(base))
-                dict = ((PyClassObject *)base)->cl_dict;
-            else {
-                assert(PyType_Check(base));
-                dict = ((PyTypeObject *)base)->tp_dict;
-            }
-            assert(dict && PyDict_Check(dict));
-            res = PyDict_GetItem(dict, name);
-            if (res != NULL)
-                return res;
-        }
-    }
-    return NULL;
-}
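-/* Consequence of skipping ProxyType above: descriptors defined on proxy
- * subclasses are found by this lookup and handled by the proxy itself,
- * while names that exist only on ProxyType or on the wrapped object fall
- * through to the wrapped object (see wrap_getattro below). */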
-
-
-static PyObject *
-wrap_getattro(PyObject *self, PyObject *name)
-{
-    PyObject *wrapped;
-    PyObject *descriptor;
-    PyObject *res = NULL;
-    char *name_as_string;
-    int maybe_special_name;
-
-#ifdef Py_USING_UNICODE
-    /* The Unicode to string conversion is done here because the
-       existing tp_getattro slots expect a string object as name
-       and we wouldn't want to break those. */
-    if (PyUnicode_Check(name)) {
-        name = PyUnicode_AsEncodedString(name, NULL, NULL);
-        if (name == NULL)
-            return NULL;
-    }
-    else
-#endif
-    if (!PyString_Check(name)){
-        PyErr_SetString(PyExc_TypeError, "attribute name must be string");
-        return NULL;
-    }
-    else
-        Py_INCREF(name);
-
-    name_as_string = PyString_AS_STRING(name);
-    wrapped = Proxy_GET_OBJECT(self);
-    if (wrapped == NULL) {
-        PyErr_Format(PyExc_RuntimeError,
-            "object is NULL; requested to get attribute '%s'",
-            name_as_string);
-        goto finally;
-    }
-
-    maybe_special_name = name_as_string[0] == '_' && name_as_string[1] == '_';
-
-    if (!(maybe_special_name && strcmp(name_as_string, "__class__") == 0)) {
-
-        descriptor = WrapperType_Lookup(self->ob_type, name);
-
-        if (descriptor != NULL) {
-            if (PyType_HasFeature(descriptor->ob_type, Py_TPFLAGS_HAVE_CLASS)
-                && descriptor->ob_type->tp_descr_get != NULL) {
-                res = descriptor->ob_type->tp_descr_get(
-                        descriptor,
-                        self,
-                        (PyObject *)self->ob_type);
-            } else {
-                Py_INCREF(descriptor);
-                res = descriptor;
-            }
-            goto finally;
-        }
-    }
-    res = PyObject_GetAttr(wrapped, name);
-
-finally:
-    Py_DECREF(name);
-    return res;
-}
-
-static int
-wrap_setattro(PyObject *self, PyObject *name, PyObject *value)
-{
-    PyObject *wrapped;
-    PyObject *descriptor;
-    int res = -1;
-
-#ifdef Py_USING_UNICODE
-    /* The Unicode to string conversion is done here because the
-       existing tp_setattro slots expect a string object as name
-       and we wouldn't want to break those. */
-    if (PyUnicode_Check(name)) {
-        name = PyUnicode_AsEncodedString(name, NULL, NULL);
-        if (name == NULL)
-            return -1;
-    }
-    else
-#endif
-    if (!PyString_Check(name)){
-        PyErr_SetString(PyExc_TypeError, "attribute name must be string");
-        return -1;
-    }
-    else
-        Py_INCREF(name);
-
-    descriptor = WrapperType_Lookup(self->ob_type, name);
-    if (descriptor != NULL) {
-        if (PyType_HasFeature(descriptor->ob_type, Py_TPFLAGS_HAVE_CLASS) &&
-            descriptor->ob_type->tp_descr_set != NULL) {
-            res = descriptor->ob_type->tp_descr_set(descriptor, self, value);
-        } else {
-            PyErr_Format(PyExc_TypeError,
-                "Tried to set attribute '%s' on wrapper, but it is not"
-                " a data descriptor", PyString_AS_STRING(name));
-        }
-        goto finally;
-    }
-
-    wrapped = Proxy_GET_OBJECT(self);
-    if (wrapped == NULL) {
-        PyErr_Format(PyExc_RuntimeError,
-            "object is NULL; requested to set attribute '%s'",
-            PyString_AS_STRING(name));
-        goto finally;
-    }
-    res = PyObject_SetAttr(wrapped, name, value);
-
-finally:
-    Py_DECREF(name);
-    return res;
-}
-
-static int
-wrap_print(PyObject *wrapper, FILE *fp, int flags)
-{
-    return PyObject_Print(Proxy_GET_OBJECT(wrapper), fp, flags);
-}
-
-static PyObject *
-wrap_str(PyObject *wrapper)
-{
-    return PyObject_Str(Proxy_GET_OBJECT(wrapper));
-}
-
-static PyObject *
-wrap_repr(PyObject *wrapper)
-{
-    return PyObject_Repr(Proxy_GET_OBJECT(wrapper));
-}
-
-
-static int
-wrap_compare(PyObject *wrapper, PyObject *v)
-{
-    return PyObject_Compare(Proxy_GET_OBJECT(wrapper), v);
-}
-
-static long
-wrap_hash(PyObject *self)
-{
-    return PyObject_Hash(Proxy_GET_OBJECT(self));
-}
-
-static PyObject *
-wrap_call(PyObject *self, PyObject *args, PyObject *kw)
-{
-    if (kw)
-        return PyEval_CallObjectWithKeywords(Proxy_GET_OBJECT(self),
-					     args, kw);
-    else
-        return PyObject_CallObject(Proxy_GET_OBJECT(self), args);
-}
-
-/*
- *   Number methods
- */
-
-static PyObject *
-call_int(PyObject *self)
-{
-    PyNumberMethods *nb = self->ob_type->tp_as_number;
-    if (nb == NULL || nb->nb_int == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "object can't be converted to int");
-        return NULL;
-    }
-    return nb->nb_int(self);
-}
-
-static PyObject *
-call_long(PyObject *self)
-{
-    PyNumberMethods *nb = self->ob_type->tp_as_number;
-    if (nb == NULL || nb->nb_long == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "object can't be converted to long");
-        return NULL;
-    }
-    return nb->nb_long(self);
-}
-
-static PyObject *
-call_float(PyObject *self)
-{
-    PyNumberMethods *nb = self->ob_type->tp_as_number;
-    if (nb == NULL || nb->nb_float == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "object can't be converted to float");
-        return NULL;
-    }
-    return nb->nb_float(self);
-}
-
-static PyObject *
-call_oct(PyObject *self)
-{
-    PyNumberMethods *nb = self->ob_type->tp_as_number;
-    if (nb == NULL || nb->nb_oct == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "object can't be converted to oct");
-        return NULL;
-    }
-    return nb->nb_oct(self);
-}
-
-static PyObject *
-call_hex(PyObject *self)
-{
-    PyNumberMethods *nb = self->ob_type->tp_as_number;
-    if (nb == NULL || nb->nb_hex == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "object can't be converted to hex");
-        return NULL;
-    }
-    return nb->nb_hex(self);
-}
-
-static PyObject *
-call_ipow(PyObject *self, PyObject *other)
-{
-    /* PyNumber_InPlacePower has three args.  How silly. :-) */
-    return PyNumber_InPlacePower(self, other, Py_None);
-}
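-
-/* Used via INPLACE(pow, call_ipow) below: `p **= 2` goes through
-   check2i, which invokes PyNumber_InPlacePower(unwrapped, 2, Py_None)
-   here. */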
-
-typedef PyObject *(*function1)(PyObject *);
-
-static PyObject *
-check1(ProxyObject *self, char *opname, function1 operation)
-{
-    PyObject *result = NULL;
-
-    result = operation(Proxy_GET_OBJECT(self));
-#if 0
-    if (result != NULL)
-        /* XXX create proxy for result? */
-        ;
-#endif
-    return result;
-}
-
-static PyObject *
-check2(PyObject *self, PyObject *other,
-       char *opname, char *ropname, binaryfunc operation)
-{
-    PyObject *result = NULL;
-    PyObject *object;
-
-    if (Proxy_Check(self)) {
-        object = Proxy_GET_OBJECT(self);
-        result = operation(object, other);
-    }
-    else if (Proxy_Check(other)) {
-        object = Proxy_GET_OBJECT(other);
-        result = operation(self, object);
-    }
-    else {
-        Py_INCREF(Py_NotImplemented);
-        return Py_NotImplemented;
-    }
-#if 0
-    if (result != NULL)
-        /* XXX create proxy for result? */
-        ;
-#endif
-    return result;
-}
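-
-/* For example, `ProxyBase(2) + 3` reaches wrap_add, which calls
-   check2; the left operand is unwrapped and PyNumber_Add(2, 3) is
-   computed.  If neither operand is a proxy, Py_NotImplemented is
-   returned so the interpreter can try other implementations. */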
-
-static PyObject *
-check2i(ProxyObject *self, PyObject *other,
-        char *opname, binaryfunc operation)
-{
-    PyObject *result = NULL;
-    PyObject *object = Proxy_GET_OBJECT(self);
-
-    result = operation(object, other);
-    if (result == object) {
-        /* If the operation was really carried out inplace,
-           don't create a new proxy, but use the old one. */
-        Py_INCREF(self);
-        Py_DECREF(object);
-        result = (PyObject *)self;
-    }
-#if 0
-    else if (result != NULL)
-        /* XXX create proxy for result? */
-        ;
-#endif
-    return result;
-}
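-
-/* For example, with p = ProxyBase(some_list), `p += [4]` mutates the
-   unwrapped list, which returns itself from the in-place operation;
-   the branch above then hands back the original proxy rather than a
-   freshly unwrapped result. */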
-
-#define UNOP(NAME, CALL) \
-	static PyObject *wrap_##NAME(PyObject *self) \
-	{ return check1((ProxyObject *)self, "__"#NAME"__", CALL); }
-
-#define BINOP(NAME, CALL) \
-	static PyObject *wrap_##NAME(PyObject *self, PyObject *other) \
-	{ return check2(self, other, "__"#NAME"__", "__r"#NAME"__", CALL); }
-
-#define INPLACE(NAME, CALL) \
-	static PyObject *wrap_i##NAME(PyObject *self, PyObject *other) \
-	{ return check2i((ProxyObject *)self, other, "__i"#NAME"__", CALL); }
-
-BINOP(add, PyNumber_Add)
-BINOP(sub, PyNumber_Subtract)
-BINOP(mul, PyNumber_Multiply)
-BINOP(div, PyNumber_Divide)
-BINOP(mod, PyNumber_Remainder)
-BINOP(divmod, PyNumber_Divmod)
-
-static PyObject *
-wrap_pow(PyObject *self, PyObject *other, PyObject *modulus)
-{
-    PyObject *result = NULL;
-    PyObject *object;
-
-    if (Proxy_Check(self)) {
-        object = Proxy_GET_OBJECT(self);
-        result = PyNumber_Power(object, other, modulus);
-    }
-    else if (Proxy_Check(other)) {
-        object = Proxy_GET_OBJECT(other);
-        result = PyNumber_Power(self, object, modulus);
-    }
-    else if (modulus != NULL && Proxy_Check(modulus)) {
-        object = Proxy_GET_OBJECT(modulus);
-        result = PyNumber_Power(self, other, object);
-    }
-    else {
-        Py_INCREF(Py_NotImplemented);
-        return Py_NotImplemented;
-    }
-    return result;
-}
-
-BINOP(lshift, PyNumber_Lshift)
-BINOP(rshift, PyNumber_Rshift)
-BINOP(and, PyNumber_And)
-BINOP(xor, PyNumber_Xor)
-BINOP(or, PyNumber_Or)
-
-static int
-wrap_coerce(PyObject **p_self, PyObject **p_other)
-{
-    PyObject *self = *p_self;
-    PyObject *other = *p_other;
-    PyObject *object;
-    PyObject *left;
-    PyObject *right;
-    int r;
-
-    assert(Proxy_Check(self));
-    object = Proxy_GET_OBJECT(self);
-
-    left = object;
-    right = other;
-    r = PyNumber_CoerceEx(&left, &right);
-    if (r != 0)
-        return r;
-    /* Now left and right have been INCREF'ed.  Any new value that
-       comes out is proxied; any unchanged value is left unchanged. */
-    if (left == object) {
-        /* Keep the old proxy */
-        Py_INCREF(self);
-        Py_DECREF(left);
-        left = self;
-    }
-#if 0
-    else {
-        /* XXX create proxy for left? */
-    }
-    if (right != other) {
-        /* XXX create proxy for right? */
-    }
-#endif
-    *p_self = left;
-    *p_other = right;
-    return 0;
-}
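-
-/* For example, coerce(ProxyBase(1), 2) leaves both values unchanged,
-   so the proxy is kept on the left; coerce(ProxyBase(1), 2.5) creates
-   a new float on the left, which is currently returned unproxied (see
-   the disabled block above). */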
-
-UNOP(neg, PyNumber_Negative)
-UNOP(pos, PyNumber_Positive)
-UNOP(abs, PyNumber_Absolute)
-UNOP(invert, PyNumber_Invert)
-
-UNOP(int, call_int)
-UNOP(long, call_long)
-UNOP(float, call_float)
-UNOP(oct, call_oct)
-UNOP(hex, call_hex)
-
-INPLACE(add, PyNumber_InPlaceAdd)
-INPLACE(sub, PyNumber_InPlaceSubtract)
-INPLACE(mul, PyNumber_InPlaceMultiply)
-INPLACE(div, PyNumber_InPlaceDivide)
-INPLACE(mod, PyNumber_InPlaceRemainder)
-INPLACE(pow, call_ipow)
-INPLACE(lshift, PyNumber_InPlaceLshift)
-INPLACE(rshift, PyNumber_InPlaceRshift)
-INPLACE(and, PyNumber_InPlaceAnd)
-INPLACE(xor, PyNumber_InPlaceXor)
-INPLACE(or, PyNumber_InPlaceOr)
-
-BINOP(floordiv, PyNumber_FloorDivide)
-BINOP(truediv, PyNumber_TrueDivide)
-INPLACE(floordiv, PyNumber_InPlaceFloorDivide)
-INPLACE(truediv, PyNumber_InPlaceTrueDivide)
-
-static int
-wrap_nonzero(PyObject *self)
-{
-    return PyObject_IsTrue(Proxy_GET_OBJECT(self));
-}
-
-/*
- *   Sequence methods
- */
-
-static int
-wrap_length(PyObject *self)
-{
-    return PyObject_Length(Proxy_GET_OBJECT(self));
-}
-
-static PyObject *
-wrap_slice(PyObject *self, int start, int end)
-{
-    return PySequence_GetSlice(Proxy_GET_OBJECT(self), start, end);
-}
-
-static int
-wrap_ass_slice(PyObject *self, int i, int j, PyObject *value)
-{
-    return PySequence_SetSlice(Proxy_GET_OBJECT(self), i, j, value);
-}
-
-static int
-wrap_contains(PyObject *self, PyObject *value)
-{
-    return PySequence_Contains(Proxy_GET_OBJECT(self), value);
-}
-
-/*
- *   Mapping methods
- */
-
-static PyObject *
-wrap_getitem(PyObject *wrapper, PyObject *v)
-{
-    return PyObject_GetItem(Proxy_GET_OBJECT(wrapper), v);
-}
-
-static int
-wrap_setitem(PyObject *self, PyObject *key, PyObject *value)
-{
-    if (value == NULL)
-        return PyObject_DelItem(Proxy_GET_OBJECT(self), key);
-    else
-        return PyObject_SetItem(Proxy_GET_OBJECT(self), key, value);
-}
-
-/*
- *   Normal methods
- */
-
-static char
-reduce__doc__[] =
-"__reduce__()\n"
-"Raise an exception; this prevents proxies from being picklable by\n"
-"default, even if the underlying object is picklable.";
-
-static PyObject *
-wrap_reduce(PyObject *self)
-{
-    PyObject *pickle_error = NULL;
-    PyObject *pickle = PyImport_ImportModule("pickle");
-
-    if (pickle == NULL)
-        PyErr_Clear();
-    else {
-        pickle_error = PyObject_GetAttrString(pickle, "PicklingError");
-        if (pickle_error == NULL)
-            PyErr_Clear();
-    }
-    if (pickle_error == NULL) {
-        pickle_error = PyExc_RuntimeError;
-        Py_INCREF(pickle_error);
-    }
-    PyErr_SetString(pickle_error,
-                    "proxy instances cannot be pickled");
-    Py_DECREF(pickle_error);
-    return NULL;
-}
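-
-/* In effect, pickle.dumps(ProxyBase(obj)) raises pickle.PicklingError
-   even when obj itself is picklable; test_pickle_prevention in
-   tests/test_proxy.py exercises this. */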
-
-static PyNumberMethods
-wrap_as_number = {
-    wrap_add,				/* nb_add */
-    wrap_sub,				/* nb_subtract */
-    wrap_mul,				/* nb_multiply */
-    wrap_div,				/* nb_divide */
-    wrap_mod,				/* nb_remainder */
-    wrap_divmod,			/* nb_divmod */
-    wrap_pow,				/* nb_power */
-    wrap_neg,				/* nb_negative */
-    wrap_pos,				/* nb_positive */
-    wrap_abs,				/* nb_absolute */
-    wrap_nonzero,			/* nb_nonzero */
-    wrap_invert,			/* nb_invert */
-    wrap_lshift,			/* nb_lshift */
-    wrap_rshift,			/* nb_rshift */
-    wrap_and,				/* nb_and */
-    wrap_xor,				/* nb_xor */
-    wrap_or,				/* nb_or */
-    wrap_coerce,			/* nb_coerce */
-    wrap_int,				/* nb_int */
-    wrap_long,				/* nb_long */
-    wrap_float,				/* nb_float */
-    wrap_oct,				/* nb_oct */
-    wrap_hex,				/* nb_hex */
-
-    /* Added in release 2.0 */
-    /* These require the Py_TPFLAGS_HAVE_INPLACEOPS flag */
-    wrap_iadd,				/* nb_inplace_add */
-    wrap_isub,				/* nb_inplace_subtract */
-    wrap_imul,				/* nb_inplace_multiply */
-    wrap_idiv,				/* nb_inplace_divide */
-    wrap_imod,				/* nb_inplace_remainder */
-    (ternaryfunc)wrap_ipow,		/* nb_inplace_power */
-    wrap_ilshift,			/* nb_inplace_lshift */
-    wrap_irshift,			/* nb_inplace_rshift */
-    wrap_iand,				/* nb_inplace_and */
-    wrap_ixor,				/* nb_inplace_xor */
-    wrap_ior,				/* nb_inplace_or */
-
-    /* Added in release 2.2 */
-    /* These require the Py_TPFLAGS_HAVE_CLASS flag */
-    wrap_floordiv,			/* nb_floor_divide */
-    wrap_truediv,			/* nb_true_divide */
-    wrap_ifloordiv,			/* nb_inplace_floor_divide */
-    wrap_itruediv,			/* nb_inplace_true_divide */
-};
-
-static PySequenceMethods
-wrap_as_sequence = {
-    wrap_length,			/* sq_length */
-    0,					/* sq_concat */
-    0,					/* sq_repeat */
-    0,					/* sq_item */
-    wrap_slice,				/* sq_slice */
-    0,					/* sq_ass_item */
-    wrap_ass_slice,			/* sq_ass_slice */
-    wrap_contains,			/* sq_contains */
-};
-
-static PyMappingMethods
-wrap_as_mapping = {
-    wrap_length,			/* mp_length */
-    wrap_getitem,			/* mp_subscript */
-    wrap_setitem,			/* mp_ass_subscript */
-};
-
-static PyMethodDef
-wrap_methods[] = {
-    {"__reduce__", (PyCFunction)wrap_reduce, METH_NOARGS, reduce__doc__},
-    {NULL, NULL},
-};
-
-/*
- * Note that the numeric methods are not supported.  This is primarily
- * because of the way coercion-less operations are performed with
- * new-style numbers; since we can't tell which side of the operation
- * is 'self', we can't ensure we'd unwrap the right thing to perform
- * the actual operation.  We also can't afford to just unwrap both
- * sides the way weakrefs do, since we don't know what semantics will
- * be associated with the wrapper itself.
- */
-
-statichere PyTypeObject
-ProxyType = {
-    PyObject_HEAD_INIT(NULL)  /* PyObject_HEAD_INIT(&PyType_Type) */  
-    0,
-    "zope.proxy.ProxyBase",
-    sizeof(ProxyObject),
-    0,
-    wrap_dealloc,			/* tp_dealloc */
-    wrap_print,				/* tp_print */
-    0,					/* tp_getattr */
-    0,					/* tp_setattr */
-    wrap_compare,			/* tp_compare */
-    wrap_repr,				/* tp_repr */
-    &wrap_as_number,			/* tp_as_number */
-    &wrap_as_sequence,			/* tp_as_sequence */
-    &wrap_as_mapping,			/* tp_as_mapping */
-    wrap_hash,				/* tp_hash */
-    wrap_call,				/* tp_call */
-    wrap_str,				/* tp_str */
-    wrap_getattro,			/* tp_getattro */
-    wrap_setattro,			/* tp_setattro */
-    0,					/* tp_as_buffer */
-    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC
-        | Py_TPFLAGS_CHECKTYPES | Py_TPFLAGS_BASETYPE, /* tp_flags */
-    0,					/* tp_doc */
-    wrap_traverse,			/* tp_traverse */
-    wrap_clear,				/* tp_clear */
-    wrap_richcompare,			/* tp_richcompare */
-    0,					/* tp_weaklistoffset */
-    wrap_iter,				/* tp_iter */
-    wrap_iternext,			/* tp_iternext */
-    wrap_methods,			/* tp_methods */
-    0,					/* tp_members */
-    0,					/* tp_getset */
-    0,					/* tp_base */
-    0,					/* tp_dict */
-    0,					/* tp_descr_get */
-    0,					/* tp_descr_set */
-    0,					/* tp_dictoffset */
-    wrap_init,				/* tp_init */
-    0,                   		/* tp_alloc */
-    wrap_new,				/* tp_new */
-    0, /*_PyObject_GC_Del,*/		/* tp_free */
-};
-
-static PyObject *
-create_proxy(PyObject *object)
-{
-    PyObject *result = NULL;
-    PyObject *args;
-
-    args = PyTuple_New(1);
-    if (args != NULL) {
-        Py_INCREF(object);
-        PyTuple_SET_ITEM(args, 0, object);
-        result = PyObject_CallObject((PyObject *)&ProxyType, args);
-        Py_DECREF(args);
-    }
-    return result;
-}
-
-static int
-api_check(PyObject *obj)
-{
-    return obj ? Proxy_Check(obj) : 0;
-}
-
-static PyObject *
-api_create(PyObject *object)
-{
-    if (object == NULL) {
-        PyErr_SetString(PyExc_ValueError,
-                        "cannot create proxy around NULL");
-        return NULL;
-    }
-    return create_proxy(object);
-}
-
-static PyObject *
-api_getobject(PyObject *proxy)
-{
-    if (proxy == NULL) {
-        PyErr_SetString(PyExc_RuntimeError,
-			"cannot pass NULL to ProxyAPI.getobject()");
-        return NULL;
-    }
-    if (Proxy_Check(proxy))
-        return Proxy_GET_OBJECT(proxy);
-    else {
-        PyErr_Format(PyExc_TypeError, "expected proxy object, got %s",
-		     proxy->ob_type->tp_name);
-        return NULL;
-    }
-}
-
-static ProxyInterface
-wrapper_capi = {
-    &ProxyType,
-    api_check,
-    api_create,
-    api_getobject,
-};
-
-static PyObject *api_object = NULL;
-
-
-static char
-getobject__doc__[] =
-"getProxiedObject(proxy) --> object\n"
-"\n"
-"Get the underlying object for proxy, or the object itself, if it is\n"
-"not a proxy.";
-
-static PyObject *
-wrapper_getobject(PyObject *unused, PyObject *obj)
-{
-  if (Proxy_Check(obj)) 
-    obj = Proxy_GET_OBJECT(obj);
-  
-  if (obj == NULL)
-    obj = Py_None;
-
-  Py_INCREF(obj);
-  return obj;
-}
-
-static char
-isProxy__doc__[] =
-"Check whether the given object is a proxy\n"
-"\n"
-"If proxytype is not None, checkes whether the object is\n"
-"proxied by the given proxytype.\n"
-;
-
-static PyObject *
-wrapper_isProxy(PyObject *unused, PyObject *args)
-{
-  PyObject *obj, *result;
-  PyTypeObject *proxytype=&ProxyType;
-
-  if (! PyArg_ParseTuple(args, "O|O!:isProxy", 
-                         &obj, &PyType_Type, &proxytype)
-      )
-    return NULL;
-
-  while (obj && Proxy_Check(obj))
-  {
-    if (PyObject_TypeCheck(obj, proxytype))
-      {
-        result = Py_True;
-	Py_INCREF(result);
-        return result;
-      }
-    obj = Proxy_GET_OBJECT(obj);
-  }
-  result = Py_False;
-  Py_INCREF(result);
-  return result;
-}
-
-static char
-removeAllProxies__doc__[] =
-"removeAllProxies(proxy) --> object\n"
-"\n"
-"Get the proxied object with no proxies\n"
-"\n"
-"If obj is not a proxied object, return obj.\n"
-"\n"
-"The returned object has no proxies.\n"
-;
-
-static PyObject *
-wrapper_removeAllProxies(PyObject *unused, PyObject *obj)
-{
-  while (obj && Proxy_Check(obj)) 
-    obj = Proxy_GET_OBJECT(obj);
-  
-  if (obj == NULL)
-    obj = Py_None;
-
-  Py_INCREF(obj);
-  return obj;
-}
-
-static char
-sameProxiedObjects__doc__[] = 
-"Check whether two objects are the same or proxies of the same object";
-
-static PyObject *
-wrapper_sameProxiedObjects(PyObject *unused, PyObject *args)
-{
-  PyObject *ob1, *ob2;
-
-  if (! PyArg_ParseTuple(args, "OO:sameProxiedObjects", &ob1, &ob2))
-    return NULL;
-
-  while (ob1 && Proxy_Check(ob1)) 
-    ob1 = Proxy_GET_OBJECT(ob1);
-
-  while (ob2 && Proxy_Check(ob2)) 
-    ob2 = Proxy_GET_OBJECT(ob2);
-
-  if (ob1 == ob2)
-    ob1 = Py_True;
-  else
-    ob1 = Py_False;
-
-  Py_INCREF(ob1);
-  return ob1;
-}
-
-
-static char
-queryProxy__doc__[] =
-"Look for a proxy of the given type around the object\n"
-"\n"
-"If no such proxy can be found, return the default.\n"
-;
-
-static PyObject *
-wrapper_queryProxy(PyObject *unused, PyObject *args)
-{
-  PyObject *obj, *result=Py_None;
-  PyTypeObject *proxytype=&ProxyType;
-
-  if (! PyArg_ParseTuple(args, "O|O!O:queryProxy", 
-                         &obj, &PyType_Type, &proxytype, &result)
-      )
-    return NULL;
-
-  while (obj && Proxy_Check(obj))
-  {
-    if (PyObject_TypeCheck(obj, proxytype))
-      {
-        Py_INCREF(obj);
-        return obj;
-      }
-    obj = Proxy_GET_OBJECT(obj);
-  }
-
-  Py_INCREF(result);
-  return result;
-}
-
-static char
-queryInnerProxy__doc__[] =
-"Look for the inner-most proxy of the given type around the object\n"
-"\n"
-"If no such proxy can be found, return the default.\n"
-"\n"
-"If there is such a proxy, return the inner-most one.\n"
-;
-
-static PyObject *
-wrapper_queryInnerProxy(PyObject *unused, PyObject *args)
-{
-  PyObject *obj, *result=Py_None;
-  PyTypeObject *proxytype=&ProxyType;
-
-  if (! PyArg_ParseTuple(args, "O|O!O:queryInnerProxy", 
-                         &obj, &PyType_Type, &proxytype, &result)
-      )
-    return NULL;
-
-  while (obj && Proxy_Check(obj))
-  {
-    if (PyObject_TypeCheck(obj, proxytype))
-      result = obj;
-    obj = Proxy_GET_OBJECT(obj);
-  }
-
-  Py_INCREF(result);
-  return result;
-}
-
-static char
-module___doc__[] =
-"Association between an object, a context object, and a dictionary.\n\
-\n\
-The context object and dictionary give additional context information\n\
-associated with a reference to the basic object.  The wrapper objects\n\
-act as proxies for the original object.";
-
-
-static PyMethodDef
-module_functions[] = {
-    {"getProxiedObject", wrapper_getobject, METH_O, getobject__doc__},
-    {"isProxy", wrapper_isProxy, METH_VARARGS, isProxy__doc__},
-    {"sameProxiedObjects", wrapper_sameProxiedObjects, METH_VARARGS, 
-     sameProxiedObjects__doc__},
-    {"queryProxy", wrapper_queryProxy, METH_VARARGS, queryProxy__doc__},
-    {"queryInnerProxy", wrapper_queryInnerProxy, METH_VARARGS, 
-     queryInnerProxy__doc__},
-    {"removeAllProxies", wrapper_removeAllProxies, METH_O, 
-     removeAllProxies__doc__},
-    {NULL}
-};
-
-void
-init_zope_proxy_proxy(void)
-{
-    PyObject *m = Py_InitModule3("_zope_proxy_proxy", 
-                                 module_functions, module___doc__);
-
-    if (m == NULL)
-        return;
-
-    if (empty_tuple == NULL)
-        empty_tuple = PyTuple_New(0);
-
-    ProxyType.tp_free = _PyObject_GC_Del;
-
-    if (PyType_Ready(&ProxyType) < 0)
-        return;
-
-    Py_INCREF(&ProxyType);
-    PyModule_AddObject(m, "ProxyBase", (PyObject *)&ProxyType);
-
-    if (api_object == NULL) {
-        api_object = PyCObject_FromVoidPtr(&wrapper_capi, NULL);
-        if (api_object == NULL)
-            return;
-    }
-    Py_INCREF(api_object);
-    PyModule_AddObject(m, "_CAPI", api_object);
-}
diff --git a/branches/bug1734/src/zope/proxy/interfaces.py b/branches/bug1734/src/zope/proxy/interfaces.py
deleted file mode 100644
index 4d62a54f..00000000
--- a/branches/bug1734/src/zope/proxy/interfaces.py
+++ /dev/null
@@ -1,62 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE
-#
-##############################################################################
-"""Proxy-related interfaces.
-
-$Id$
-"""
-
-from zope.interface import Interface
-
-class IProxyIntrospection(Interface):
-    """Provides methods for indentifying proxies and extracting proxied objects
-    """
-
-    def isProxy(obj, proxytype=None):
-        """Check whether the given object is a proxy
-
-        If proxytype is not None, checks whether the object is
-        proxied by the given proxytype.
-        """
-
-    def sameProxiedObjects(ob1, ob2):
-        """Check whether ob1 and ob2 are the same or proxies of the same object
-        """
-
-    def getProxiedObject(obj):
-        """Get the proxied Object
-
-        If the object isn't proxied, then just return the object.
-        """
-
-    def removeAllProxies(obj):
-        """Get the proxied object with no proxies
-
-        If obj is not a proxied object, return obj.
-
-        The returned object has no proxies.
-        """
-
-    def queryProxy(obj, proxytype, default=None):
-        """Look for a proxy of the given type around the object
-
-        If no such proxy can be found, return the default.
-        """
-
-    def queryInnerProxy(obj, proxytype, default=None):
-        """Look for the inner-most proxy of the given type around the object
-
-        If no such proxy can be found, return the default.
-
-        If there is such a proxy, return the inner-most one.
-        """
diff --git a/branches/bug1734/src/zope/proxy/proxy.h b/branches/bug1734/src/zope/proxy/proxy.h
deleted file mode 100644
index 06c3d74a..00000000
--- a/branches/bug1734/src/zope/proxy/proxy.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _proxy_H_
-#define _proxy_H_ 1
-
-typedef struct {
-    PyObject_HEAD
-    PyObject *proxy_object;
-} ProxyObject;
-
-#define Proxy_GET_OBJECT(ob)   (((ProxyObject *)(ob))->proxy_object)
-
-typedef struct {
-    PyTypeObject *proxytype;
-    int (*check)(PyObject *obj);
-    PyObject *(*create)(PyObject *obj);
-    PyObject *(*getobject)(PyObject *proxy);
-} ProxyInterface;
-
-
-#ifndef PROXY_MODULE
-
-/* These are only defined in the public interface, and are not
- * available within the module implementation.  There we use the
- * classic Python/C API only.
- */
-
-static ProxyInterface *_proxy_api = NULL;
-
-static int
-Proxy_Import(void)
-{
-    if (_proxy_api == NULL) {
-        PyObject *m = PyImport_ImportModule("zope.proxy");
-        if (m != NULL) {
-            PyObject *tmp = PyObject_GetAttrString(m, "_CAPI");
-            if (tmp != NULL) {
-                if (PyCObject_Check(tmp))
-                    _proxy_api = (ProxyInterface *)
-                        PyCObject_AsVoidPtr(tmp);
-                Py_DECREF(tmp);
-            }
-        }
-    }
-    return (_proxy_api == NULL) ? -1 : 0;
-}
-
-#define ProxyType               (*_proxy_api->proxytype)
-#define Proxy_Check(obj)        (_proxy_api->check((obj)))
-#define Proxy_CheckExact(obj)   ((obj)->ob_type == &ProxyType)
-#define Proxy_New(obj)          (_proxy_api->create((obj)))
-#define Proxy_GetObject(proxy)  (_proxy_api->getobject((proxy)))
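-
-/* Typical use from another extension module (an illustrative sketch;
-   real code would also handle the error left set by a failed import):
-
-       if (Proxy_Import() < 0)
-           return NULL;
-       if (Proxy_Check(ob))
-           ob = Proxy_GetObject(ob);
-*/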
-
-#endif /* PROXY_MODULE */
-
-#endif /* _proxy_H_ */
diff --git a/branches/bug1734/src/zope/proxy/tests/__init__.py b/branches/bug1734/src/zope/proxy/tests/__init__.py
deleted file mode 100644
index b711d360..00000000
--- a/branches/bug1734/src/zope/proxy/tests/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-#
-# This file is necessary to make this directory a package.
diff --git a/branches/bug1734/src/zope/proxy/tests/test_proxy.py b/branches/bug1734/src/zope/proxy/tests/test_proxy.py
deleted file mode 100644
index 6b7994e4..00000000
--- a/branches/bug1734/src/zope/proxy/tests/test_proxy.py
+++ /dev/null
@@ -1,565 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Test base proxy class.
-
-$Id$
-"""
-import pickle
-import sys
-import unittest
-
-from zope.testing.doctestunit import DocTestSuite
-from zope.proxy import ProxyBase
-
-class Thing:
-    """This class is expected to be a classic class."""
-
-class Comparable(object):
-    def __init__(self, value):
-        self.value = value
-
-    def __eq__(self, other):
-        if hasattr(other, "value"):
-            other = other.value
-        return self.value == other
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __lt__(self, other):
-        if hasattr(other, "value"):
-            other = other.value
-        return self.value < other
-
-    def __ge__(self, other):
-        return not self.__lt__(other)
-
-    def __le__(self, other):
-        if hasattr(other, "value"):
-            other = other.value
-        return self.value <= other
-
-    def __gt__(self, other):
-        return not self.__le__(other)
-
-    def __repr__(self):
-        return "<Comparable: %r>" % self.value
-
-
-class ProxyTestCase(unittest.TestCase):
-
-    proxy_class = ProxyBase
-
-    def setUp(self):
-        self.x = Thing()
-        self.p = self.new_proxy(self.x)
-
-    def new_proxy(self, o):
-        return self.proxy_class(o)
-
-    def test_constructor(self):
-        o = object()
-        self.assertRaises(TypeError, self.proxy_class, o, o)
-        self.assertRaises(TypeError, self.proxy_class, o, key='value')
-        self.assertRaises(TypeError, self.proxy_class, key='value')
-
-    def test_subclass_constructor(self):
-        class MyProxy(self.proxy_class):
-            def __new__(cls, *args, **kwds):
-                return super(MyProxy, cls).__new__(cls, *args, **kwds)
-            def __init__(self, *args, **kwds):
-                super(MyProxy, self).__init__(*args, **kwds)
-        o1 = object()
-        o2 = object()
-        o = MyProxy((o1, o2))
-
-        self.assertEquals(o1, o[0])
-        self.assertEquals(o2, o[1])
-
-        self.assertRaises(TypeError, MyProxy, o1, o2)
-        self.assertRaises(TypeError, MyProxy, o1, key='value')
-        self.assertRaises(TypeError, MyProxy, key='value')
-
-        # Check that what's passed to __init__() overrides what's
-        # passed to __new__().
-        class MyProxy2(self.proxy_class):
-            def __new__(cls, *args, **kwds):
-                return super(MyProxy2, cls).__new__(cls, 'value')
-
-        p = MyProxy2('splat!')
-        self.assertEquals(list(p), list('splat!'))
-
-        class MyProxy3(MyProxy2):
-            def __init__(self, arg):
-                if list(self) != list('value'):
-                    raise AssertionError("list(self) != list('value')")
-                super(MyProxy3, self).__init__('another')
-
-        p = MyProxy3('notused')
-        self.assertEquals(list(p), list('another'))
-
-    def test_proxy_attributes(self):
-        o = Thing()
-        o.foo = 1
-        w = self.new_proxy(o)
-        self.assert_(w.foo == 1)
-
-    def test___class__(self):
-        o = object()
-        w = self.new_proxy(o)
-        self.assert_(w.__class__ is o.__class__)
-
-    def test_pickle_prevention(self):
-        w = self.new_proxy(Thing())
-        self.assertRaises(pickle.PicklingError,
-                          pickle.dumps, w)
-
-    def test_proxy_equality(self):
-        w = self.new_proxy('foo')
-        self.assertEquals(w, 'foo')
-
-        o1 = Comparable(1)
-        o2 = Comparable(1.0)
-        o3 = Comparable("splat!")
-
-        w1 = self.new_proxy(o1)
-        w2 = self.new_proxy(o2)
-        w3 = self.new_proxy(o3)
-
-        self.assertEquals(o1, w1)
-        self.assertEquals(o1, w2)
-        self.assertEquals(o2, w1)
-        self.assertEquals(w1, o2)
-        self.assertEquals(w2, o1)
-
-        self.assertNotEquals(o3, w1)
-        self.assertNotEquals(w1, o3)
-        self.assertNotEquals(w3, o1)
-        self.assertNotEquals(o1, w3)
-
-    def test_proxy_ordering_lt(self):
-        o1 = Comparable(1)
-        o2 = Comparable(2.0)
-
-        w1 = self.new_proxy(o1)
-        w2 = self.new_proxy(o2)
-
-        self.assert_(w1 < w2)
-        self.assert_(w1 <= w2)
-        self.assert_(o1 < w2)
-        self.assert_(o1 <= w2)
-        self.assert_(w1 < o2)
-        self.assert_(w2 <= o2)
-
-    def test_proxy_callable(self):
-        w = self.new_proxy({}.get)
-        self.assert_(callable(w))
-
-    def test_proxy_item_protocol(self):
-        w = self.new_proxy({})
-        self.assertRaises(KeyError, lambda: w[1])
-        w[1] = 'a'
-        self.assertEquals(w[1], 'a')
-        del w[1]
-        self.assertRaises(KeyError, lambda: w[1])
-        def del_w_1():
-            del w[1]
-        self.assertRaises(KeyError, del_w_1)
-
-    def test_wrapped_iterable(self):
-        a = [1, 2, 3]
-        b = []
-        for x in self.new_proxy(a):
-            b.append(x)
-        self.assertEquals(a, b)
-
-    def test_iteration_over_proxy(self):
-        # Wrap an iterator before starting iteration.
-        # PyObject_GetIter() will still be called on the proxy.
-        a = [1, 2, 3]
-        b = []
-        for x in self.new_proxy(iter(a)):
-            b.append(x)
-        self.assertEquals(a, b)
-        t = tuple(self.new_proxy(iter(a)))
-        self.assertEquals(t, (1, 2, 3))
-
-    def test_iteration_using_proxy(self):
-        # Wrap an iterator within the iteration protocol, expecting it
-        # still to work.  PyObject_GetIter() will not be called on the
-        # proxy, so the tp_iter slot won't unwrap it.
-
-        class Iterable(object):
-            def __init__(self, test, data):
-                self.test = test
-                self.data = data
-            def __iter__(self):
-                return self.test.new_proxy(iter(self.data))
-
-        a = [1, 2, 3]
-        b = []
-        for x in Iterable(self, a):
-            b.append(x)
-        self.assertEquals(a, b)
-
-    def test_bool_wrapped_None(self):
-        w = self.new_proxy(None)
-        self.assertEquals(not w, 1)
-
-    # Numeric ops.
-
-    unops = [
-        "-x", "+x", "abs(x)", "~x",
-        "int(x)", "long(x)", "float(x)",
-        ]
-
-    def test_unops(self):
-        P = self.new_proxy
-        for expr in self.unops:
-            x = 1
-            y = eval(expr)
-            x = P(1)
-            z = eval(expr)
-            self.assertEqual(z, y,
-                             "x=%r; expr=%r" % (x, expr))
-
-    def test_odd_unops(self):
-        # unops that don't return a proxy
-        P = self.new_proxy
-        for func in hex, oct, lambda x: not x:
-            self.assertEqual(func(P(100)), func(100))
-
-    binops = [
-        "x+y", "x-y", "x*y", "x/y", "divmod(x, y)", "x**y", "x//y",
-        "x<<y", "x>>y", "x&y", "x|y", "x^y",
-        ]
-
-    def test_binops(self):
-        P = self.new_proxy
-        for expr in self.binops:
-            first = 1
-            for x in [1, P(1)]:
-                for y in [2, P(2)]:
-                    if first:
-                        z = eval(expr)
-                        first = 0
-                    else:
-                        self.assertEqual(eval(expr), z,
-                                         "x=%r; y=%r; expr=%r" % (x, y, expr))
-
-    def test_inplace(self):
-        # TODO: should test all inplace operators...
-        P = self.new_proxy
-
-        pa = P(1)
-        pa += 2
-        self.assertEqual(pa, 3)
-
-        a = [1, 2, 3]
-        pa = qa = P(a)
-        pa += [4, 5, 6]
-        self.failUnless(pa is qa)
-        self.assertEqual(a, [1, 2, 3, 4, 5, 6])
-
-        pa = P(2)
-        pa **= 2
-        self.assertEqual(pa, 4)
-
-    def test_coerce(self):
-        P = self.new_proxy
-
-        # Before 2.3, coerce() of two proxies returns them unchanged
-        fixed_coerce = sys.version_info >= (2, 3, 0)
-
-        x = P(1)
-        y = P(2)
-        a, b = coerce(x, y)
-        self.failUnless(a is x and b is y)
-
-        x = P(1)
-        y = P(2.1)
-        a, b = coerce(x, y)
-        self.failUnless(a == 1.0)
-        self.failUnless(b is y)
-        if fixed_coerce:
-            self.failUnless(a.__class__ is float, a.__class__)
-
-        x = P(1.1)
-        y = P(2)
-        a, b = coerce(x, y)
-        self.failUnless(a is x)
-        self.failUnless(b == 2.0)
-        if fixed_coerce:
-            self.failUnless(b.__class__ is float, b.__class__)
-
-        x = P(1)
-        y = 2
-        a, b = coerce(x, y)
-        self.failUnless(a is x)
-        self.failUnless(b is y)
-
-        x = P(1)
-        y = 2.1
-        a, b = coerce(x, y)
-        self.failUnless(a.__class__ is float, a.__class__)
-        self.failUnless(b is y)
-
-        x = P(1.1)
-        y = 2
-        a, b = coerce(x, y)
-        self.failUnless(a is x)
-        self.failUnless(b.__class__ is float, b.__class__)
-
-        x = 1
-        y = P(2)
-        a, b = coerce(x, y)
-        self.failUnless(a is x)
-        self.failUnless(b is y)
-
-        x = 1.1
-        y = P(2)
-        a, b = coerce(x, y)
-        self.failUnless(a is x)
-        self.failUnless(b.__class__ is float, b.__class__)
-
-        x = 1
-        y = P(2.1)
-        a, b = coerce(x, y)
-        self.failUnless(a.__class__ is float, a.__class__)
-        self.failUnless(b is y)
-
-
-def test_isProxy():
-    """
-    >>> from zope.proxy import ProxyBase, isProxy
-    >>> class P1(ProxyBase):
-    ...     pass
-    >>> class P2(ProxyBase):
-    ...     pass
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> int(isProxy(c))
-    0
-    >>> p = P1(c)
-    >>> int(isProxy(p))
-    1
-    >>> int(isProxy(p, P1))
-    1
-    >>> int(isProxy(p, P2))
-    0
-    >>> p = P2(p)
-    >>> int(isProxy(p, P1))
-    1
-    >>> int(isProxy(p, P2))
-    1
-
-    """
-
-def test_getProxiedObject():
-    """
-    >>> from zope.proxy import ProxyBase, getProxiedObject
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> int(getProxiedObject(c) is c)
-    1
-    >>> p = ProxyBase(c)
-    >>> int(getProxiedObject(p) is c)
-    1
-    >>> p2 = ProxyBase(p)
-    >>> int(getProxiedObject(p2) is p)
-    1
-
-    """
-
-def test_ProxyIterator():
-    """
-    >>> from zope.proxy import ProxyBase, ProxyIterator
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> p1 = ProxyBase(c)
-    >>> class P(ProxyBase):
-    ...     pass
-    >>> p2 = P(p1)
-    >>> p3 = ProxyBase(p2)
-    >>> list(ProxyIterator(p3)) == [p3, p2, p1, c]
-    1
-    """
-
-def test_removeAllProxies():
-    """
-    >>> from zope.proxy import ProxyBase, removeAllProxies
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> int(removeAllProxies(c) is c)
-    1
-    >>> p = ProxyBase(c)
-    >>> int(removeAllProxies(p) is c)
-    1
-    >>> p2 = ProxyBase(p)
-    >>> int(removeAllProxies(p2) is c)
-    1
-
-    """
-
-def test_queryProxy():
-    """
-    >>> from zope.proxy import ProxyBase, queryProxy
-    >>> class P1(ProxyBase):
-    ...    pass
-    >>> class P2(ProxyBase):
-    ...    pass
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> queryProxy(c, P1)
-    >>> queryProxy(c, P1, 42)
-    42
-    >>> p1 = P1(c)
-    >>> int(queryProxy(p1, P1) is p1)
-    1
-    >>> queryProxy(c, P2)
-    >>> queryProxy(c, P2, 42)
-    42
-    >>> p2 = P2(p1)
-    >>> int(queryProxy(p2, P1) is p1)
-    1
-    >>> int(queryProxy(p2, P2) is p2)
-    1
-    >>> int(queryProxy(p2, ProxyBase) is p2)
-    1
-    
-    """
-
-def test_queryInnerProxy():
-    """
-    >>> from zope.proxy import ProxyBase, queryProxy, queryInnerProxy
-    >>> class P1(ProxyBase):
-    ...    pass
-    >>> class P2(ProxyBase):
-    ...    pass
-    >>> class C(object):
-    ...     pass
-    >>> c = C()
-    >>> queryInnerProxy(c, P1)
-    >>> queryInnerProxy(c, P1, 42)
-    42
-    >>> p1 = P1(c)
-    >>> int(queryProxy(p1, P1) is p1)
-    1
-    >>> queryInnerProxy(c, P2)
-    >>> queryInnerProxy(c, P2, 42)
-    42
-    >>> p2 = P2(p1)
-    >>> int(queryInnerProxy(p2, P1) is p1)
-    1
-    >>> int(queryInnerProxy(p2, P2) is p2)
-    1
-    >>> int(queryInnerProxy(p2, ProxyBase) is p1)
-    1
-
-    >>> p3 = P1(p2)
-    >>> int(queryProxy(p3, P1) is p3)
-    1
-    >>> int(queryInnerProxy(p3, P1) is p1)
-    1
-    >>> int(queryInnerProxy(p3, P2) is p2)
-    1
-    
-    """
-
-def test_sameProxiedObjects():
-    """
-    >>> from zope.proxy import ProxyBase, sameProxiedObjects
-    >>> class C(object):
-    ...     pass
-    >>> c1 = C()
-    >>> c2 = C()
-    >>> int(sameProxiedObjects(c1, c1))
-    1
-    >>> int(sameProxiedObjects(ProxyBase(c1), c1))
-    1
-    >>> int(sameProxiedObjects(ProxyBase(c1), ProxyBase(c1)))
-    1
-    >>> int(sameProxiedObjects(ProxyBase(ProxyBase(c1)), c1))
-    1
-    >>> int(sameProxiedObjects(c1, ProxyBase(c1)))
-    1
-    >>> int(sameProxiedObjects(c1, ProxyBase(ProxyBase(c1))))
-    1
-    >>> int(sameProxiedObjects(c1, c2))
-    0
-    >>> int(sameProxiedObjects(ProxyBase(c1), c2))
-    0
-    >>> int(sameProxiedObjects(ProxyBase(c1), ProxyBase(c2)))
-    0
-    >>> int(sameProxiedObjects(ProxyBase(ProxyBase(c1)), c2))
-    0
-    >>> int(sameProxiedObjects(c1, ProxyBase(c2)))
-    0
-    >>> int(sameProxiedObjects(c1, ProxyBase(ProxyBase(c2))))
-    0
-    """
-
-def test_subclassing_proxies():
-    """You can subclass ProxyBase
-
-    If you subclass a proxy, instances of the subclass have access to
-    data defined in the class, including descriptors.
-
-    Your subclass instances don't get instance dictionaries, but they
-    can have slots.
-
-    >>> class MyProxy(ProxyBase):
-    ...    __slots__ = 'x', 'y'
-    ...
-    ...    def f(self):
-    ...        return self.x
-
-    >>> l = [1, 2, 3]
-    >>> p = MyProxy(l)
-
-    I can use attributes defined by the class, including slots:
-    
-    >>> p.x = 'x'
-    >>> p.x
-    'x'
-    >>> p.f()
-    'x'
-
-    I can also use attributes of the proxied object:
-    
-    >>> p
-    [1, 2, 3]
-    >>> p.pop()
-    3
-    >>> p
-    [1, 2]
-    
-    """
-
-
-def test_suite():
-    suite = unittest.makeSuite(ProxyTestCase)
-    suite.addTest(DocTestSuite())
-    return suite
-
-if __name__ == "__main__":
-    runner = unittest.TextTestRunner(sys.stdout)
-    result = runner.run(test_suite())
-    newerrs = len(result.errors) + len(result.failures)
-    sys.exit(newerrs and 1 or 0)
diff --git a/branches/bug1734/src/zope/testing/DEPENDENCIES.cfg b/branches/bug1734/src/zope/testing/DEPENDENCIES.cfg
deleted file mode 100644
index bf63132b..00000000
--- a/branches/bug1734/src/zope/testing/DEPENDENCIES.cfg
+++ /dev/null
@@ -1 +0,0 @@
-zope.exceptions
diff --git a/branches/bug1734/src/zope/testing/__init__.py b/branches/bug1734/src/zope/testing/__init__.py
deleted file mode 100644
index ed6bfa7d..00000000
--- a/branches/bug1734/src/zope/testing/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Set up testing environment
-
-$Id$
-"""
-import os
-
-def patchTracebackModule():
-    """Use the ExceptionFormatter to show more info in tracebacks.
-    """
-    from zope.exceptions.exceptionformatter import format_exception
-    import traceback
-    traceback.format_exception = format_exception
-
-# Don't use the new exception formatter by default, since it
-# doesn't show filenames.
-if os.environ.get('NEW_ZOPE_EXCEPTION_FORMATTER', 0):
-    patchTracebackModule()
diff --git a/branches/bug1734/src/zope/testing/cleanup.py b/branches/bug1734/src/zope/testing/cleanup.py
deleted file mode 100644
index c2a41032..00000000
--- a/branches/bug1734/src/zope/testing/cleanup.py
+++ /dev/null
@@ -1,65 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Provide a standard cleanup registry
-
-Unit tests that change global data should include the CleanUp base
-class, which provides simpler setUp and tearDown methods that call
-global-data cleanup routines::
-
-  class Test(CleanUp, unittest.TestCase):
-
-      ....
-
-If custom setUp or tearDown are needed, then the base routines should
-be called, as in::
-
-  def tearDown(self):
-      super(Test, self).tearDown()
-      ....
-
-Cleanup routines for global data should be registered by passing them to
-addCleanUp::
-
-  addCleanUp(pigRegistry._clear)
-
-$Id$
-"""
-_cleanups = []
-
-def addCleanUp(func, args=(), kw={}):
-    """Register a cleanup routines
-
-    Pass a function to be called to cleanup global data.
-    Optional argument tuple and keyword arguments may be passed.
-    """
-    _cleanups.append((func, args, kw))
-
-class CleanUp(object):
-    """Mix-in class providing clean-up setUp and tearDown routines."""
-
-    def cleanUp(self):
-        """Clean up global data."""
-        cleanUp()
-
-    setUp = tearDown = cleanUp
-
-
-def cleanUp():
-    """Clean up global data."""
-    for func, args, kw in _cleanups:
-        func(*args, **kw)
-
-setUp = tearDown = cleanUp
diff --git a/branches/bug1734/src/zope/testing/doctest.py b/branches/bug1734/src/zope/testing/doctest.py
deleted file mode 100644
index 8933ed3b..00000000
--- a/branches/bug1734/src/zope/testing/doctest.py
+++ /dev/null
@@ -1,2704 +0,0 @@
-# Module doctest.
-# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
-# Major enhancements and refactoring by:
-#     Jim Fulton
-#     Edward Loper
-
-# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
-
-r"""Module doctest -- a framework for running examples in docstrings.
-
-In simplest use, end each module M to be tested with:
-
-def _test():
-    import doctest
-    doctest.testmod()
-
-if __name__ == "__main__":
-    _test()
-
-Then running the module as a script will cause the examples in the
-docstrings to get executed and verified:
-
-python M.py
-
-This won't display anything unless an example fails, in which case the
-failing example(s) and the cause(s) of the failure(s) are printed to stdout
-(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
-line of output is "Test failed.".
-
-Run it with the -v switch instead:
-
-python M.py -v
-
-and a detailed report of all examples tried is printed to stdout, along
-with assorted summaries at the end.
-
-You can force verbose mode by passing "verbose=True" to testmod, or prohibit
-it by passing "verbose=False".  In either of those cases, sys.argv is not
-examined by testmod.
-
-There are a variety of other ways to run doctests, including integration
-with the unittest framework, and support for running non-Python text
-files containing doctests.  There are also many ways to override parts
-of doctest's default behaviors.  See the Library Reference Manual for
-details.
-"""
-
-__docformat__ = 'reStructuredText en'
-
-__all__ = [
-    # 0, Option Flags
-    'register_optionflag',
-    'DONT_ACCEPT_TRUE_FOR_1',
-    'DONT_ACCEPT_BLANKLINE',
-    'NORMALIZE_WHITESPACE',
-    'ELLIPSIS',
-    'IGNORE_EXCEPTION_DETAIL',
-    'COMPARISON_FLAGS',
-    'REPORT_UDIFF',
-    'REPORT_CDIFF',
-    'REPORT_NDIFF',
-    'REPORT_ONLY_FIRST_FAILURE',
-    'REPORTING_FLAGS',
-    # 1. Utility Functions
-    'is_private',
-    # 2. Example & DocTest
-    'Example',
-    'DocTest',
-    # 3. Doctest Parser
-    'DocTestParser',
-    # 4. Doctest Finder
-    'DocTestFinder',
-    # 5. Doctest Runner
-    'DocTestRunner',
-    'OutputChecker',
-    'DocTestFailure',
-    'UnexpectedException',
-    'DebugRunner',
-    # 6. Test Functions
-    'testmod',
-    'testfile',
-    'run_docstring_examples',
-    # 7. Tester
-    'Tester',
-    # 8. Unittest Support
-    'DocTestSuite',
-    'DocFileSuite',
-    'set_unittest_reportflags',
-    # 9. Debugging Support
-    'script_from_examples',
-    'testsource',
-    'debug_src',
-    'debug',
-]
-
-import __future__
-
-import sys, traceback, inspect, linecache, os, re, types
-import unittest, difflib, pdb, tempfile
-import warnings
-from StringIO import StringIO
-
-# Don't whine about the deprecated is_private function in this
-# module's tests.
-warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
-                        __name__, 0)
-
-real_pdb_set_trace = pdb.set_trace
-
-# There are 4 basic classes:
-#  - Example: a <source, want> pair, plus an intra-docstring line number.
-#  - DocTest: a collection of examples, parsed from a docstring, plus
-#    info about where the docstring came from (name, filename, lineno).
-#  - DocTestFinder: extracts DocTests from a given object's docstring and
-#    its contained objects' docstrings.
-#  - DocTestRunner: runs DocTest cases, and accumulates statistics.
-#
-# So the basic picture is:
-#
-#                             list of:
-# +------+                   +---------+                   +-------+
-# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
-# +------+                   +---------+                   +-------+
-#                            | Example |
-#                            |   ...   |
-#                            | Example |
-#                            +---------+
-
-# Option constants.
-
-OPTIONFLAGS_BY_NAME = {}
-def register_optionflag(name):
-    flag = 1 << len(OPTIONFLAGS_BY_NAME)
-    OPTIONFLAGS_BY_NAME[name] = flag
-    return flag
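-
-# Each call claims the next free bit: the first registered flag is 1,
-# the second 2, the third 4, and so on.  Flags can therefore be OR-ed
-# together into masks such as COMPARISON_FLAGS below.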
-
-DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
-DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
-NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
-ELLIPSIS = register_optionflag('ELLIPSIS')
-IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
-
-COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
-                    DONT_ACCEPT_BLANKLINE |
-                    NORMALIZE_WHITESPACE |
-                    ELLIPSIS |
-                    IGNORE_EXCEPTION_DETAIL)
-
-REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
-REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
-REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
-REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
-
-REPORTING_FLAGS = (REPORT_UDIFF |
-                   REPORT_CDIFF |
-                   REPORT_NDIFF |
-                   REPORT_ONLY_FIRST_FAILURE)
-
-# Special string markers for use in `want` strings:
-BLANKLINE_MARKER = '<BLANKLINE>'
-ELLIPSIS_MARKER = '...'
-
-######################################################################
-## Table of Contents
-######################################################################
-#  1. Utility Functions
-#  2. Example & DocTest -- store test cases
-#  3. DocTest Parser -- extracts examples from strings
-#  4. DocTest Finder -- extracts test cases from objects
-#  5. DocTest Runner -- runs test cases
-#  6. Test Functions -- convenient wrappers for testing
-#  7. Tester Class -- for backwards compatibility
-#  8. Unittest Support
-#  9. Debugging Support
-# 10. Example Usage
-
-######################################################################
-## 1. Utility Functions
-######################################################################
-
-def is_private(prefix, base):
-    """prefix, base -> true iff name prefix + "." + base is "private".
-
-    Prefix may be an empty string, and base does not contain a period.
-    Prefix is ignored (although functions you write conforming to this
-    protocol may make use of it).
-    Return true iff base begins with at least one underscore, but
-    does not both begin and end with (at least) two underscores.
-
-    >>> is_private("a.b", "my_func")
-    False
-    >>> is_private("____", "_my_func")
-    True
-    >>> is_private("someclass", "__init__")
-    False
-    >>> is_private("sometypo", "__init_")
-    True
-    >>> is_private("x.y.z", "_")
-    True
-    >>> is_private("_x.y.z", "__")
-    False
-    >>> is_private("", "")  # senseless but consistent
-    False
-    """
-    warnings.warn("is_private is deprecated; it wasn't useful; "
-                  "examine DocTestFinder.find() lists instead",
-                  DeprecationWarning, stacklevel=2)
-    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
-
-def _extract_future_flags(globs):
-    """
-    Return the compiler-flags associated with the future features that
-    have been imported into the given namespace (globs).
-    """
-    flags = 0
-    for fname in __future__.all_feature_names:
-        feature = globs.get(fname, None)
-        if feature is getattr(__future__, fname):
-            flags |= feature.compiler_flag
-    return flags
-
-def _normalize_module(module, depth=2):
-    """
-    Return the module specified by `module`.  In particular:
-      - If `module` is a module, then return module.
-      - If `module` is a string, then import and return the
-        module with that name.
-      - If `module` is None, then return the calling module.
-        The calling module is assumed to be the module of
-        the stack frame at the given depth in the call stack.
-    """
-    if inspect.ismodule(module):
-        return module
-    elif isinstance(module, (str, unicode)):
-        return __import__(module, globals(), locals(), ["*"])
-    elif module is None:
-        return sys.modules[sys._getframe(depth).f_globals['__name__']]
-    else:
-        raise TypeError("Expected a module, string, or None")
-
-def _indent(s, indent=4):
-    """
-    Add the given number of space characters to the beginning of every
-    non-blank line in `s`, and return the result.
-    """
-    # This regexp matches the start of non-blank lines:
-    return re.sub('(?m)^(?!$)', indent*' ', s)
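-
-# For example (illustrative):
-#   _indent('line one\n\nline two\n')
-#   == '    line one\n\n    line two\n'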
-
-def _exception_traceback(exc_info):
-    """
-    Return a string containing a traceback message for the given
-    exc_info tuple (as returned by sys.exc_info()).
-    """
-    # Get a traceback message.
-    excout = StringIO()
-    exc_type, exc_val, exc_tb = exc_info
-    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
-    return excout.getvalue()
-
-# Override some StringIO methods.
-class _SpoofOut(StringIO):
-    def getvalue(self):
-        result = StringIO.getvalue(self)
-        # If anything at all was written, make sure there's a trailing
-        # newline.  There's no way for the expected output to indicate
-        # that a trailing newline is missing.
-        if result and not result.endswith("\n"):
-            result += "\n"
-        # Prevent softspace from screwing up the next test case, in
-        # case they used print with a trailing comma in an example.
-        if hasattr(self, "softspace"):
-            del self.softspace
-        return result
-
-    def truncate(self, size=None):
-        StringIO.truncate(self, size)
-        if hasattr(self, "softspace"):
-            del self.softspace
-
-# Worst-case linear-time ellipsis matching.
-def _ellipsis_match(want, got):
-    """
-    Essentially the only subtle case:
-    >>> _ellipsis_match('aa...aa', 'aaa')
-    False
-    """
-    if ELLIPSIS_MARKER not in want:
-        return want == got
-
-    # Find "the real" strings.
-    ws = want.split(ELLIPSIS_MARKER)
-    assert len(ws) >= 2
-
-    # Deal with exact matches possibly needed at one or both ends.
-    startpos, endpos = 0, len(got)
-    w = ws[0]
-    if w:   # starts with exact match
-        if got.startswith(w):
-            startpos = len(w)
-            del ws[0]
-        else:
-            return False
-    w = ws[-1]
-    if w:   # ends with exact match
-        if got.endswith(w):
-            endpos -= len(w)
-            del ws[-1]
-        else:
-            return False
-
-    if startpos > endpos:
-        # Exact end matches required more characters than we have, as in
-        # _ellipsis_match('aa...aa', 'aaa')
-        return False
-
-    # For the rest, we only need to find the leftmost non-overlapping
-    # match for each piece.  If there's no overall match that way alone,
-    # there's no overall match period.
-    for w in ws:
-        # w may be '' at times, if there are consecutive ellipses, or
-        # due to an ellipsis at the start or end of `want`.  That's OK.
-        # Search for an empty string succeeds, and doesn't change startpos.
-        startpos = got.find(w, startpos, endpos)
-        if startpos < 0:
-            return False
-        startpos += len(w)
-
-    return True
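-
-# For example (illustrative): _ellipsis_match('1...4', '1 2 3 4') is
-# True, while _ellipsis_match('1...4', '1 2 3 5') is False -- the
-# literal pieces around each '...' must appear, in order, within `got`.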
-
-def _comment_line(line):
-    "Return a commented form of the given line"
-    line = line.rstrip()
-    if line:
-        return '# '+line
-    else:
-        return '#'
-
-class _OutputRedirectingPdb(pdb.Pdb):
-    """
-    A specialized version of the python debugger that redirects stdout
-    to a given stream when interacting with the user.  Stdout is *not*
-    redirected when traced code is executed.
-    """
-    def __init__(self, out):
-        self.__out = out
-        self.__debugger_used = False
-        pdb.Pdb.__init__(self)
-
-    def set_trace(self):
-        self.__debugger_used = True
-        pdb.Pdb.set_trace(self)
-
-    def set_continue(self):
-        # Calling set_continue unconditionally would break unit test coverage
-        # reporting, as Bdb.set_continue calls sys.settrace(None).
-        if self.__debugger_used:
-            pdb.Pdb.set_continue(self)
-
-    def trace_dispatch(self, *args):
-        # Redirect stdout to the given stream.
-        save_stdout = sys.stdout
-        sys.stdout = self.__out
-        # Call Pdb's trace dispatch method.
-        result = pdb.Pdb.trace_dispatch(self, *args)
-        # Restore stdout.
-        sys.stdout = save_stdout
-        return result
-
-# [XX] Normalize with respect to os.path.pardir?
-def _module_relative_path(module, path):
-    if not inspect.ismodule(module):
-        raise TypeError, 'Expected a module: %r' % module
-    if path.startswith('/'):
-        raise ValueError, 'Module-relative files may not have absolute paths'
-
-    # Find the base directory for the path.
-    if hasattr(module, '__file__'):
-        # A normal module/package
-        basedir = os.path.split(module.__file__)[0]
-    elif module.__name__ == '__main__':
-        # An interactive session.
-        if len(sys.argv)>0 and sys.argv[0] != '':
-            basedir = os.path.split(sys.argv[0])[0]
-        else:
-            basedir = os.curdir
-    else:
-        # A module w/o __file__ (this includes builtins)
-        raise ValueError("Can't resolve paths relative to the module " +
-                         module + " (it has no __file__)")
-
-    # Combine the base directory and the path.
-    return os.path.join(basedir, *(path.split('/')))
-
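-# Editor's sketch (an illustration, not part of the original module);
-# `os` stands in for any module that defines __file__:
-#
-#     >>> import os
-#     >>> base = os.path.split(os.__file__)[0]
-#     >>> _module_relative_path(os, 'data/output.txt') == \
-#     ...     os.path.join(base, 'data', 'output.txt')
-#     True
-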
-######################################################################
-## 2. Example & DocTest
-######################################################################
-## - An "example" is a <source, want> pair, where "source" is a
-##   fragment of source code, and "want" is the expected output for
-##   "source."  The Example class also includes information about
-##   where the example was extracted from.
-##
-## - A "doctest" is a collection of examples, typically extracted from
-##   a string (such as an object's docstring).  The DocTest class also
-##   includes information about where the string was extracted from.
-
-class Example:
-    """
-    A single doctest example, consisting of source code and expected
-    output.  `Example` defines the following attributes:
-
-      - source: A single Python statement, always ending with a newline.
-        The constructor adds a newline if needed.
-
-      - want: The expected output from running the source code (either
-        from stdout, or a traceback in case of exception).  `want` ends
-        with a newline unless it's empty, in which case it's an empty
-        string.  The constructor adds a newline if needed.
-
-      - exc_msg: The exception message generated by the example, if
-        the example is expected to generate an exception; or `None` if
-        it is not expected to generate an exception.  This exception
-        message is compared against the return value of
-        `traceback.format_exception_only()`.  `exc_msg` ends with a
-        newline unless it's `None`.  The constructor adds a newline
-        if needed.
-
-      - lineno: The line number within the DocTest string containing
-        this Example where the Example begins.  This line number is
-        zero-based, with respect to the beginning of the DocTest.
-
-      - indent: The example's indentation in the DocTest string.
-        I.e., the number of space characters that precede the
-        example's first prompt.
-
-      - options: A dictionary mapping from option flags to True or
-        False, which is used to override default options for this
-        example.  Any option flags not contained in this dictionary
-        are left at their default value (as specified by the
-        DocTestRunner's optionflags).  By default, no options are set.
-    """
-    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
-                 options=None):
-        # Normalize inputs.
-        if not source.endswith('\n'):
-            source += '\n'
-        if want and not want.endswith('\n'):
-            want += '\n'
-        if exc_msg is not None and not exc_msg.endswith('\n'):
-            exc_msg += '\n'
-        # Store properties.
-        self.source = source
-        self.want = want
-        self.lineno = lineno
-        self.indent = indent
-        if options is None: options = {}
-        self.options = options
-        self.exc_msg = exc_msg
-
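-# Editor's sketch (an illustration, not part of the original module):
-# the constructor normalizes both strings to end with a newline:
-#
-#     >>> ex = Example('print 1', '1')
-#     >>> ex.source, ex.want
-#     ('print 1\n', '1\n')
-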
-class DocTest:
-    """
-    A collection of doctest examples that should be run in a single
-    namespace.  Each `DocTest` defines the following attributes:
-
-      - examples: the list of examples.
-
-      - globs: The namespace (aka globals) that the examples should
-        be run in.
-
-      - name: A name identifying the DocTest (typically, the name of
-        the object whose docstring this DocTest was extracted from).
-
-      - filename: The name of the file that this DocTest was extracted
-        from, or `None` if the filename is unknown.
-
-      - lineno: The line number within filename where this DocTest
-        begins, or `None` if the line number is unavailable.  This
-        line number is zero-based, with respect to the beginning of
-        the file.
-
-      - docstring: The string that the examples were extracted from,
-        or `None` if the string is unavailable.
-    """
-    def __init__(self, examples, globs, name, filename, lineno, docstring):
-        """
-        Create a new DocTest containing the given examples.  The
-        DocTest's globals are initialized with a copy of `globs`.
-        """
-        assert not isinstance(examples, basestring), \
-               "DocTest no longer accepts str; use DocTestParser instead"
-        self.examples = examples
-        self.docstring = docstring
-        self.globs = globs.copy()
-        self.name = name
-        self.filename = filename
-        self.lineno = lineno
-
-    def __repr__(self):
-        if len(self.examples) == 0:
-            examples = 'no examples'
-        elif len(self.examples) == 1:
-            examples = '1 example'
-        else:
-            examples = '%d examples' % len(self.examples)
-        return ('<DocTest %s from %s:%s (%s)>' %
-                (self.name, self.filename, self.lineno, examples))
-
-
-    # This lets us sort tests by name:
-    def __cmp__(self, other):
-        if not isinstance(other, DocTest):
-            return -1
-        return cmp((self.name, self.filename, self.lineno, id(self)),
-                   (other.name, other.filename, other.lineno, id(other)))
-
-######################################################################
-## 3. DocTestParser
-######################################################################
-
-class DocTestParser:
-    """
-    A class used to parse strings containing doctest examples.
-    """
-    # This regular expression is used to find doctest examples in a
-    # string.  It defines three groups: `source` is the source code
-    # (including leading indentation and prompts); `indent` is the
-    # indentation of the first (PS1) line of the source code; and
-    # `want` is the expected output (including leading indentation).
-    _EXAMPLE_RE = re.compile(r'''
-        # Source consists of a PS1 line followed by zero or more PS2 lines.
-        (?P<source>
-            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
-            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
-        \n?
-        # Want consists of any non-blank lines that do not start with PS1.
-        (?P<want> (?:(?![ ]*$)    # Not a blank line
-                     (?![ ]*>>>)  # Not a line starting with PS1
-                     .*$\n?       # But any other line
-                  )*)
-        ''', re.MULTILINE | re.VERBOSE)
-
-    # A regular expression for handling `want` strings that contain
-    # expected exceptions.  It divides `want` into three pieces:
-    #    - the traceback header line (`hdr`)
-    #    - the traceback stack (`stack`)
-    #    - the exception message (`msg`), as generated by
-    #      traceback.format_exception_only()
-    # `msg` may have multiple lines.  We assume/require that the
-    # exception message is the first non-indented line starting with a word
-    # character following the traceback header line.
-    _EXCEPTION_RE = re.compile(r"""
-        # Grab the traceback header.  Different versions of Python have
-        # said different things on the first traceback line.
-        ^(?P<hdr> Traceback\ \(
-            (?: most\ recent\ call\ last
-            |   innermost\ last
-            ) \) :
-        )
-        \s* $                # toss trailing whitespace on the header.
-        (?P<stack> .*?)      # don't blink: absorb stuff until...
-        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
-        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-
-    # A callable returning a true value iff its argument is a blank line
-    # or contains a single comment.
-    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
-
-    def parse(self, string, name='<string>'):
-        """
-        Divide the given string into examples and intervening text,
-        and return them as a list of alternating Examples and strings.
-        Line numbers for the Examples are 0-based.  The optional
-        argument `name` is a name identifying this string, and is only
-        used for error messages.
-        """
-        string = string.expandtabs()
-        # If all lines begin with the same indentation, then strip it.
-        min_indent = self._min_indent(string)
-        if min_indent > 0:
-            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
-        output = []
-        charno, lineno = 0, 0
-        # Find all doctest examples in the string:
-        for m in self._EXAMPLE_RE.finditer(string):
-            # Add the pre-example text to `output`.
-            output.append(string[charno:m.start()])
-            # Update lineno (lines before this example)
-            lineno += string.count('\n', charno, m.start())
-            # Extract info from the regexp match.
-            (source, options, want, exc_msg) = \
-                     self._parse_example(m, name, lineno)
-            # Create an Example, and add it to the list.
-            if not self._IS_BLANK_OR_COMMENT(source):
-                output.append( Example(source, want, exc_msg,
-                                    lineno=lineno,
-                                    indent=min_indent+len(m.group('indent')),
-                                    options=options) )
-            # Update lineno (lines inside this example)
-            lineno += string.count('\n', m.start(), m.end())
-            # Update charno.
-            charno = m.end()
-        # Add any remaining post-example text to `output`.
-        output.append(string[charno:])
-        return output
-
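-    # Editor's sketch (an illustration, not part of the original
-    # module): `parse` interleaves prose strings and Examples.
-    #
-    #     >>> parts = DocTestParser().parse('text\n>>> 1+1\n2\n\nmore\n')
-    #     >>> [isinstance(p, Example) for p in parts]
-    #     [False, True, False]
-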
-    def get_doctest(self, string, globs, name, filename, lineno):
-        """
-        Extract all doctest examples from the given string, and
-        collect them into a `DocTest` object.
-
-        `globs`, `name`, `filename`, and `lineno` are attributes for
-        the new `DocTest` object.  See the documentation for `DocTest`
-        for more information.
-        """
-        return DocTest(self.get_examples(string, name), globs,
-                       name, filename, lineno, string)
-
-    def get_examples(self, string, name='<string>'):
-        """
-        Extract all doctest examples from the given string, and return
-        them as a list of `Example` objects.  Line numbers are
-        0-based, because it's most common in doctests that nothing
-        interesting appears on the same line as the opening triple-quote,
-        and so the first interesting line is called \"line 1\" then.
-
-        The optional argument `name` is a name identifying this
-        string, and is only used for error messages.
-        """
-        return [x for x in self.parse(string, name)
-                if isinstance(x, Example)]
-
-    def _parse_example(self, m, name, lineno):
-        """
-        Given a regular expression match from `_EXAMPLE_RE` (`m`),
-        return a tuple `(source, options, want, exc_msg)`, where
-        `source` is the matched example's source code (with prompts
-        and indentation stripped); `options` is a dictionary of
-        option-flag overrides extracted from the source; `want` is
-        the example's expected output (with indentation stripped);
-        and `exc_msg` is the example's expected exception message,
-        or `None` if no exception is expected.
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        # Get the example's indentation level.
-        indent = len(m.group('indent'))
-
-        # Divide source into lines; check that they're properly
-        # indented; and then strip their indentation & prompts.
-        source_lines = m.group('source').split('\n')
-        self._check_prompt_blank(source_lines, indent, name, lineno)
-        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
-        source = '\n'.join([sl[indent+4:] for sl in source_lines])
-
-        # Divide want into lines; check that it's properly indented; and
-        # then strip the indentation.  Spaces before the last newline should
-        # be preserved, so plain rstrip() isn't good enough.
-        want = m.group('want')
-        want_lines = want.split('\n')
-        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
-            del want_lines[-1]  # forget final newline & spaces after it
-        self._check_prefix(want_lines, ' '*indent, name,
-                           lineno + len(source_lines))
-        want = '\n'.join([wl[indent:] for wl in want_lines])
-
-        # If `want` contains a traceback message, then extract it.
-        m = self._EXCEPTION_RE.match(want)
-        if m:
-            exc_msg = m.group('msg')
-        else:
-            exc_msg = None
-
-        # Extract options from the source.
-        options = self._find_options(source, name, lineno)
-
-        return source, options, want, exc_msg
-
-    # This regular expression looks for option directives in the
-    # source code of an example.  Option directives are comments
-    # starting with "doctest:".  Warning: this may give false
-    # positives for string-literals that contain the string
-    # "#doctest:".  Eliminating these false positives would require
-    # actually parsing the string; but we limit them by ignoring any
-    # line containing "#doctest:" that is *followed* by a quote mark.
-    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
-                                      re.MULTILINE)
-
-    def _find_options(self, source, name, lineno):
-        """
-        Return a dictionary containing option overrides extracted from
-        option directives in the given source string.
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        options = {}
-        # (note: with the current regexp, this will match at most once:)
-        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
-            option_strings = m.group(1).replace(',', ' ').split()
-            for option in option_strings:
-                if (option[0] not in '+-' or
-                    option[1:] not in OPTIONFLAGS_BY_NAME):
-                    raise ValueError('line %r of the doctest for %s '
-                                     'has an invalid option: %r' %
-                                     (lineno+1, name, option))
-                flag = OPTIONFLAGS_BY_NAME[option[1:]]
-                options[flag] = (option[0] == '+')
-        if options and self._IS_BLANK_OR_COMMENT(source):
-            raise ValueError('line %r of the doctest for %s has an option '
-                             'directive on a line with no example: %r' %
-                             (lineno, name, source))
-        return options
-
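-    # Editor's sketch (an illustration, not part of the original
-    # module): a directive comment toggles flags for one example.
-    #
-    #     >>> src = 'print range(20) # doctest: +ELLIPSIS\n'
-    #     >>> DocTestParser()._find_options(src, '<test>', 0) == {ELLIPSIS: True}
-    #     True
-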
-    # This regular expression finds the indentation of every non-blank
-    # line in a string.
-    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
-
-    def _min_indent(self, s):
-        "Return the minimum indentation of any non-blank line in `s`"
-        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
-        if len(indents) > 0:
-            return min(indents)
-        else:
-            return 0
-
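-    # Editor's example (an illustration, not part of the original module):
-    #
-    #     >>> DocTestParser()._min_indent('  a\n    b\n')
-    #     2
-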
-    def _check_prompt_blank(self, lines, indent, name, lineno):
-        """
-        Given the lines of a source string (including prompts and
-        leading indentation), check to make sure that every prompt is
-        followed by a space character.  If any prompt is not followed by
-        a space character, then raise ValueError.
-        """
-        for i, line in enumerate(lines):
-            if len(line) >= indent+4 and line[indent+3] != ' ':
-                raise ValueError('line %r of the docstring for %s '
-                                 'lacks blank after %s: %r' %
-                                 (lineno+i+1, name,
-                                  line[indent:indent+3], line))
-
-    def _check_prefix(self, lines, prefix, name, lineno):
-        """
-        Check that every line in the given list starts with the given
-        prefix; if any line does not, then raise a ValueError.
-        """
-        for i, line in enumerate(lines):
-            if line and not line.startswith(prefix):
-                raise ValueError('line %r of the docstring for %s has '
-                                 'inconsistent leading whitespace: %r' %
-                                 (lineno+i+1, name, line))
-
-
-######################################################################
-## 4. DocTest Finder
-######################################################################
-
-class DocTestFinder:
-    """
-    A class used to extract the DocTests that are relevant to a given
-    object, from its docstring and the docstrings of its contained
-    objects.  Doctests can currently be extracted from the following
-    object types: modules, functions, classes, methods, staticmethods,
-    classmethods, and properties.
-    """
-
-    def __init__(self, verbose=False, parser=DocTestParser(),
-                 recurse=True, _namefilter=None, exclude_empty=True):
-        """
-        Create a new doctest finder.
-
-        The optional argument `parser` specifies the `DocTestParser`
-        object (or a drop-in replacement) that is used to extract
-        doctests from docstrings.
-
-        If the optional argument `recurse` is false, then `find` will
-        only examine the given object, and not any contained objects.
-
-        If the optional argument `exclude_empty` is false, then `find`
-        will include tests for objects with empty docstrings.
-        """
-        self._parser = parser
-        self._verbose = verbose
-        self._recurse = recurse
-        self._exclude_empty = exclude_empty
-        # _namefilter is undocumented, and exists only for temporary backward-
-        # compatibility support of testmod's deprecated isprivate mess.
-        self._namefilter = _namefilter
-
-    def find(self, obj, name=None, module=None, globs=None,
-             extraglobs=None):
-        """
-        Return a list of the DocTests that are defined by the given
-        object's docstring, or by any of its contained objects'
-        docstrings.
-
-        The optional parameter `module` is the module that contains
-        the given object.  If the module is not specified or is None, then
-        the test finder will attempt to automatically determine the
-        correct module.  The object's module is used:
-
-            - As a default namespace, if `globs` is not specified.
-            - To prevent the DocTestFinder from extracting DocTests
-              from objects that are imported from other modules.
-            - To find the name of the file containing the object.
-            - To help find the line number of the object within its
-              file.
-
-        Contained objects whose module does not match `module` are ignored.
-
-        If `module` is False, no attempt to find the module will be made.
-        This is obscure, of use mostly in tests:  if `module` is False, or
-        is None but cannot be found automatically, then all objects are
-        considered to belong to the (non-existent) module, so all contained
-        objects will (recursively) be searched for doctests.
-
-        The globals for each DocTest are formed by combining `globs`
-        and `extraglobs` (bindings in `extraglobs` override bindings
-        in `globs`).  A new copy of the globals dictionary is created
-        for each DocTest.  If `globs` is not specified, then it
-        defaults to the module's `__dict__`, if specified, or {}
-        otherwise.  If `extraglobs` is not specified, then it defaults
-        to {}.
-
-        """
-        # If name was not specified, then extract it from the object.
-        if name is None:
-            name = getattr(obj, '__name__', None)
-            if name is None:
-                raise ValueError("DocTestFinder.find: name must be given "
-                        "when obj.__name__ doesn't exist: %r" %
-                                 (type(obj),))
-
-        # Find the module that contains the given object (if obj is
-        # a module, then module=obj).  Note: this may fail, in which
-        # case module will be None.
-        if module is False:
-            module = None
-        elif module is None:
-            module = inspect.getmodule(obj)
-
-        # Read the module's source code.  This is used by
-        # DocTestFinder._find_lineno to find the line number for a
-        # given object's docstring.
-        try:
-            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
-            source_lines = linecache.getlines(file)
-            if not source_lines:
-                source_lines = None
-        except TypeError:
-            source_lines = None
-
-        # Initialize globals, and merge in extraglobs.
-        if globs is None:
-            if module is None:
-                globs = {}
-            else:
-                globs = module.__dict__.copy()
-        else:
-            globs = globs.copy()
-        if extraglobs is not None:
-            globs.update(extraglobs)
-
-        # Recursively explore `obj`, extracting DocTests.
-        tests = []
-        self._find(tests, obj, name, module, source_lines, globs, {})
-        return tests
-
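-    # Editor's sketch (an illustration, not part of the original module);
-    # _TestClass is the sample class defined later in this module:
-    #
-    #     >>> tests = DocTestFinder().find(_TestClass)
-    #     >>> tests.sort()
-    #     >>> [t.name for t in tests]
-    #     ['_TestClass', '_TestClass.__init__', '_TestClass.get', '_TestClass.square']
-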
-    def _filter(self, obj, prefix, base):
-        """
-        Return true if the given object should not be examined.
-        """
-        return (self._namefilter is not None and
-                self._namefilter(prefix, base))
-
-    def _from_module(self, module, object):
-        """
-        Return true if the given object is defined in the given
-        module.
-        """
-        if module is None:
-            return True
-        elif inspect.isfunction(object):
-            return module.__dict__ is object.func_globals
-        elif inspect.isclass(object):
-            return module.__name__ == object.__module__
-        elif inspect.getmodule(object) is not None:
-            return module is inspect.getmodule(object)
-        elif hasattr(object, '__module__'):
-            return module.__name__ == object.__module__
-        elif isinstance(object, property):
-            return True # [XX] no way to be sure.
-        else:
-            raise ValueError("object must be a class or function")
-
-    def _find(self, tests, obj, name, module, source_lines, globs, seen):
-        """
-        Find tests for the given object and any contained objects, and
-        add them to `tests`.
-        """
-        if self._verbose:
-            print 'Finding tests in %s' % name
-
-        # If we've already processed this object, then ignore it.
-        if id(obj) in seen:
-            return
-        seen[id(obj)] = 1
-
-        # Find a test for this object, and add it to the list of tests.
-        test = self._get_test(obj, name, module, globs, source_lines)
-        if test is not None:
-            tests.append(test)
-
-        # Look for tests in a module's contained objects.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                valname = '%s.%s' % (name, valname)
-                # Recurse to functions & classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val)) and
-                    self._from_module(module, val)):
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen)
-
-        # Look for tests in a module's __test__ dictionary.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in getattr(obj, '__test__', {}).items():
-                if not isinstance(valname, basestring):
-                    raise ValueError("DocTestFinder.find: __test__ keys "
-                                     "must be strings: %r" %
-                                     (type(valname),))
-                if not (inspect.isfunction(val) or inspect.isclass(val) or
-                        inspect.ismethod(val) or inspect.ismodule(val) or
-                        isinstance(val, basestring)):
-                    raise ValueError("DocTestFinder.find: __test__ values "
-                                     "must be strings, functions, methods, "
-                                     "classes, or modules: %r" %
-                                     (type(val),))
-                valname = '%s.__test__.%s' % (name, valname)
-                self._find(tests, val, valname, module, source_lines,
-                           globs, seen)
-
-        # Look for tests in a class's contained objects.
-        if inspect.isclass(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                # Special handling for staticmethod/classmethod.
-                if isinstance(val, staticmethod):
-                    val = getattr(obj, valname)
-                if isinstance(val, classmethod):
-                    val = getattr(obj, valname).im_func
-
-                # Recurse to methods, properties, and nested classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val) or
-                      isinstance(val, property)) and
-                      self._from_module(module, val)):
-                    valname = '%s.%s' % (name, valname)
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen)
-
-    def _get_test(self, obj, name, module, globs, source_lines):
-        """
-        Return a DocTest for the given object, if it defines a docstring;
-        otherwise, return None.
-        """
-        # Extract the object's docstring.  If it doesn't have one,
-        # then return None (no test for this object).
-        if isinstance(obj, basestring):
-            docstring = obj
-        else:
-            try:
-                if obj.__doc__ is None:
-                    docstring = ''
-                else:
-                    docstring = obj.__doc__
-                    if not isinstance(docstring, basestring):
-                        docstring = str(docstring)
-            except (TypeError, AttributeError):
-                docstring = ''
-
-        # Find the docstring's location in the file.
-        lineno = self._find_lineno(obj, source_lines)
-
-        # Don't bother if the docstring is empty.
-        if self._exclude_empty and not docstring:
-            return None
-
-        # Return a DocTest for this object.
-        if module is None:
-            filename = None
-        else:
-            filename = getattr(module, '__file__', module.__name__)
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-        return self._parser.get_doctest(docstring, globs, name,
-                                        filename, lineno)
-
-    def _find_lineno(self, obj, source_lines):
-        """
-        Return a line number of the given object's docstring.  Note:
-        this method assumes that the object has a docstring.
-        """
-        lineno = None
-
-        # Find the line number for modules.
-        if inspect.ismodule(obj):
-            lineno = 0
-
-        # Find the line number for classes.
-        # Note: this could be fooled if a class is defined multiple
-        # times in a single file.
-        if inspect.isclass(obj):
-            if source_lines is None:
-                return None
-            pat = re.compile(r'^\s*class\s*%s\b' %
-                             getattr(obj, '__name__', '-'))
-            for i, line in enumerate(source_lines):
-                if pat.match(line):
-                    lineno = i
-                    break
-
-        # Find the line number for functions & methods.
-        if inspect.ismethod(obj): obj = obj.im_func
-        if inspect.isfunction(obj): obj = obj.func_code
-        if inspect.istraceback(obj): obj = obj.tb_frame
-        if inspect.isframe(obj): obj = obj.f_code
-        if inspect.iscode(obj):
-            lineno = getattr(obj, 'co_firstlineno', None)-1
-
-        # Find the line number where the docstring starts.  Assume
-        # that it's the first line that begins with a quote mark.
-        # Note: this could be fooled by a multiline function
-        # signature, where a continuation line begins with a quote
-        # mark.
-        if lineno is not None:
-            if source_lines is None:
-                return lineno+1
-            pat = re.compile('(^|.*:)\s*\w*("|\')')
-            for lineno in range(lineno, len(source_lines)):
-                if pat.match(source_lines[lineno]):
-                    return lineno
-
-        # We couldn't find the line number.
-        return None
-
-######################################################################
-## 5. DocTest Runner
-######################################################################
-
-class DocTestRunner:
-    """
-    A class used to run DocTest test cases, and accumulate statistics.
-    The `run` method is used to process a single DocTest case.  It
-    returns a tuple `(f, t)`, where `t` is the number of test cases
-    tried, and `f` is the number of test cases that failed.
-
-        >>> tests = DocTestFinder().find(_TestClass)
-        >>> runner = DocTestRunner(verbose=False)
-        >>> for test in tests:
-        ...     print runner.run(test)
-        (0, 2)
-        (0, 1)
-        (0, 2)
-        (0, 2)
-
-    The `summarize` method prints a summary of all the test cases that
-    have been run by the runner, and returns an aggregated `(f, t)`
-    tuple:
-
-        >>> runner.summarize(verbose=1)
-        4 items passed all tests:
-           2 tests in _TestClass
-           2 tests in _TestClass.__init__
-           2 tests in _TestClass.get
-           1 tests in _TestClass.square
-        7 tests in 4 items.
-        7 passed and 0 failed.
-        Test passed.
-        (0, 7)
-
-    The aggregated number of tried examples and failed examples is
-    also available via the `tries` and `failures` attributes:
-
-        >>> runner.tries
-        7
-        >>> runner.failures
-        0
-
-    The comparison between expected outputs and actual outputs is done
-    by an `OutputChecker`.  This comparison may be customized with a
-    number of option flags; see the documentation for `testmod` for
-    more information.  If the option flags are insufficient, then the
-    comparison may also be customized by passing a subclass of
-    `OutputChecker` to the constructor.
-
-    The test runner's display output can be controlled in two ways.
-    First, an output function (`out`) can be passed to
-    `DocTestRunner.run`; this function will be called with strings that
-    should be displayed.  It defaults to `sys.stdout.write`.  If
-    capturing the output is not sufficient, then the display output
-    can be also customized by subclassing DocTestRunner, and
-    overriding the methods `report_start`, `report_success`,
-    `report_unexpected_exception`, and `report_failure`.
-    """
-    # This divider string is used to separate failure messages, and to
-    # separate sections of the summary.
-    DIVIDER = "*" * 70
-
-    def __init__(self, checker=None, verbose=None, optionflags=0):
-        """
-        Create a new test runner.
-
-        Optional keyword arg `checker` is the `OutputChecker` that
-        should be used to compare the expected outputs and actual
-        outputs of doctest examples.
-
-        Optional keyword arg 'verbose' prints lots of stuff if true,
-        only failures if false; by default, it's true iff '-v' is in
-        sys.argv.
-
-        Optional argument `optionflags` can be used to control how the
-        test runner compares expected output to actual output, and how
-        it displays failures.  See the documentation for `testmod` for
-        more information.
-        """
-        self._checker = checker or OutputChecker()
-        if verbose is None:
-            verbose = '-v' in sys.argv
-        self._verbose = verbose
-        self.optionflags = optionflags
-        self.original_optionflags = optionflags
-
-        # Keep track of the examples we've run.
-        self.tries = 0
-        self.failures = 0
-        self._name2ft = {}
-
-        # Create a fake output target for capturing doctest output.
-        self._fakeout = _SpoofOut()
-
-    #/////////////////////////////////////////////////////////////////
-    # Reporting methods
-    #/////////////////////////////////////////////////////////////////
-
-    def report_start(self, out, test, example):
-        """
-        Report that the test runner is about to process the given
-        example.  (Only displays a message if verbose=True)
-        """
-        if self._verbose:
-            if example.want:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting:\n' + _indent(example.want))
-            else:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting nothing\n')
-
-    def report_success(self, out, test, example, got):
-        """
-        Report that the given example ran successfully.  (Only
-        displays a message if verbose=True)
-        """
-        if self._verbose:
-            out("ok\n")
-
-    def report_failure(self, out, test, example, got):
-        """
-        Report that the given example failed.
-        """
-        out(self._failure_header(test, example) +
-            self._checker.output_difference(example, got, self.optionflags))
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        """
-        Report that the given example raised an unexpected exception.
-        """
-        out(self._failure_header(test, example) +
-            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
-
-    def _failure_header(self, test, example):
-        out = [self.DIVIDER]
-        if test.filename:
-            if test.lineno is not None and example.lineno is not None:
-                lineno = test.lineno + example.lineno + 1
-            else:
-                lineno = '?'
-            out.append('File "%s", line %s, in %s' %
-                       (test.filename, lineno, test.name))
-        else:
-            out.append('Line %s, in %s' % (example.lineno+1, test.name))
-        out.append('Failed example:')
-        source = example.source
-        out.append(_indent(source))
-        return '\n'.join(out)
-
-    #/////////////////////////////////////////////////////////////////
-    # DocTest Running
-    #/////////////////////////////////////////////////////////////////
-
-    def __run(self, test, compileflags, out):
-        """
-        Run the examples in `test`.  Write the outcome of each example
-        with one of the `DocTestRunner.report_*` methods, using the
-        writer function `out`.  `compileflags` is the set of compiler
-        flags that should be used to execute examples.  Return a tuple
-        `(f, t)`, where `t` is the number of examples tried, and `f`
-        is the number of examples that failed.  The examples are run
-        in the namespace `test.globs`.
-        """
-        # Keep track of the number of failures and tries.
-        failures = tries = 0
-
-        # Save the option flags (since option directives can be used
-        # to modify them).
-        original_optionflags = self.optionflags
-
-        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
-
-        check = self._checker.check_output
-
-        # Process each example.
-        for examplenum, example in enumerate(test.examples):
-
-            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
-            # reporting after the first failure.
-            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
-                     failures > 0)
-
-            # Merge in the example's options.
-            self.optionflags = original_optionflags
-            if example.options:
-                for (optionflag, val) in example.options.items():
-                    if val:
-                        self.optionflags |= optionflag
-                    else:
-                        self.optionflags &= ~optionflag
-
-            # Record that we started this example.
-            tries += 1
-            if not quiet:
-                self.report_start(out, test, example)
-
-            # Use a special filename for compile(), so we can retrieve
-            # the source code during interactive debugging (see
-            # __patched_linecache_getlines).
-            filename = '<doctest %s[%d]>' % (test.name, examplenum)
-
-            # Run the example in the given context (globs), and record
-            # any exception that gets raised.  (But don't intercept
-            # keyboard interrupts.)
-            try:
-                # Don't blink!  This is where the user's code gets run.
-                exec compile(example.source, filename, "single",
-                             compileflags, 1) in test.globs
-                self.debugger.set_continue() # ==== Example Finished ====
-                exception = None
-            except KeyboardInterrupt:
-                raise
-            except:
-                exception = sys.exc_info()
-                self.debugger.set_continue() # ==== Example Finished ====
-
-            got = self._fakeout.getvalue()  # the actual output
-            self._fakeout.truncate(0)
-            outcome = FAILURE   # guilty until proved innocent or insane
-
-            # If the example executed without raising any exceptions,
-            # verify its output.
-            if exception is None:
-                if check(example.want, got, self.optionflags):
-                    outcome = SUCCESS
-
-            # The example raised an exception:  check if it was expected.
-            else:
-                exc_info = sys.exc_info()
-                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
-                if not quiet:
-                    got += _exception_traceback(exc_info)
-
-                # If `example.exc_msg` is None, then we weren't expecting
-                # an exception.
-                if example.exc_msg is None:
-                    outcome = BOOM
-
-                # We expected an exception:  see whether it matches.
-                elif check(example.exc_msg, exc_msg, self.optionflags):
-                    outcome = SUCCESS
-
-                # Another chance if they didn't care about the detail.
-                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
-                    m1 = re.match(r'[^:]*:', example.exc_msg)
-                    m2 = re.match(r'[^:]*:', exc_msg)
-                    if m1 and m2 and check(m1.group(0), m2.group(0),
-                                           self.optionflags):
-                        outcome = SUCCESS
-
-            # Report the outcome.
-            if outcome is SUCCESS:
-                if not quiet:
-                    self.report_success(out, test, example, got)
-            elif outcome is FAILURE:
-                if not quiet:
-                    self.report_failure(out, test, example, got)
-                failures += 1
-            elif outcome is BOOM:
-                if not quiet:
-                    self.report_unexpected_exception(out, test, example,
-                                                     exc_info)
-                failures += 1
-            else:
-                assert False, ("unknown outcome", outcome)
-
-        # Restore the option flags (in case they were modified)
-        self.optionflags = original_optionflags
-
-        # Record and return the number of failures and tries.
-        self.__record_outcome(test, failures, tries)
-        return failures, tries
-
-    def __record_outcome(self, test, f, t):
-        """
-        Record the fact that the given DocTest (`test`) generated `f`
-        failures out of `t` tried examples.
-        """
-        f2, t2 = self._name2ft.get(test.name, (0,0))
-        self._name2ft[test.name] = (f+f2, t+t2)
-        self.failures += f
-        self.tries += t
-
-    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
-                                         r'(?P<name>[\w\.]+)'
-                                         r'\[(?P<examplenum>\d+)\]>$')
-    def __patched_linecache_getlines(self, filename):
-        m = self.__LINECACHE_FILENAME_RE.match(filename)
-        if m and m.group('name') == self.test.name:
-            example = self.test.examples[int(m.group('examplenum'))]
-            return example.source.splitlines(True)
-        else:
-            return self.save_linecache_getlines(filename)
-
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        """
-        Run the examples in `test`, and display the results using the
-        writer function `out`.
-
-        The examples are run in the namespace `test.globs`.  If
-        `clear_globs` is true (the default), then this namespace will
-        be cleared after the test runs, to help with garbage
-        collection.  If you would like to examine the namespace after
-        the test completes, then use `clear_globs=False`.
-
-        `compileflags` gives the set of flags that should be used by
-        the Python compiler when running the examples.  If not
-        specified, then it will default to the set of future-import
-        flags that apply to `globs`.
-
-        The output of each example is checked by the runner's
-        `OutputChecker`, and the results are formatted by
-        the `DocTestRunner.report_*` methods.
-        """
-        self.test = test
-
-        if compileflags is None:
-            compileflags = _extract_future_flags(test.globs)
-
-        save_stdout = sys.stdout
-        if out is None:
-            out = save_stdout.write
-        sys.stdout = self._fakeout
-
-        # Patch pdb.set_trace to restore sys.stdout during interactive
-        # debugging (so it's not still redirected to self._fakeout).
-        # Note that the interactive output will go to *our*
-        # save_stdout, even if that's not the real sys.stdout; this
-        # allows us to write test cases for the set_trace behavior.
-        save_set_trace = pdb.set_trace
-        self.debugger = _OutputRedirectingPdb(save_stdout)
-        self.debugger.reset()
-        pdb.set_trace = self.debugger.set_trace
-
-        # Patch linecache.getlines, so we can see the example's source
-        # when we're inside the debugger.
-        self.save_linecache_getlines = linecache.getlines
-        linecache.getlines = self.__patched_linecache_getlines
-
-        try:
-            return self.__run(test, compileflags, out)
-        finally:
-            sys.stdout = save_stdout
-            pdb.set_trace = save_set_trace
-            linecache.getlines = self.save_linecache_getlines
-            if clear_globs:
-                test.globs.clear()
-
-    #/////////////////////////////////////////////////////////////////
-    # Summarization
-    #/////////////////////////////////////////////////////////////////
-    def summarize(self, verbose=None):
-        """
-        Print a summary of all the test cases that have been run by
-        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
-        the total number of failed examples, and `t` is the total
-        number of tried examples.
-
-        The optional `verbose` argument controls how detailed the
-        summary is.  If the verbosity is not specified, then the
-        DocTestRunner's verbosity is used.
-        """
-        if verbose is None:
-            verbose = self._verbose
-        notests = []
-        passed = []
-        failed = []
-        totalt = totalf = 0
-        for x in self._name2ft.items():
-            name, (f, t) = x
-            assert f <= t
-            totalt += t
-            totalf += f
-            if t == 0:
-                notests.append(name)
-            elif f == 0:
-                passed.append( (name, t) )
-            else:
-                failed.append(x)
-        if verbose:
-            if notests:
-                print len(notests), "items had no tests:"
-                notests.sort()
-                for thing in notests:
-                    print "   ", thing
-            if passed:
-                print len(passed), "items passed all tests:"
-                passed.sort()
-                for thing, count in passed:
-                    print " %3d tests in %s" % (count, thing)
-        if failed:
-            print self.DIVIDER
-            print len(failed), "items had failures:"
-            failed.sort()
-            for thing, (f, t) in failed:
-                print " %3d of %3d in %s" % (f, t, thing)
-        if verbose:
-            print totalt, "tests in", len(self._name2ft), "items."
-            print totalt - totalf, "passed and", totalf, "failed."
-        if totalf:
-            print "***Test Failed***", totalf, "failures."
-        elif verbose:
-            print "Test passed."
-        return totalf, totalt
-
-    #/////////////////////////////////////////////////////////////////
-    # Backward compatibility cruft to maintain doctest.master.
-    #/////////////////////////////////////////////////////////////////
-    def merge(self, other):
-        d = self._name2ft
-        for name, (f, t) in other._name2ft.items():
-            if name in d:
-                print "*** DocTestRunner.merge: '" + name + "' in both" \
-                    " testers; summing outcomes."
-                f2, t2 = d[name]
-                f = f + f2
-                t = t + t2
-            d[name] = f, t
-
-class OutputChecker:
-    """
-    A class used to check whether the actual output from a doctest
-    example matches the expected output.  `OutputChecker` defines two
-    methods: `check_output`, which compares a given pair of outputs,
-    and returns true if they match; and `output_difference`, which
-    returns a string describing the differences between two outputs.
-    """
-    def check_output(self, want, got, optionflags):
-        """
-        Return True iff the actual output from an example (`got`)
-        matches the expected output (`want`).  These strings are
-        always considered to match if they are identical; but
-        depending on what option flags the test runner is using,
-        several non-exact match types are also possible.  See the
-        documentation for `DocTestRunner` for more information about
-        option flags.
-        """
-        # Handle the common case first, for efficiency:
-        # if they're string-identical, always return true.
-        if got == want:
-            return True
-
-        # The values True and False replaced 1 and 0 as the return
-        # value for boolean comparisons in Python 2.3.
-        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
-            if (got,want) == ("True\n", "1\n"):
-                return True
-            if (got,want) == ("False\n", "0\n"):
-                return True
-
-        # <BLANKLINE> can be used as a special sequence to signify a
-        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            # Replace <BLANKLINE> in want with a blank line.
-            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
-                          '', want)
-            # If a line in got contains only spaces, then remove the
-            # spaces.
-            got = re.sub('(?m)^\s*?$', '', got)
-            if got == want:
-                return True
-
-        # This flag causes doctest to ignore any differences in the
-        # contents of whitespace strings.  Note that this can be used
-        # in conjunction with the ELLIPSIS flag.
-        if optionflags & NORMALIZE_WHITESPACE:
-            got = ' '.join(got.split())
-            want = ' '.join(want.split())
-            if got == want:
-                return True
-
-        # The ELLIPSIS flag says to let the sequence "..." in `want`
-        # match any substring in `got`.
-        if optionflags & ELLIPSIS:
-            if _ellipsis_match(want, got):
-                return True
-
-        # We didn't find any match; return false.
-        return False
-
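-    # Editor's sketch (an illustration, not part of the original
-    # module): with no flags set, "True" is accepted where "1" was
-    # expected; DONT_ACCEPT_TRUE_FOR_1 disables that.
-    #
-    #     >>> checker = OutputChecker()
-    #     >>> checker.check_output('1\n', 'True\n', 0)
-    #     True
-    #     >>> checker.check_output('1\n', 'True\n', DONT_ACCEPT_TRUE_FOR_1)
-    #     False
-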
-    # Should we do a fancy diff?
-    def _do_a_fancy_diff(self, want, got, optionflags):
-        # Not unless they asked for a fancy diff.
-        if not optionflags & (REPORT_UDIFF |
-                              REPORT_CDIFF |
-                              REPORT_NDIFF):
-            return False
-
-        # If expected output uses ellipsis, a meaningful fancy diff is
-        # too hard ... or maybe not.  In two real-life failures Tim saw,
-        # a diff was a major help anyway, so this is commented out.
-        # [todo] _ellipsis_match() knows which pieces do and don't match,
-        # and could be the basis for a kick-ass diff in this case.
-        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
-        ##    return False
-
-        # ndiff does intraline difference marking, so can be useful even
-        # for 1-line differences.
-        if optionflags & REPORT_NDIFF:
-            return True
-
-        # The other diff types need at least a few lines to be helpful.
-        return want.count('\n') > 2 and got.count('\n') > 2
-
-    def output_difference(self, example, got, optionflags):
-        """
-        Return a string describing the differences between the
-        expected output for a given example (`example`) and the actual
-        output (`got`).  `optionflags` is the set of option flags used
-        to compare `want` and `got`.
-        """
-        want = example.want
-        # If <BLANKLINE>s are being used, then replace blank lines
-        # with <BLANKLINE> in the actual output string.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
-
-        # Check if we should use diff.
-        if self._do_a_fancy_diff(want, got, optionflags):
-            # Split want & got into lines.
-            want_lines = want.splitlines(True)  # True == keep line ends
-            got_lines = got.splitlines(True)
-            # Use difflib to find their differences.
-            if optionflags & REPORT_UDIFF:
-                diff = difflib.unified_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'unified diff with -expected +actual'
-            elif optionflags & REPORT_CDIFF:
-                diff = difflib.context_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'context diff with expected followed by actual'
-            elif optionflags & REPORT_NDIFF:
-                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
-                diff = list(engine.compare(want_lines, got_lines))
-                kind = 'ndiff with -expected +actual'
-            else:
-                assert 0, 'Bad diff option'
-            # Remove trailing whitespace on diff output.
-            diff = [line.rstrip() + '\n' for line in diff]
-            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
-
-        # If we're not using diff, then simply list the expected
-        # output followed by the actual output.
-        if want and got:
-            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
-        elif want:
-            return 'Expected:\n%sGot nothing\n' % _indent(want)
-        elif got:
-            return 'Expected nothing\nGot:\n%s' % _indent(got)
-        else:
-            return 'Expected nothing\nGot nothing\n'
-
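-# Editor's sketch (an illustration, not part of the original module):
-# without diff flags, output_difference simply lists expected and
-# actual output:
-#
-#     >>> ex = Example('print x\n', '1\n')
-#     >>> print OutputChecker().output_difference(ex, '2\n', 0),
-#     Expected:
-#         1
-#     Got:
-#         2
-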
-class DocTestFailure(Exception):
-    """A DocTest example has failed in debugging mode.
-
-    The exception instance has attributes:
-
-    - test: the DocTest object being run
-
-    - example: the Example object that failed
-
-    - got: the actual output
-    """
-    def __init__(self, test, example, got):
-        self.test = test
-        self.example = example
-        self.got = got
-
-    def __str__(self):
-        return str(self.test)
-
-class UnexpectedException(Exception):
-    """A DocTest example has encountered an unexpected exception
-
-    The exception instance has attributes:
-
-    - test: the DocTest object being run
-
-    - example: the Example object that failed
-
-    - exc_info: the exception info
-    """
-    def __init__(self, test, example, exc_info):
-        self.test = test
-        self.example = example
-        self.exc_info = exc_info
-
-    def __str__(self):
-        return str(self.test)
-
-class DebugRunner(DocTestRunner):
-    r"""Run doc tests but raise an exception as soon as there is a failure.
-
-       If an unexpected exception occurs, an UnexpectedException is raised.
-       It contains the test, the example, and the original exception:
-
-         >>> runner = DebugRunner(verbose=False)
-         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
-         ...                                    {}, 'foo', 'foo.py', 0)
-         >>> try:
-         ...     runner.run(test)
-         ... except UnexpectedException, failure:
-         ...     pass
-
-         >>> failure.test is test
-         True
-
-         >>> failure.example.want
-         '42\n'
-
-         >>> exc_info = failure.exc_info
-         >>> raise exc_info[0], exc_info[1], exc_info[2]
-         Traceback (most recent call last):
-         ...
-         KeyError
-
-       We wrap the original exception to give the calling application
-       access to the test and example information.
-
-       If the output doesn't match, then a DocTestFailure is raised:
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 1
-         ...      >>> x
-         ...      2
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> try:
-         ...    runner.run(test)
-         ... except DocTestFailure, failure:
-         ...    pass
-
-       DocTestFailure objects provide access to the test:
-
-         >>> failure.test is test
-         True
-
-       As well as to the example:
-
-         >>> failure.example.want
-         '2\n'
-
-       and the actual output:
-
-         >>> failure.got
-         '1\n'
-
-       If a failure or error occurs, the globals are left intact:
-
-         >>> del test.globs['__builtins__']
-         >>> test.globs
-         {'x': 1}
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 2
-         ...      >>> raise KeyError
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> runner.run(test)
-         Traceback (most recent call last):
-         ...
-         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
-
-         >>> del test.globs['__builtins__']
-         >>> test.globs
-         {'x': 2}
-
-       But the globals are cleared if there is no error:
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 2
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> runner.run(test)
-         (0, 1)
-
-         >>> test.globs
-         {}
-
-       """
-
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-        if clear_globs:
-            test.globs.clear()
-        return r
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        raise UnexpectedException(test, example, exc_info)
-
-    def report_failure(self, out, test, example, got):
-        raise DocTestFailure(test, example, got)
-
-######################################################################
-## 6. Test Functions
-######################################################################
-# These should be backwards compatible.
-
-# For backward compatibility, a global instance of a DocTestRunner
-# class, updated by testmod.
-master = None
-
-def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
-            report=True, optionflags=0, extraglobs=None,
-            raise_on_error=False, exclude_empty=False):
-    """m=None, name=None, globs=None, verbose=None, isprivate=None,
-       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
-       exclude_empty=False
-
-    Test examples in docstrings in functions and classes reachable
-    from module m (or the current module if m is not supplied), starting
-    with m.__doc__.  Unless isprivate is specified, private names
-    are not skipped.
-
-    Also test examples reachable from dict m.__test__ if it exists and is
-    not None.  m.__test__ maps names to functions, classes and strings;
-    function and class docstrings are tested even if the name is private;
-    strings are tested directly, as if they were docstrings.
-
-    Return (#failures, #tests).
-
-    See doctest.__doc__ for an overview.
-
-    Optional keyword arg "name" gives the name of the module; by default
-    use m.__name__.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use m.__dict__.  A copy of this
-    dict is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.  This is new in 2.4.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  This is new in 2.3.  Possible values (see the
-    docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Deprecated in Python 2.4:
-    Optional keyword arg "isprivate" specifies a function used to
-    determine whether a name is private.  The default function
-    treats all functions as public.  Optionally, "isprivate" can be
-    set to doctest.is_private to skip over functions marked as private
-    using the underscore naming convention; see its docs for details.
-
-    Advanced tomfoolery:  testmod runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testmod is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if isprivate is not None:
-        warnings.warn("the isprivate argument is deprecated; "
-                      "examine DocTestFinder.find() lists instead",
-                      DeprecationWarning)
-
-    # If no module was given, then use __main__.
-    if m is None:
-        # DWA - m will still be None if this wasn't invoked from the command
-        # line, in which case the following TypeError is about as good an error
-        # as we should expect
-        m = sys.modules.get('__main__')
-
-    # Check that we were actually given a module.
-    if not inspect.ismodule(m):
-        raise TypeError("testmod: module required; %r" % (m,))
-
-    # If no name was given, then use the module's name.
-    if name is None:
-        name = m.__name__
-
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
-        runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
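-# A usage sketch (the module name "mymodule" is hypothetical): run a
-# module's doctests with ellipsis matching enabled and report how many
-# examples failed.
-#
-#     import doctest
-#     import mymodule
-#     failures, tries = doctest.testmod(mymodule, verbose=False,
-#                                       optionflags=doctest.ELLIPSIS)
-#     print failures, "of", tries, "examples failed"
-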
-def testfile(filename, module_relative=True, name=None, package=None,
-             globs=None, verbose=None, report=True, optionflags=0,
-             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
-    """
-    Test examples in the given file.  Return (#failures, #tests).
-
-    Optional keyword arg "module_relative" specifies how filenames
-    should be interpreted:
-
-      - If "module_relative" is True (the default), then "filename"
-         specifies a module-relative path.  By default, this path is
-         relative to the calling module's directory; but if the
-         "package" argument is specified, then it is relative to that
-         package.  To ensure os-independence, "filename" should use
-         "/" characters to separate path segments, and should not
-         be an absolute path (i.e., it may not begin with "/").
-
-      - If "module_relative" is False, then "filename" specifies an
-        os-specific path.  The path may be absolute or relative (to
-        the current working directory).
-
-    Optional keyword arg "name" gives the name of the test; by default
-    use the file's basename.
-
-    Optional keyword argument "package" is a Python package or the
-    name of a Python package whose directory should be used as the
-    base directory for a module relative filename.  If no package is
-    specified, then the calling module's directory is used as the base
-    directory for module relative filenames.  It is an error to
-    specify "package" if "module_relative" is False.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use {}.  A copy of this dict
-    is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  Possible values (see the docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Optional keyword arg "parser" specifies a DocTestParser (or
-    subclass) that should be used to extract tests from the files.
-
-    Advanced tomfoolery:  testfile runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testfile is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path
-    if module_relative:
-        package = _normalize_module(package)
-        filename = _module_relative_path(package, filename)
-
-    # If no name was given, then use the file's name.
-    if name is None:
-        name = os.path.basename(filename)
-
-    # Assemble the globals.
-    if globs is None:
-        globs = {}
-    else:
-        globs = globs.copy()
-    if extraglobs is not None:
-        globs.update(extraglobs)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    # Read the file, convert it to a test, and run it.
-    s = open(filename).read()
-    test = parser.get_doctest(s, globs, name, filename, 0)
-    runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
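-# A sketch of a typical call (the file name "example.txt" is a
-# placeholder): run the examples in a module-relative text file with
-# whitespace normalization enabled.
-#
-#     import doctest
-#     failures, tries = doctest.testfile(
-#         "example.txt", optionflags=doctest.NORMALIZE_WHITESPACE)
-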
-def run_docstring_examples(f, globs, verbose=False, name="NoName",
-                           compileflags=None, optionflags=0):
-    """
-    Test examples in the given object's docstring (`f`), using `globs`
-    as globals.  Optional argument `name` is used in failure messages.
-    If the optional argument `verbose` is true, then generate output
-    even if there are no failures.
-
-    `compileflags` gives the set of flags that should be used by the
-    Python compiler when running the examples.  If not specified, then
-    it will default to the set of future-import flags that apply to
-    `globs`.
-
-    Optional keyword arg `optionflags` specifies options for the
-    testing and output.  See the documentation for `testmod` for more
-    information.
-    """
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(verbose=verbose, recurse=False)
-    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-    for test in finder.find(f, name, globs=globs):
-        runner.run(test, compileflags=compileflags)
-
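-# A sketch (the function "f" is illustrative): check one object's
-# docstring examples in isolation, without touching the rest of its
-# module.
-#
-#     def f():
-#         '''
-#         >>> 2 + 2
-#         4
-#         '''
-#     run_docstring_examples(f, {}, verbose=False, name="f")
-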
-######################################################################
-## 7. Tester
-######################################################################
-# This is provided only for backwards compatibility.  It's not
-# actually used in any way.
-
-class Tester:
-    def __init__(self, mod=None, globs=None, verbose=None,
-                 isprivate=None, optionflags=0):
-
-        warnings.warn("class Tester is deprecated; "
-                      "use class doctest.DocTestRunner instead",
-                      DeprecationWarning, stacklevel=2)
-        if mod is None and globs is None:
-            raise TypeError("Tester.__init__: must specify mod or globs")
-        if mod is not None and not inspect.ismodule(mod):
-            raise TypeError("Tester.__init__: mod must be a module; %r" %
-                            (mod,))
-        if globs is None:
-            globs = mod.__dict__
-        self.globs = globs
-
-        self.verbose = verbose
-        self.isprivate = isprivate
-        self.optionflags = optionflags
-        self.testfinder = DocTestFinder(_namefilter=isprivate)
-        self.testrunner = DocTestRunner(verbose=verbose,
-                                        optionflags=optionflags)
-
-    def runstring(self, s, name):
-        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
-        if self.verbose:
-            print "Running string", name
-        (f,t) = self.testrunner.run(test)
-        if self.verbose:
-            print f, "of", t, "examples failed in string", name
-        return (f,t)
-
-    def rundoc(self, object, name=None, module=None):
-        f = t = 0
-        tests = self.testfinder.find(object, name, module=module,
-                                     globs=self.globs)
-        for test in tests:
-            (f2, t2) = self.testrunner.run(test)
-            (f,t) = (f+f2, t+t2)
-        return (f,t)
-
-    def rundict(self, d, name, module=None):
-        import new
-        m = new.module(name)
-        m.__dict__.update(d)
-        if module is None:
-            module = False
-        return self.rundoc(m, name, module)
-
-    def run__test__(self, d, name):
-        import new
-        m = new.module(name)
-        m.__test__ = d
-        return self.rundoc(m, name)
-
-    def summarize(self, verbose=None):
-        return self.testrunner.summarize(verbose)
-
-    def merge(self, other):
-        self.testrunner.merge(other.testrunner)
-
-######################################################################
-## 8. Unittest Support
-######################################################################
-
-_unittest_reportflags = 0
-
-def set_unittest_reportflags(flags):
-    """Sets the unittest option flags.
-
-    The old flag is returned so that a runner could restore the old
-    value if it wished to:
-
-      >>> old = _unittest_reportflags
-      >>> set_unittest_reportflags(REPORT_NDIFF |
-      ...                          REPORT_ONLY_FIRST_FAILURE) == old
-      True
-
-      >>> import doctest
-      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
-      ...                                   REPORT_ONLY_FIRST_FAILURE)
-      True
-
-    Only reporting flags can be set:
-
-      >>> set_unittest_reportflags(ELLIPSIS)
-      Traceback (most recent call last):
-      ...
-      ValueError: ('Only reporting flags allowed', 8)
-
-      >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
-      ...                                   REPORT_ONLY_FIRST_FAILURE)
-      True
-    """
-    global _unittest_reportflags
-
-    if (flags & REPORTING_FLAGS) != flags:
-        raise ValueError("Only reporting flags allowed", flags)
-    old = _unittest_reportflags
-    _unittest_reportflags = flags
-    return old
-
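-# A sketch of the restore idiom described above: switch the reporting
-# flags for one run, then put the old value back.
-#
-#     old = set_unittest_reportflags(REPORT_NDIFF |
-#                                    REPORT_ONLY_FIRST_FAILURE)
-#     try:
-#         pass  # run doctest-based unittest suites here
-#     finally:
-#         set_unittest_reportflags(old)
-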
-_para_re = re.compile(r'\s*\n\s*\n\s*')
-def _unittest_count(docstring):
-    words = 0
-    count = 0
-    for p in _para_re.split(docstring):
-        p = p.strip()
-        if not p:
-            continue
-        if p.startswith('>>> '):
-            if words:
-                count += 1
-                words = 0
-        else:
-            words = 1
-
-    return count or 1
-
-
-class DocTestCase(unittest.TestCase):
-
-    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
-                 checker=None):
-
-        unittest.TestCase.__init__(self)
-        self._dt_optionflags = optionflags
-        self._dt_checker = checker
-        self._dt_test = test
-        self._dt_setUp = setUp
-        self._dt_tearDown = tearDown
-
-        self._dt_count = _unittest_count(test.docstring)
-
-    def countTestCases(self):
-        return self._dt_count
-
-    def setUp(self):
-        test = self._dt_test
-
-        if self._dt_setUp is not None:
-            self._dt_setUp(test)
-
-    def tearDown(self):
-        test = self._dt_test
-
-        if self._dt_tearDown is not None:
-            self._dt_tearDown(test)
-
-        test.globs.clear()
-
-    def runTest(self):
-        test = self._dt_test
-        old = sys.stdout
-        new = StringIO()
-        optionflags = self._dt_optionflags
-
-        if not (optionflags & REPORTING_FLAGS):
-            # The option flags don't include any reporting flags,
-            # so add the default reporting flags
-            optionflags |= _unittest_reportflags
-
-        runner = DocTestRunner(optionflags=optionflags,
-                               checker=self._dt_checker, verbose=False)
-
-        try:
-            runner.DIVIDER = "-"*70
-            failures, tries = runner.run(
-                test, out=new.write, clear_globs=False)
-        finally:
-            sys.stdout = old
-
-        if failures:
-            raise self.failureException(self.format_failure(new.getvalue()))
-
-    def format_failure(self, err):
-        test = self._dt_test
-        if test.lineno is None:
-            lineno = 'unknown line number'
-        else:
-            lineno = '%s' % test.lineno
-        lname = '.'.join(test.name.split('.')[-1:])
-        return ('Failed doctest test for %s\n'
-                '  File "%s", line %s, in %s\n\n%s'
-                % (test.name, test.filename, lineno, lname, err)
-                )
-
-    def debug(self):
-        r"""Run the test case without results and without catching exceptions
-
-           The unit test framework includes a debug method on test cases
-           and test suites to support post-mortem debugging.  The test code
-           is run in such a way that errors are not caught.  This way a
-           caller can catch the errors and initiate post-mortem debugging.
-
-           The DocTestCase provides a debug method that raises
-           UnexpectedException errors if there is an unexpected
-           exception:
-
-             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
-             ...                {}, 'foo', 'foo.py', 0)
-             >>> case = DocTestCase(test)
-             >>> try:
-             ...     case.debug()
-             ... except UnexpectedException, failure:
-             ...     pass
-
-           The UnexpectedException contains the test, the example, and
-           the original exception:
-
-             >>> failure.test is test
-             True
-
-             >>> failure.example.want
-             '42\n'
-
-             >>> exc_info = failure.exc_info
-             >>> raise exc_info[0], exc_info[1], exc_info[2]
-             Traceback (most recent call last):
-             ...
-             KeyError
-
-           If the output doesn't match, then a DocTestFailure is raised:
-
-             >>> test = DocTestParser().get_doctest('''
-             ...      >>> x = 1
-             ...      >>> x
-             ...      2
-             ...      ''', {}, 'foo', 'foo.py', 0)
-             >>> case = DocTestCase(test)
-
-             >>> try:
-             ...    case.debug()
-             ... except DocTestFailure, failure:
-             ...    pass
-
-           DocTestFailure objects provide access to the test:
-
-             >>> failure.test is test
-             True
-
-           As well as to the example:
-
-             >>> failure.example.want
-             '2\n'
-
-           and the actual output:
-
-             >>> failure.got
-             '1\n'
-
-           """
-
-        self.setUp()
-        runner = DebugRunner(optionflags=self._dt_optionflags,
-                             checker=self._dt_checker, verbose=False)
-        runner.run(self._dt_test)
-        self.tearDown()
-
-    def id(self):
-        return self._dt_test.name
-
-    def __repr__(self):
-        name = self._dt_test.name.split('.')
-        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
-
-    __str__ = __repr__
-
-    def shortDescription(self):
-        return "Doctest: " + self._dt_test.name
-
-def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
-                 **options):
-    """
-    Convert doctest tests for a module to a unittest test suite.
-
-    This converts each documentation string in a module that
-    contains doctest tests to a unittest test case.  If any of the
-    tests in a doc string fail, then the test case fails.  An exception
-    is raised showing the name of the file containing the test and a
-    (sometimes approximate) line number.
-
-    The `module` argument provides the module to be tested.  The argument
-    can be either a module or a module name.
-
-    If no argument is given, the calling module is used.
-
-    A number of options may be provided as keyword arguments:
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-       A set of doctest option flags expressed as an integer.
-    """
-
-    if test_finder is None:
-        test_finder = DocTestFinder()
-
-    module = _normalize_module(module)
-    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
-    if globs is None:
-        globs = module.__dict__
-    if not tests:
-        # Why do we want to do this? Because it reveals a bug that might
-        # otherwise be hidden.
-        raise ValueError(module, "has no tests")
-
-    tests.sort()
-    suite = unittest.TestSuite()
-    for test in tests:
-        if len(test.examples) == 0:
-            continue
-        if not test.filename:
-            filename = module.__file__
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-            test.filename = filename
-        suite.addTest(DocTestCase(test, **options))
-
-    return suite
-
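-# A sketch (the module name "mymodule" is hypothetical): collect a
-# module's doctests into a suite and run them under unittest.
-#
-#     import unittest
-#     suite = DocTestSuite("mymodule", optionflags=ELLIPSIS)
-#     unittest.TextTestRunner().run(suite)
-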
-class DocFileCase(DocTestCase):
-
-    def id(self):
-        return '_'.join(self._dt_test.name.split('.'))
-
-    def __repr__(self):
-        return self._dt_test.filename
-    __str__ = __repr__
-
-    def format_failure(self, err):
-        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
-                % (self._dt_test.name, self._dt_test.filename, err)
-                )
-
-def DocFileTest(path, module_relative=True, package=None,
-                globs=None, parser=DocTestParser(), **options):
-    if globs is None:
-        globs = {}
-    else:
-        globs = globs.copy()
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path.
-    if module_relative:
-        package = _normalize_module(package)
-        path = _module_relative_path(package, path)
-    if "__file__" not in globs:
-        globs["__file__"] = path
-
-    # Find the file and read it.
-    name = os.path.basename(path)
-    doc = open(path).read()
-
-    # Convert it to a test, and wrap it in a DocFileCase.
-    test = parser.get_doctest(doc, globs, name, path, 0)
-    return DocFileCase(test, **options)
-
-def DocFileSuite(*paths, **kw):
-    """A unittest suite for one or more doctest files.
-
-    The path to each doctest file is given as a string; the
-    interpretation of that string depends on the keyword argument
-    "module_relative".
-
-    A number of options may be provided as keyword arguments:
-
-    module_relative
-      If "module_relative" is True, then the given file paths are
-      interpreted as os-independent module-relative paths.  By
-      default, these paths are relative to the calling module's
-      directory; but if the "package" argument is specified, then
-      they are relative to that package.  To ensure os-independence,
-      "filename" should use "/" characters to separate path
-      segments, and may not be an absolute path (i.e., it may not
-      begin with "/").
-
-      If "module_relative" is False, then the given file paths are
-      interpreted as os-specific paths.  These paths may be absolute
-      or relative (to the current working directory).
-
-    package
-      A Python package or the name of a Python package whose directory
-      should be used as the base directory for module relative paths.
-      If "package" is not specified, then the calling module's
-      directory is used as the base directory for module relative
-      filenames.  It is an error to specify "package" if
-      "module_relative" is False.
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-      A set of doctest option flags expressed as an integer.
-
-    parser
-      A DocTestParser (or subclass) that should be used to extract
-      tests from the files.
-    """
-    suite = unittest.TestSuite()
-
-    # We do this here so that _normalize_module is called at the right
-    # level.  If it were called in DocFileTest, then this function
-    # would be the caller and we might guess the package incorrectly.
-    if kw.get('module_relative', True):
-        kw['package'] = _normalize_module(kw.get('package'))
-
-    for path in paths:
-        suite.addTest(DocFileTest(path, **kw))
-
-    return suite
-
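-# A sketch (the file names are placeholders): build one suite from two
-# documentation files, seeding each test's globals in setUp.
-#
-#     def setUp(test):
-#         test.globs["answer"] = 42
-#
-#     suite = DocFileSuite("intro.txt", "advanced.txt", setUp=setUp)
-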
-######################################################################
-## 9. Debugging Support
-######################################################################
-
-def script_from_examples(s):
-    r"""Extract script from text with examples.
-
-       Converts text with examples to a Python script.  Example input is
-       converted to regular code.  Example output and all other words
-       are converted to comments:
-
-       >>> text = '''
-       ...       Here are examples of simple math.
-       ...
-       ...           Python has super accurate integer addition
-       ...
-       ...           >>> 2 + 2
-       ...           5
-       ...
-       ...           And very friendly error messages:
-       ...
-       ...           >>> 1/0
-       ...           To Infinity
-       ...           And
-       ...           Beyond
-       ...
-       ...           You can use logic if you want:
-       ...
-       ...           >>> if 0:
-       ...           ...    blah
-       ...           ...    blah
-       ...           ...
-       ...
-       ...           Ho hum
-       ...           '''
-
-       >>> print script_from_examples(text)
-       # Here are examples of simple math.
-       #
-       #     Python has super accurate integer addition
-       #
-       2 + 2
-       # Expected:
-       ## 5
-       #
-       #     And very friendly error messages:
-       #
-       1/0
-       # Expected:
-       ## To Infinity
-       ## And
-       ## Beyond
-       #
-       #     You can use logic if you want:
-       #
-       if 0:
-          blah
-          blah
-       #
-       #     Ho hum
-       """
-    output = []
-    for piece in DocTestParser().parse(s):
-        if isinstance(piece, Example):
-            # Add the example's source code (strip trailing NL)
-            output.append(piece.source[:-1])
-            # Add the expected output:
-            want = piece.want
-            if want:
-                output.append('# Expected:')
-                output += ['## '+l for l in want.split('\n')[:-1]]
-        else:
-            # Add non-example text.
-            output += [_comment_line(l)
-                       for l in piece.split('\n')[:-1]]
-
-    # Trim junk on both ends.
-    while output and output[-1] == '#':
-        output.pop()
-    while output and output[0] == '#':
-        output.pop(0)
-    # Combine the output, and return it.
-    return '\n'.join(output)
-
-def testsource(module, name):
-    """Extract the test sources from a doctest docstring as a script.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the doc string with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    tests = DocTestFinder().find(module)
-    test = [t for t in tests if t.name == name]
-    if not test:
-        raise ValueError(name, "not found in tests")
-    test = test[0]
-    testsrc = script_from_examples(test.docstring)
-    return testsrc
-
-def debug_src(src, pm=False, globs=None):
-    """Debug a single doctest docstring, in argument `src`'"""
-    testsrc = script_from_examples(src)
-    debug_script(testsrc, pm, globs)
-
-def debug_script(src, pm=False, globs=None):
-    "Debug a test script.  `src` is the script, as a string."
-    import pdb
-
-    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
-    # docs say, a file so created cannot be opened by name a second time
-    # on modern Windows boxes, and execfile() needs to open it.
-    srcfilename = tempfile.mktemp(".py", "doctestdebug")
-    f = open(srcfilename, 'w')
-    f.write(src)
-    f.close()
-
-    try:
-        if globs:
-            globs = globs.copy()
-        else:
-            globs = {}
-
-        if pm:
-            try:
-                execfile(srcfilename, globs, globs)
-            except:
-                print sys.exc_info()[1]
-                pdb.post_mortem(sys.exc_info()[2])
-        else:
-            # Note that %r is vital here.  '%s' instead can, e.g., cause
-            # backslashes to get treated as metacharacters on Windows.
-            pdb.run("execfile(%r)" % srcfilename, globs, globs)
-
-    finally:
-        os.remove(srcfilename)
-
-def debug(module, name, pm=False):
-    """Debug a single doctest docstring.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the docstring with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    testsrc = testsource(module, name)
-    debug_script(testsrc, pm, module.__dict__)
-
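-# A sketch (module and object names are illustrative): drop into pdb at
-# the point where a docstring's examples go wrong.
-#
-#     debug("mymodule", "mymodule.MyClass.method", pm=True)
-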
-######################################################################
-## 10. Example Usage
-######################################################################
-class _TestClass:
-    """
-    A pointless class, for sanity-checking of docstring testing.
-
-    Methods:
-        square()
-        get()
-
-    >>> _TestClass(13).get() + _TestClass(-12).get()
-    1
-    >>> hex(_TestClass(13).square().get())
-    '0xa9'
-    """
-
-    def __init__(self, val):
-        """val -> _TestClass object with associated value val.
-
-        >>> t = _TestClass(123)
-        >>> print t.get()
-        123
-        """
-
-        self.val = val
-
-    def square(self):
-        """square() -> square TestClass's associated value
-
-        >>> _TestClass(13).square().get()
-        169
-        """
-
-        self.val = self.val ** 2
-        return self
-
-    def get(self):
-        """get() -> return TestClass's associated value.
-
-        >>> x = _TestClass(-42)
-        >>> print x.get()
-        -42
-        """
-
-        return self.val
-
-__test__ = {"_TestClass": _TestClass,
-            "string": r"""
-                      Example of a string object, searched as-is.
-                      >>> x = 1; y = 2
-                      >>> x + y, x * y
-                      (3, 2)
-                      """,
-
-            "bool-int equivalence": r"""
-                                    In 2.2, boolean expressions displayed
-                                    0 or 1.  By default, we still accept
-                                    them.  This can be disabled by passing
-                                    DONT_ACCEPT_TRUE_FOR_1 to the new
-                                    optionflags argument.
-                                    >>> 4 == 4
-                                    1
-                                    >>> 4 == 4
-                                    True
-                                    >>> 4 > 4
-                                    0
-                                    >>> 4 > 4
-                                    False
-                                    """,
-
-            "blank lines": r"""
-                Blank lines can be marked with <BLANKLINE>:
-                    >>> print 'foo\n\nbar\n'
-                    foo
-                    <BLANKLINE>
-                    bar
-                    <BLANKLINE>
-            """,
-
-            "ellipsis": r"""
-                If the ellipsis flag is used, then '...' can be used to
-                elide substrings in the desired output:
-                    >>> print range(1000) #doctest: +ELLIPSIS
-                    [0, 1, 2, ..., 999]
-            """,
-
-            "whitespace normalization": r"""
-                If the whitespace normalization flag is used, then
-                differences in whitespace are ignored.
-                    >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
-                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                     15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
-                     27, 28, 29]
-            """,
-           }
-
-def _test():
-    r = unittest.TextTestRunner()
-    r.run(DocTestSuite())
-
-if __name__ == "__main__":
-    _test()
diff --git a/branches/bug1734/src/zope/testing/doctestunit.py b/branches/bug1734/src/zope/testing/doctestunit.py
deleted file mode 100644
index e6285b67..00000000
--- a/branches/bug1734/src/zope/testing/doctestunit.py
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Extension to use doctest tests as unit tests
-
-This module provides a DocTestSuite constructor for converting doctest
-tests to unit tests.
-
-$Id$
-"""
-
-from doctest import DocFileSuite, DocTestSuite
-from doctest import debug_src, debug
-
-def pprint():
-    from pprint import PrettyPrinter
-    def pprint(ob, **opts):
-        if 'width' not in opts:
-            opts['width'] = 1
-        return PrettyPrinter(**opts).pprint(ob)
-    return pprint
-
-pprint = pprint()
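-
-# A small illustration (the data is made up): width=1 makes the wrapped
-# pprint put one element per line, keeping doctest output stable.
-#
-#     >>> pprint(['a', 'b'])
-#     ['a',
-#      'b']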
diff --git a/branches/bug1734/src/zope/testing/formparser.py b/branches/bug1734/src/zope/testing/formparser.py
deleted file mode 100644
index 9afb25fe..00000000
--- a/branches/bug1734/src/zope/testing/formparser.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""HTML parser that extracts form information.
-
-This is intended to support functional tests that need to extract
-information from HTML forms returned by the publisher.
-
-See *formparser.txt* for documentation.
-
-"""
-__docformat__ = "reStructuredText"
-
-import HTMLParser
-import urlparse
-
-
-def parse(data, base=None):
-    """Return a form collection parsed from `data`.
-
-    `base` should be the URL from which `data` was retrieved.
-
-    """
-    parser = FormParser(data, base)
-    return parser.parse()
-
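-# A minimal sketch of the call above, using made-up markup:
-#
-#     forms = parse('<form name="f"><input name="q" value="x" /></form>')
-#     forms.f["q"].value    # -> 'x'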
-
-class FormParser(object):
-
-    def __init__(self, data, base=None):
-        self.data = data
-        self.base = base
-        self._parser = HTMLParser.HTMLParser()
-        self._parser.handle_data = self._handle_data
-        self._parser.handle_endtag = self._handle_endtag
-        self._parser.handle_starttag = self._handle_starttag
-        self._parser.handle_startendtag = self._handle_starttag
-        self._buffer = []
-        self.current = None
-        self.forms = FormCollection()
-
-    def parse(self):
-        """Parse the document, returning the collection of forms."""
-        self._parser.feed(self.data)
-        self._parser.close()
-        return self.forms
-
-    # HTMLParser handlers
-
-    def _handle_data(self, data):
-        self._buffer.append(data)
-
-    def _handle_endtag(self, tag):
-        if tag == "textarea":
-            self.textarea.value = "".join(self._buffer)
-            self.textarea = None
-        elif tag == "select":
-            self.select = None
-        elif tag == "option":
-            option = self.select.options[-1]
-            label = "".join(self._buffer)
-            if not option.label:
-                option.label = label
-            if not option.value:
-                option.value = label
-            if option.selected:
-                if self.select.multiple:
-                    self.select.value.append(option.value)
-                else:
-                    self.select.value = option.value
-
-    def _handle_starttag(self, tag, attrs):
-        del self._buffer[:]
-        d = {}
-        for name, value in attrs:
-            d[name] = value
-        name = d.get("name")
-        id = d.get("id") or d.get("xml:id")
-        if tag == "form":
-            method = kwattr(d, "method", "get")
-            action = d.get("action", "").strip() or None
-            if self.base and action:
-                action = urlparse.urljoin(self.base, action)
-            enctype = kwattr(d, "enctype", "application/x-www-form-urlencoded")
-            self.current = Form(name, id, method, action, enctype)
-            self.forms.append(self.current)
-        elif tag == "input":
-            type = kwattr(d, "type", "text")
-            checked = "checked" in d
-            disabled = "disabled" in d
-            readonly = "readonly" in d
-            src = d.get("src", "").strip() or None
-            if self.base and src:
-                src = urlparse.urljoin(self.base, src)
-            value = d.get("value")
-            size = intattr(d, "size")
-            maxlength = intattr(d, "maxlength")
-            self.current[name] = Input(name, id, type, value,
-                                       checked, disabled, readonly,
-                                       src, size, maxlength)
-        elif tag == "button":
-            pass
-        elif tag == "textarea":
-            disabled = "disabled" in d
-            readonly = "readonly" in d
-            self.textarea = Input(name, id, "textarea", None,
-                                  None, disabled, readonly,
-                                  None, None, None)
-            self.textarea.rows = intattr(d, "rows")
-            self.textarea.cols = intattr(d, "cols")
-            self.current[name] = self.textarea
-            # The value will be set when the </textarea> is seen.
-        elif tag == "base":
-            href = d.get("href", "").strip()
-            if href and self.base:
-                href = urlparse.urljoin(self.base, href)
-            self.base = href
-        elif tag == "select":
-            disabled = "disabled" in d
-            multiple = "multiple" in d
-            size = intattr(d, "size")
-            self.select = Select(name, id, disabled, multiple, size)
-            self.current[name] = self.select
-        elif tag == "option":
-            disabled = "disabled" in d
-            selected = "selected" in d
-            value = d.get("value")
-            label = d.get("label")
-            option = Option(id, value, selected, label, disabled)
-            self.select.options.append(option)
-
-
-def kwattr(d, name, default=None):
-    """Return attribute, converted to lowercase."""
-    v = d.get(name, default)
-    if v != default and v is not None:
-        v = v.strip().lower()
-        v = v or default
-    return v
-
-
-def intattr(d, name):
-    """Return attribute as an integer, or None."""
-    if name in d:
-        v = d[name].strip()
-        return int(v)
-    else:
-        return None
-
-
-class FormCollection(list):
-    """Collection of all forms from a page."""
-
-    def __getattr__(self, name):
-        for form in self:
-            if form.name == name:
-                return form
-        raise AttributeError, name
-
-
-class Form(dict):
-    """A specific form within a page."""
-
-    def __init__(self, name, id, method, action, enctype):
-        super(Form, self).__init__()
-        self.name = name
-        self.id = id
-        self.method = method
-        self.action = action
-        self.enctype = enctype
-
-
-class Input(object):
-    """Input element."""
-
-    rows = None
-    cols = None
-
-    def __init__(self, name, id, type, value, checked, disabled, readonly,
-                 src, size, maxlength):
-        super(Input, self).__init__()
-        self.name = name
-        self.id = id
-        self.type = type
-        self.value = value
-        self.checked = checked
-        self.disabled = disabled
-        self.readonly = readonly
-        self.src = src
-        self.size = size
-        self.maxlength = maxlength
-
-
-class Select(Input):
-    """Select element."""
-
-    def __init__(self, name, id, disabled, multiple, size):
-        super(Select, self).__init__(name, id, "select", None, None,
-                                     disabled, None, None, size, None)
-        self.options = []
-        self.multiple = multiple
-        if multiple:
-            self.value = []
-
-
-class Option(object):
-    """Individual value representation for a select element."""
-
-    def __init__(self, id, value, selected, label, disabled):
-        super(Option, self).__init__()
-        self.id = id
-        self.value = value
-        self.selected = selected
-        self.label = label
-        self.disabled = disabled
diff --git a/branches/bug1734/src/zope/testing/formparser.txt b/branches/bug1734/src/zope/testing/formparser.txt
deleted file mode 100644
index 907e1ec1..00000000
--- a/branches/bug1734/src/zope/testing/formparser.txt
+++ /dev/null
@@ -1,130 +0,0 @@
-==================
-Parsing HTML Forms
-==================
-
-Sometimes in functional tests, information from a generated form must
-be extracted in order to re-submit it as part of a subsequent request.
-The `zope.testing.formparser` module can be used for this purpose.
-
-The scanner is implemented using the `FormParser` class.  The
-constructor arguments are the page data containing the form and
-(optionally) the URL from which the page was retrieved:
-
-  >>> import zope.testing.formparser
-
-  >>> page_text = '''\
-  ... <html><body>
-  ...   <form name="form1" action="/cgi-bin/foobar.py" method="POST">
-  ...     <input type="hidden" name="f1" value="today" />
-  ...     <input type="submit" name="do-it-now" value="Go for it!" />
-  ...     <input type="IMAGE" name="not-really" value="Don't."
-  ...            src="dont.png" />
-  ...     <select name="pick-two" size="3" multiple>
-  ...       <option value="one" selected>First</option>
-  ...       <option value="two" label="Second">Another</option>
-  ...       <optgroup>
-  ...         <option value="three">Third</option>
-  ...         <option selected="selected">Fourth</option>
-  ...       </optgroup>
-  ...     </select>
-  ...   </form>
-  ...
-  ...   Just for fun, a second form, after specifying a base:
-  ...   <base href="http://www.example.com/base/" />
-  ...   <form action = 'sproing/sprung.html' enctype="multipart/form">
-  ...     <textarea name="sometext" rows="5">Some text.</textarea>
-  ...     <input type="Image" name="action" value="Do something."
-  ...            src="else.png" />
-  ...   </form>
-  ... </body></html>
-  ... '''
-
-  >>> parser = zope.testing.formparser.FormParser(page_text)
-  >>> forms = parser.parse()
-
-  >>> len(forms)
-  2
-  >>> forms.form1 is forms[0]
-  True
-  >>> forms.form1 is forms[1]
-  False
-
-More often, the `parse()` convenience function is all that's needed:
-
-  >>> forms = zope.testing.formparser.parse(
-  ...     page_text, "http://cgi.example.com/somewhere/form.html")
-
-  >>> len(forms)
-  2
-  >>> forms.form1 is forms[0]
-  True
-  >>> forms.form1 is forms[1]
-  False
-
-Once we have the form we're interested in, we can check form
-attributes and individual field values:
-
-  >>> form = forms.form1
-  >>> form.enctype
-  'application/x-www-form-urlencoded'
-  >>> form.method
-  'post'
-
-  >>> keys = form.keys()
-  >>> keys.sort()
-  >>> keys
-  ['do-it-now', 'f1', 'not-really', 'pick-two']
-
-  >>> not_really = form["not-really"]
-  >>> not_really.type
-  'image'
-  >>> not_really.value
-  "Don't."
-  >>> not_really.readonly
-  False
-  >>> not_really.disabled
-  False
-
-Note that relative URLs are converted to absolute URLs based on the
-``<base>`` element (if present) or using the base passed in to the
-constructor.
-
-  >>> form.action
-  'http://cgi.example.com/cgi-bin/foobar.py'
-  >>> not_really.src
-  'http://cgi.example.com/somewhere/dont.png'
-
-  >>> forms[1].action
-  'http://www.example.com/base/sproing/sprung.html'
-  >>> forms[1]["action"].src
-  'http://www.example.com/base/else.png'
-
-The ``<textarea>`` element provides some additional attributes:
-
-  >>> ta = forms[1]["sometext"]
-  >>> print ta.rows
-  5
-  >>> print ta.cols
-  None
-  >>> ta.value
-  'Some text.'
-
-The ``<select>`` element provides access to the options as well:
-
-  >>> select = form["pick-two"]
-  >>> select.multiple
-  True
-  >>> select.size
-  3
-  >>> select.type
-  'select'
-  >>> select.value
-  ['one', 'Fourth']
-
-  >>> options = select.options
-  >>> len(options)
-  4
-  >>> [opt.label for opt in options]
-  ['First', 'Second', 'Third', 'Fourth']
-  >>> [opt.value for opt in options]
-  ['one', 'two', 'three', 'Fourth']
diff --git a/branches/bug1734/src/zope/testing/loggingsupport.py b/branches/bug1734/src/zope/testing/loggingsupport.py
deleted file mode 100644
index f13b6219..00000000
--- a/branches/bug1734/src/zope/testing/loggingsupport.py
+++ /dev/null
@@ -1,122 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Support for testing logging code
-
-If you want to test that your code generates proper log output, you
-can create and install a handler that collects output:
-
-  >>> handler = InstalledHandler('foo.bar')
-
-The handler is installed into loggers for all of the names passed.  In
-addition, the logger level is set to 1, which means log
-everything.  If you want to log less than everything, you can provide a
-level keyword argument.  The level setting affects only the named
-loggers.
-
-Then, any log output is collected in the handler:
-
-  >>> logging.getLogger('foo.bar').exception('eek')
-  >>> logging.getLogger('foo.bar').info('blah blah')
-
-  >>> for record in handler.records:
-  ...     print record.name, record.levelname
-  ...     print ' ', record.getMessage()
-  foo.bar ERROR
-    eek
-  foo.bar INFO
-    blah blah
-
-A similar effect can be gotten by just printing the handler:
-
-  >>> print handler
-  foo.bar ERROR
-    eek
-  foo.bar INFO
-    blah blah
-
-After checking the log output, you need to uninstall the handler:
-
-  >>> handler.uninstall()
-
-At which point, the handler won't get any more log output.
-Let's clear the handler:
-
-  >>> handler.clear()
-  >>> handler.records
-  []
-
-And then log something:
-
-  >>> logging.getLogger('foo.bar').info('blah')
-
-and, sure enough, we still have no output:
-
-  >>> handler.records
-  []
-
-$Id$
-"""
-
-import logging
-
-class Handler(logging.Handler):
-
-    def __init__(self, *names, **kw):
-        logging.Handler.__init__(self)
-        self.names = names
-        self.records = []
-        self.setLoggerLevel(**kw)
-
-    def setLoggerLevel(self, level=1):
-        self.level = level
-        self.oldlevels = {}
-
-    def emit(self, record):
-        self.records.append(record)
-
-    def clear(self):
-        del self.records[:]
-
-    def install(self):
-        for name in self.names:
-            logger = logging.getLogger(name)
-            self.oldlevels[name] = logger.level
-            logger.setLevel(self.level)
-            logger.addHandler(self)
-
-    def uninstall(self):
-        for name in self.names:
-            logger = logging.getLogger(name)
-            logger.setLevel(self.oldlevels[name])
-            logger.removeHandler(self)
-
-    def __str__(self):
-        return '\n'.join(
-            [("%s %s\n  %s" %
-              (record.name, record.levelname,
-               '\n'.join([line
-                          for line in record.getMessage().split('\n')
-                          if line.strip()])
-               )
-              )
-              for record in self.records]
-              )
-
-
-class InstalledHandler(Handler):
-
-    def __init__(self, *names):
-        Handler.__init__(self, *names)
-        self.install()
-
diff --git a/branches/bug1734/src/zope/testing/loghandler.py b/branches/bug1734/src/zope/testing/loghandler.py
deleted file mode 100644
index 81d5a33d..00000000
--- a/branches/bug1734/src/zope/testing/loghandler.py
+++ /dev/null
@@ -1,77 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2003 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""logging handler for tests that check logging output.
-
-$Id$
-"""
-import logging
-
-class Handler(logging.Handler):
-    """Handler for use with unittest.TestCase objects.
-
-    The handler takes a TestCase instance as a constructor argument.
-    It can be registered with one or more loggers and collects log
-    records they generate.
-
-    The assertLogsMessage() and failIfLogsMessage() methods can be
-    used to check the logger output and causes the test to fail as
-    appropriate.
-    """
-
-    def __init__(self, testcase, propagate=False):
-        logging.Handler.__init__(self)
-        self.records = []
-        # loggers stores (logger, propagate) tuples
-        self.loggers = []
-        self.closed = False
-        self.propagate = propagate
-        self.testcase = testcase
-
-    def close(self):
-        """Remove handler from any loggers it was added to."""
-        if self.closed:
-            return
-        for logger, propagate in self.loggers:
-            logger.removeHandler(self)
-            logger.propagate = propagate
-        self.closed = True
-
-    def add(self, name):
-        """Add handler to logger named name."""
-        logger = logging.getLogger(name)
-        old_prop = logger.propagate
-        logger.addHandler(self)
-        if self.propagate:
-            logger.propagate = 1
-        else:
-            logger.propagate = 0
-        self.loggers.append((logger, old_prop))
-
-    def emit(self, record):
-        self.records.append(record)
-
-    def assertLogsMessage(self, msg, level=None):
-        for r in self.records:
-            if r.getMessage() == msg:
-                # Match when no level was requested, or the level agrees.
-                if level is None or r.levelno == level:
-                    return
-        msg = "No log message contained %r" % msg
-        if level is not None:
-            msg += " at level %d" % level
-        self.testcase.fail(msg)
-
-    def failIfLogsMessage(self, msg):
-        for r in self.records:
-            if r.getMessage() == msg:
-                self.testcase.fail("Found log message %r" % msg)
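-
-# A usage sketch (inside a unittest.TestCase method; the logger name and
-# message are illustrative):
-#
-#     handler = Handler(self)
-#     handler.add("myapp.db")
-#     logging.getLogger("myapp.db").warning("connection lost")
-#     handler.assertLogsMessage("connection lost", level=logging.WARNING)
-#     handler.close()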
diff --git a/branches/bug1734/src/zope/testing/module.py b/branches/bug1734/src/zope/testing/module.py
deleted file mode 100644
index e6274238..00000000
--- a/branches/bug1734/src/zope/testing/module.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Fake module support
-
-$Id$
-"""
-
-import sys
-
-class FakeModule:
-    def __init__(self, dict):
-        self.__dict = dict
-    def __getattr__(self, name):
-        try:
-            return self.__dict[name]
-        except KeyError:
-            raise AttributeError, name
-
-def setUp(test, name='README.txt'):
-    dict = test.globs
-    dict['__name__'] = name
-    sys.modules[name] = FakeModule(dict)
-
-def tearDown(test, name='README.txt'):
-    del sys.modules[name]
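-
-# A sketch of the intended wiring (suite construction is illustrative):
-# examples in README.txt then run as if they lived in a module named
-# README.txt.
-#
-#     from zope.testing.doctestunit import DocFileSuite
-#     suite = DocFileSuite('README.txt', setUp=setUp, tearDown=tearDown)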
diff --git a/branches/bug1734/src/zope/testing/tests.py b/branches/bug1734/src/zope/testing/tests.py
deleted file mode 100644
index 5fbbff96..00000000
--- a/branches/bug1734/src/zope/testing/tests.py
+++ /dev/null
@@ -1,30 +0,0 @@
-##############################################################################
-#
-# Copyright (c) 2004 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""Tests for the testing framework.
-
-$Id$
-"""
-import unittest
-from zope.testing.doctestunit import DocTestSuite, DocFileSuite
-
-
-def test_suite():
-    return unittest.TestSuite((
-        DocFileSuite('formparser.txt'),
-        DocTestSuite('zope.testing.loggingsupport'),
-        ))
-
-if __name__ == '__main__':
-    unittest.main(defaultTest='test_suite')
-
diff --git a/branches/bug1734/test.py b/branches/bug1734/test.py
deleted file mode 100644
index 71adea69..00000000
--- a/branches/bug1734/test.py
+++ /dev/null
@@ -1,875 +0,0 @@
-#! /usr/bin/env python2.2
-##############################################################################
-#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-"""
-test.py [-aBbcdDfgGhLmprtTuv] [modfilter [testfilter]]
-
-Test harness.
-
--a level
---all
-    Run the tests at the given level.  Any test at a level at or below this is
-    run, any test at a level above this is not run.  Level 0 runs all tests.
-    The default is to run tests at level 1.  --all is a shortcut for -a 0.
-
--b
-    Run "python setup.py build" before running tests, where "python"
-    is the version of python used to run test.py.  Highly recommended.
-    Tests will be run from the build directory.  (Note: In Python < 2.3
-    the -q flag is added to the setup.py command line.)
-
--B
-    Run "python setup.py build_ext -i" before running tests.  Tests will be
-    run from the source directory.
-
--c  use pychecker
-
--d
-    Instead of the normal test harness, run a debug version which
-    doesn't catch any exceptions.  This is occasionally handy when the
-    unittest code catching the exception doesn't work right.
-    Unfortunately, the debug harness doesn't print the name of the
-    test, so Use With Care.
-
---dir directory
-    Limit the directories searched for tests.  This is
-    important when you *really* want to limit the code that gets run.
-    For example, when refactoring interfaces, you don't want to see how
-    you have broken setups for tests in other packages. You *just* want to
-    run the interface tests.
-
--D
-    Works like -d, except that it loads pdb when an exception occurs.
-
--f
-    Run functional tests instead of unit tests.
-
--g threshold
-    Set the garbage collector generation0 threshold.  This can be used to
-    stress memory and gc correctness.  Some crashes are only reproducible when
-    the threshold is set to 1 (aggressive garbage collection).  Do "-g 0" to
-    disable garbage collection altogether.
-
--G gc_option
-    Set the garbage collection debugging flags.  The argument must be one
-    of the DEBUG_ flags defined by the Python gc module.  Multiple options
-    can be specified by using "-G OPTION1 -G OPTION2."
-
---libdir test_root
-    Search for tests starting in the specified start directory
-    (useful for testing components being developed outside the main
-    "src" or "build" trees).
-
---keepbytecode
-    Do not delete all stale bytecode before running tests
-
--L
-    Keep running the selected tests in a loop.  You may experience
-    memory leakage.
-
--n
-    Name temporary files after the test that is running.
-
--t
-    Time the individual tests and print a list of the top 50, sorted from
-    longest to shortest.
-
--p
-    Show running progress.  It can be combined with -v or -vv.
-
--r
-    Look for refcount problems.
-    This requires that Python was built --with-pydebug.
-
--T
-    Use Python's trace module for code coverage.  This only works
-    if trace.py is explicitly added to PYTHONPATH.  Coverage files are
-    written to a directory named `coverage' parallel to `build', and a
-    summary is also printed to stdout.
-
--v
-    Verbose output.  With one -v, unittest prints a dot (".") for each test
-    run.  With -vv, unittest prints the name of each test (for some definition
-    of "name" ...).  With no -v, unittest is silent until the end of the run,
-    except when errors occur.
-
--u
--m
-    Use the PyUnit GUI instead of output to the command line.  The GUI imports
-    tests on its own, taking care to reload all dependencies on each run.  The
-    debug (-d), verbose (-v), and Loop (-L) options will be ignored.  The
-    testfilter filter is also not applied.
-
-    -m starts the gui minimized.  Double-clicking the progress bar will start
-    the import and run all tests.
-
-
-modfilter
-testfilter
-    Case-sensitive regexps to limit which tests are run, used in search
-    (not match) mode.
-    In an extension of Python regexp notation, a leading "!" is stripped
-    and causes the sense of the remaining regexp to be negated (so "!bc"
-    matches any string that does not match "bc", and vice versa).
-    By default these act like ".", i.e. nothing is excluded.
-
-    modfilter is applied to a test file's path, starting at "build" and
-    including (OS-dependent) path separators.
-
-    testfilter is applied to the (method) name of the unittest methods
-    contained in the test files whose paths modfilter matched.
-
-Extreme (yet useful) examples:
-
-    test.py -vvb . "^checkWriteClient$"
-
-    Builds the project silently, then runs unittest in verbose mode on all
-    tests whose names are precisely "checkWriteClient".  Useful when
-    debugging a specific test.
-
-    test.py -vvb . "!^checkWriteClient$"
-
-    As before, but runs all tests whose names aren't precisely
-    "checkWriteClient".  Useful to avoid a specific failing test you don't
-    want to deal with just yet.
-
-    test.py -m . "!^checkWriteClient$"
-
-    As before, but now opens up a minimized PyUnit GUI window (only showing
-    the progress bar).  Useful for refactoring runs where you continually want
-    to make sure all tests still pass.
-"""
-
-import gc
-import os
-import pdb
-import re
-import sys
-import tempfile
-import time
-import traceback
-import unittest
-
-from distutils.util import get_platform
-
-PLAT_SPEC = "%s-%s" % (get_platform(), sys.version[0:3])
-
-class ImmediateTestResult(unittest._TextTestResult):
-
-    __super_init = unittest._TextTestResult.__init__
-    __super_startTest = unittest._TextTestResult.startTest
-    __super_printErrors = unittest._TextTestResult.printErrors
-
-    def __init__(self, stream, descriptions, verbosity, debug=False,
-                 count=None, progress=False):
-        self.__super_init(stream, descriptions, verbosity)
-        self._debug = debug
-        self._progress = progress
-        self._progressWithNames = False
-        self._count = count
-        self._testtimes = {}
-        if progress and verbosity == 1:
-            self.dots = False
-            self._progressWithNames = True
-            self._lastWidth = 0
-            self._maxWidth = 80
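-            # Probe the real terminal width with curses (when
-            # available) so progress lines can be padded to overwrite
-            # the previous one; otherwise keep the 80-column default.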
-            try:
-                import curses
-            except ImportError:
-                pass
-            else:
-                import curses.wrapper
-                def get_max_width(scr, self=self):
-                    self._maxWidth = scr.getmaxyx()[1]
-                try:
-                    curses.wrapper(get_max_width)
-                except curses.error:
-                    pass
-            self._maxWidth -= len("xxxx/xxxx (xxx.x%): ") + 1
-
-    def stopTest(self, test):
-        self._testtimes[test] = time.time() - self._testtimes[test]
-        if PATCH_TEMPFILE:
-            tempfile.tempdir = self._old_dir
-            if not os.listdir(self._new_dir):
-                os.rmdir(self._new_dir)
-        if gc.garbage:
-            print "The following test left garbage:"
-            print test
-            print gc.garbage
-            # TODO:  Perhaps eat the garbage here, so that the garbage isn't
-            # printed for every subsequent test.
-
-    def print_times(self, stream, count=None):
-        results = self._testtimes.items()
-        results.sort(lambda x, y: cmp(y[1], x[1]))
-        if count:
-            n = min(count, len(results))
-            if n:
-                print >>stream, "Top %d longest tests:" % n
-        else:
-            n = len(results)
-        if not n:
-            return
-        for i in range(n):
-            print >>stream, "%6dms" % int(results[i][1] * 1000), results[i][0]
-
-    def _print_traceback(self, msg, err, test, errlist):
-        if self.showAll or self.dots or self._progress:
-            self.stream.writeln("\n")
-            self._lastWidth = 0
-
-        tb = "".join(traceback.format_exception(*err))
-        self.stream.writeln(msg)
-        self.stream.writeln(tb)
-        errlist.append((test, tb))
-
-    def startTest(self, test):
-        if self._progress:
-            self.stream.write("\r%4d" % (self.testsRun + 1))
-            if self._count:
-                self.stream.write("/%d (%5.1f%%)" % (self._count,
-                                  (self.testsRun + 1) * 100.0 / self._count))
-            if self.showAll:
-                self.stream.write(": ")
-            elif self._progressWithNames:
-                # TODO:  will break with multibyte strings.
-                name = self.getShortDescription(test)
-                width = len(name)
-                if width < self._lastWidth:
-                    name += " " * (self._lastWidth - width)
-                self.stream.write(": %s" % name)
-                self._lastWidth = width
-            self.stream.flush()
-        if PATCH_TEMPFILE:
-            # It sure is dumb that unittest hides the test's name.
-            name = test._TestCase__testMethodName
-            self._old_dir = tempfile.gettempdir()
-            self._new_dir = os.path.join(self._old_dir, name)
-            if not os.path.exists(self._new_dir):
-                os.mkdir(self._new_dir)
-            tempfile.tempdir = self._new_dir
-
-        self.__super_startTest(test)
-        self._testtimes[test] = time.time()
-
-    def getShortDescription(self, test):
-        s = self.getDescription(test)
-        if len(s) > self._maxWidth:
-            pos = s.find(" (")
-            if pos >= 0:
-                w = self._maxWidth - (pos + 5)
-                if w < 1:
-                    # first portion (test method name) is too long
-                    s = s[:self._maxWidth-3] + "..."
-                else:
-                    pre = s[:pos+2]
-                    post = s[-w:]
-                    s = "%s...%s" % (pre, post)
-        return s[:self._maxWidth]
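-
-    # Illustrative sketch of the truncation above: with
-    # self._maxWidth == 32, a description like
-    #   "checkWriteClient (ZEO.tests.testTransaction.TestBar)"
-    # comes back as
-    #   "checkWriteClient (...on.TestBar)"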
-
-    def addError(self, test, err):
-        if self._progress:
-            self.stream.write("\r")
-        if self._debug:
-            raise err[0], err[1], err[2]
-        self._print_traceback("Error in test %s" % test, err,
-                              test, self.errors)
-
-    def addFailure(self, test, err):
-        if self._progress:
-            self.stream.write("\r")
-        if self._debug:
-            raise err[0], err[1], err[2]
-        self._print_traceback("Failure in test %s" % test, err,
-                              test, self.failures)
-
-    def printErrors(self):
-        if self._progress and not (self.dots or self.showAll):
-            self.stream.writeln()
-        self.__super_printErrors()
-
-    def printErrorList(self, flavor, errors):
-        for test, err in errors:
-            self.stream.writeln(self.separator1)
-            self.stream.writeln("%s: %s" % (flavor, self.getDescription(test)))
-            self.stream.writeln(self.separator2)
-            self.stream.writeln(err)
-
-
-class ImmediateTestRunner(unittest.TextTestRunner):
-
-    __super_init = unittest.TextTestRunner.__init__
-
-    def __init__(self, **kwarg):
-        debug = kwarg.get("debug")
-        if debug is not None:
-            del kwarg["debug"]
-        progress = kwarg.get("progress")
-        if progress is not None:
-            del kwarg["progress"]
-        self.__super_init(**kwarg)
-        self._debug = debug
-        self._progress = progress
-
-    def _makeResult(self):
-        return ImmediateTestResult(self.stream, self.descriptions,
-                                   self.verbosity, debug=self._debug,
-                                   count=self._count, progress=self._progress)
-
-    def run(self, test):
-        self._count = test.countTestCases()
-        return unittest.TextTestRunner.run(self, test)
-
-# setup list of directories to put on the path
-class PathInit:
-    def __init__(self, build, build_inplace, libdir=None):
-        self.inplace = None
-        # Figure out if we should test in-place or test in-build.  If the -b
-        # or -B option was given, test in the place we were told to build in.
-        # Otherwise, we'll look for a build directory and if we find one,
-        # we'll test there, otherwise we'll test in-place.
-        if build:
-            self.inplace = build_inplace
-        if self.inplace is None:
-            # Need to figure it out
-            if os.path.isdir(os.path.join("build", "lib.%s" % PLAT_SPEC)):
-                self.inplace = False
-            else:
-                self.inplace = True
-        # Calculate which directories we're going to add to sys.path, and cd
-        # to the appropriate working directory
-        org_cwd = os.getcwd()
-        if self.inplace:
-            self.libdir = "src"
-        else:
-            self.libdir = "lib.%s" % PLAT_SPEC
-            os.chdir("build")
-        # Hack sys.path
-        self.cwd = os.getcwd()
-        sys.path.insert(0, os.path.join(self.cwd, self.libdir))
-        # Hack again for external products.
-        global functional
-        kind = functional and "functional" or "unit"
-        if libdir:
-            extra = os.path.join(org_cwd, libdir)
-            print "Running %s tests from %s" % (kind, extra)
-            self.libdir = extra
-            sys.path.insert(0, extra)
-        else:
-            print "Running %s tests from %s" % (kind, self.cwd)
-        # Make sure functional tests find ftesting.zcml
-        if functional:
-            config_file = 'ftesting.zcml'
-            if not self.inplace:
-                # We chdired into build, so ftesting.zcml is in the
-                # parent directory
-                config_file = os.path.join('..', 'ftesting.zcml')
-            print "Parsing %s" % config_file
-            from zope.testing.functional import FunctionalTestSetup
-            FunctionalTestSetup(config_file)
-
-def match(rx, s):
-    if not rx:
-        return True
-    if rx[0] == "!":
-        return re.search(rx[1:], s) is None
-    else:
-        return re.search(rx, s) is not None
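-
-# Illustrative behavior of match() (search semantics, "!" negation):
-#   match("bc", "abcd")     -> True   (re.search, not re.match)
-#   match("!bc", "abcd")    -> False  (leading "!" negates the sense)
-#   match(None, "anything") -> True   (no pattern excludes nothing)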
-
-class TestFileFinder:
-    def __init__(self, prefix):
-        self.files = []
-        self._plen = len(prefix)
-        if not prefix.endswith(os.sep):
-            self._plen += 1
-        global functional
-        if functional:
-            self.dirname = "ftests"
-        else:
-            self.dirname = "tests"
-
-    def visit(self, rx, dir, files):
-        if os.path.split(dir)[1] != self.dirname:
-            return
-        # ignore tests that aren't in packages
-        if "__init__.py" not in files:
-            if not files or files == ["CVS"]:
-                return
-            print "not a package", dir
-            return
-
-        # Put matching files in matches.  If matches is non-empty,
-        # then make sure that the package is importable.
-        matches = []
-        for file in files:
-            if file.startswith('test') and os.path.splitext(file)[-1] == '.py':
-                path = os.path.join(dir, file)
-                if match(rx, path):
-                    matches.append(path)
-
-        # ignore tests when the package can't be imported, possibly due to
-        # dependency failures.
-        pkg = dir[self._plen:].replace(os.sep, '.')
-        try:
-            __import__(pkg)
-        # We specifically do not want to catch ImportError since that's useful
-        # information to know when running the tests.
-        except RuntimeError, e:
-            if VERBOSE:
-                print "skipping %s because: %s" % (pkg, e)
-            return
-        else:
-            self.files.extend(matches)
-
-    def module_from_path(self, path):
-        """Return the Python package name indicated by the filesystem path."""
-        assert path.endswith(".py")
-        path = path[self._plen:-3]
-        mod = path.replace(os.sep, ".")
-        return mod
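-
-    # Illustrative example, assuming an in-place run where the prefix
-    # passed to __init__ was "src" (so self._plen == 4) on a POSIX box:
-    #   module_from_path("src/zope/testing/tests.py")
-    #       -> "zope.testing.tests"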
-
-def walk_with_symlinks(top, func, arg):
-    """Like os.path.walk, but follows symlinks on POSIX systems.
-
-    This could theoretically result in an infinite loop, if you create symlink
-    cycles in your Zope sandbox, so don't do that.
-    """
-    try:
-        names = os.listdir(top)
-    except os.error, why:
-        print "Error listing %r: %s" % (top, why)
-        return
-    func(arg, top, names)
-    exceptions = ('.', '..')
-    for name in names:
-        if name not in exceptions:
-            name = os.path.join(top, name)
-            if os.path.isdir(name):
-                walk_with_symlinks(name, func, arg)
-
-def find_tests(rx):
-    global finder
-    finder = TestFileFinder(pathinit.libdir)
-    walkdir = test_dir or pathinit.libdir
-    walk_with_symlinks(walkdir, finder.visit, rx)
-    return finder.files
-
-def package_import(modname):
-    mod = __import__(modname)
-    for part in modname.split(".")[1:]:
-        mod = getattr(mod, part)
-    return mod
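-
-# Note: __import__("a.b.c") returns the top-level package "a", which is
-# why the getattr walk above is needed to reach the leaf module.  E.g.
-# (illustrative) package_import("zope.testing.tests") returns the
-# zope.testing.tests module object itself, not the zope package.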
-
-def get_suite(file):
-    modname = finder.module_from_path(file)
-    try:
-        mod = package_import(modname)
-    except ImportError, err:
-        # print traceback
-        print "Error importing %s\n%s" % (modname, err)
-        if debug:
-            raise
-        return None
-    try:
-        suite_func = mod.test_suite
-    except AttributeError:
-        print "No test_suite() in %s" % file
-        return None
-    return suite_func()
-
-def filter_testcases(s, rx):
-    new = unittest.TestSuite()
-    for test in s._tests:
-        # See if the levels match
-        dolevel = (level == 0) or level >= getattr(test, "level", 0)
-        if not dolevel:
-            continue
-        if isinstance(test, unittest.TestCase):
-            name = test.id() # Full test name: package.module.class.method
-            name = name[1 + name.rfind("."):] # extract method name
-            if not rx or match(rx, name):
-                new.addTest(test)
-        else:
-            filtered = filter_testcases(test, rx)
-            if filtered:
-                new.addTest(filtered)
-    return new
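-
-# filter_testcases() recurses because a TestSuite may nest other suites
-# (e.g. a module-level suite holding one suite per TestCase class);
-# the nesting is preserved for whatever tests survive the filter.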
-
-def gui_runner(files, test_filter):
-    if build_inplace:
-        utildir = os.path.join(os.getcwd(), "utilities")
-    else:
-        utildir = os.path.join(os.getcwd(), "..", "utilities")
-    sys.path.append(utildir)
-    import unittestgui
-    suites = []
-    for file in files:
-        suites.append(finder.module_from_path(file) + ".test_suite")
-
-    suites = ", ".join(suites)
-    minimal = (GUI == "minimal")
-    unittestgui.main(suites, minimal)
-
-class TrackRefs:
-    """Object to track reference counts across test runs."""
-
-    def __init__(self):
-        self.type2count = {}
-        self.type2all = {}
-        # Put types in self.interesting to get detailed stats for them.
-        self.interesting = {}
-
-    def update(self):
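-        # sys.getobjects() is only available in debug builds of Python
-        # (compiled with Py_TRACE_REFS, as --with-pydebug builds of
-        # this era are); it returns a list of all live objects.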
-        obs = sys.getobjects(0)
-        type2count = {}
-        type2all = {}
-        for o in obs:
-            all = sys.getrefcount(o)
-            t = type(o)
-            if t in type2count:
-                type2count[t] += 1
-                type2all[t] += all
-            else:
-                type2count[t] = 1
-                type2all[t] = all
-
-        ct = [(type2count[t] - self.type2count.get(t, 0),
-               type2all[t] - self.type2all.get(t, 0),
-               t)
-              for t in type2count.iterkeys()]
-        ct.sort()
-        ct.reverse()
-        for delta1, delta2, t in ct:
-            if delta1 or delta2:
-                print "%-55s %8d %8d" % (t, delta1, delta2)
-                if t in self.interesting:
-                    for o in obs:
-                        if type(o) == t:
-                            print sys.getrefcount(o), len(gc.get_referrers(o))
-                            delta1 -= 1
-                            if not delta1:
-                                break
-
-        self.type2count = type2count
-        self.type2all = type2all
-
-def runner(files, test_filter, debug):
-    runner = ImmediateTestRunner(verbosity=VERBOSE, debug=debug,
-                                 progress=progress)
-    suite = unittest.TestSuite()
-    for file in files:
-        s = get_suite(file)
-        # See if the levels match
-        dolevel = (level == 0) or level >= getattr(s, "level", 0)
-        if s is not None and dolevel:
-            s = filter_testcases(s, test_filter)
-            suite.addTest(s)
-    try:
-        r = runner.run(suite)
-        if timesfn:
-            r.print_times(open(timesfn, "w"))
-            if VERBOSE:
-                print "Wrote timing data to", timesfn
-        if timetests:
-            r.print_times(sys.stdout, timetests)
-    except:
-        if debugger:
-            pdb.post_mortem(sys.exc_info()[2])
-        else:
-            raise
-
-def remove_stale_bytecode(arg, dirname, names):
-    names = map(os.path.normcase, names)
-    for name in names:
-        if name.endswith(".pyc") or name.endswith(".pyo"):
-            srcname = name[:-1]
-            if srcname not in names:
-                fullname = os.path.join(dirname, name)
-                print "Removing stale bytecode file", fullname
-                os.unlink(fullname)
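-
-# For example, if a directory still holds "foo.pyc" after "foo.py" was
-# deleted or renamed, the orphaned bytecode is removed so it can't
-# shadow the current sources during the run.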
-
-def main(module_filter, test_filter, libdir):
-    if not keepStaleBytecode:
-        os.path.walk(os.curdir, remove_stale_bytecode, None)
-
-    # Skip this; zLOG will eventually win, and coordinating
-    # initialization is a losing battle.
-    configure_logging()
-
-    # Initialize the path and cwd
-    global pathinit
-    pathinit = PathInit(build, build_inplace, libdir)
-
-    files = find_tests(module_filter)
-    files.sort()
-
-    if GUI:
-        gui_runner(files, test_filter)
-    elif LOOP:
-        if REFCOUNT:
-            rc = sys.gettotalrefcount()
-            track = TrackRefs()
-        while True:
-            runner(files, test_filter, debug)
-            gc.collect()
-            if gc.garbage:
-                print "GARBAGE:", len(gc.garbage), gc.garbage
-                return
-            if REFCOUNT:
-                prev = rc
-                rc = sys.gettotalrefcount()
-                print "totalrefcount=%-8d change=%-6d" % (rc, rc - prev)
-                track.update()
-    else:
-        runner(files, test_filter, debug)
-
-
-def configure_logging():
-    """Initialize the logging module."""
-    import logging.config
-
-    # Get the log.ini file from the current directory instead of possibly
-    # buried in the build directory.  This isn't perfect because if
-    # log.ini specifies a log file, it'll be relative to the build directory.
-    # Hmm...
-    logini = os.path.abspath("log.ini")
-
-    if os.path.exists(logini):
-        logging.config.fileConfig(logini)
-    else:
-        logging.basicConfig()
-
-    if os.environ.has_key("LOGGING"):
-        level = int(os.environ["LOGGING"])
-        logging.getLogger().setLevel(level)
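-
-    # For example, running with LOGGING=10 in the environment (10 is
-    # the numeric value of logging.DEBUG) turns on debug-level output
-    # for the root logger.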
-
-
-def process_args(argv=None):
-    import getopt
-    import warnings
-
-    global module_filter
-    global test_filter
-    global VERBOSE
-    global LOOP
-    global GUI
-    global TRACE
-    global REFCOUNT
-    global PATCH_TEMPFILE
-    global debug
-    global debugger
-    global build
-    global level
-    global libdir
-    global timesfn
-    global timetests
-    global progress
-    global build_inplace
-    global keepStaleBytecode
-    global functional
-    global test_dir
-
-    # Persistence/__init__.py generates a long warning message about
-    # the failure of
-    #     from _Persistence import Persistent
-    # for the benefit of people expecting that to work from previous (pre 3.3)
-    # ZODB3 releases.  We don't need to see that msg every time we run the
-    # test suite, though, and it's positively unhelpful to see it in this
-    # context.
-    # NOTE:  "(?s)" enables re.DOTALL, so that the ".*" can suck up
-    #        newlines.
-    warnings.filterwarnings("ignore",
-        message="(?s)Couldn't import the ExtensionClass-based base class.*"
-                "There are two possibilities:",
-        category=UserWarning)
-
-    if argv is None:
-        argv = sys.argv
-
-    module_filter = None
-    test_filter = None
-    VERBOSE = 0
-    LOOP = False
-    GUI = False
-    TRACE = False
-    REFCOUNT = False
-    PATCH_TEMPFILE = False
-    debug = False # Don't collect test results; simply let tests crash
-    debugger = False
-    build = False
-    build_inplace = False
-    gcthresh = None
-    gcdebug = 0
-    gcflags = []
-    level = 1
-    libdir = None
-    progress = False
-    timesfn = None
-    timetests = 0
-    keepStaleBytecode = 0
-    functional = False
-    test_dir = None
-
-    try:
-        opts, args = getopt.getopt(argv[1:], "a:bBcdDfg:G:hLmnprtTuv",
-                                   ["all", "help", "libdir=", "times=",
-                                    "keepbytecode", "dir="])
-    except getopt.error, msg:
-        print msg
-        print "Try `python %s -h' for more information." % argv[0]
-        sys.exit(2)
-
-    for k, v in opts:
-        if k == "-a":
-            level = int(v)
-        elif k == "--all":
-            level = 0
-        elif k == "-b":
-            build = True
-        elif k == "-B":
-            build = build_inplace = True
-        elif k == "-c":
-            # make sure you have a recent version of pychecker
-            if not os.environ.get("PYCHECKER"):
-                os.environ["PYCHECKER"] = "-q"
-            import pychecker.checker
-        elif k == "-d":
-            debug = True
-        elif k == "-D":
-            debug = True
-            debugger = True
-        elif k == "-f":
-            functional = True
-        elif k in ("-h", "--help"):
-            print __doc__
-            sys.exit(0)
-        elif k == "-g":
-            gcthresh = int(v)
-        elif k == "-G":
-            if not v.startswith("DEBUG_"):
-                print "-G argument must be a DEBUG_ flag, not", repr(v)
-                sys.exit(1)
-            gcflags.append(v)
-        elif k == '--keepbytecode':
-            keepStaleBytecode = 1
-        elif k == '--libdir':
-            libdir = v
-        elif k == "-L":
-            LOOP = 1
-        elif k == "-m":
-            GUI = "minimal"
-        elif k == "-n":
-            PATCH_TEMPFILE = True
-        elif k == "-p":
-            progress = True
-        elif k == "-r":
-            if hasattr(sys, "gettotalrefcount"):
-                REFCOUNT = True
-            else:
-                print "-r ignored, because it needs a debug build of Python"
-        elif k == "-T":
-            TRACE = True
-        elif k == "-t":
-            if not timetests:
-                timetests = 50
-        elif k == "-u":
-            GUI = 1
-        elif k == "-v":
-            VERBOSE += 1
-        elif k == "--times":
-            try:
-                timetests = int(v)
-            except ValueError:
-                # must be a filename to write
-                timesfn = v
-        elif k == '--dir':
-            test_dir = v
-
-    if gcthresh is not None:
-        if gcthresh == 0:
-            gc.disable()
-            print "gc disabled"
-        else:
-            gc.set_threshold(gcthresh)
-            print "gc threshold:", gc.get_threshold()
-
-    if gcflags:
-        val = 0
-        for flag in gcflags:
-            v = getattr(gc, flag, None)
-            if v is None:
-                print "Unknown gc flag", repr(flag)
-                print gc.set_debug.__doc__
-                sys.exit(1)
-            val |= v
-        gcdebug |= val  # OR in the accumulated flags, not just the last one
-
-    if gcdebug:
-        gc.set_debug(gcdebug)
-
-    if build:
-        # Python 2.3 is more sane in its non -q output
-        if sys.hexversion >= 0x02030000:
-            qflag = ""
-        else:
-            qflag = "-q"
-        cmd = sys.executable + " setup.py " + qflag + " build"
-        if build_inplace:
-            cmd += "_ext -i"
-        if VERBOSE:
-            print cmd
-        sts = os.system(cmd)
-        if sts:
-            print "Build failed", hex(sts)
-            sys.exit(1)
-
-    if VERBOSE:
-        kind = functional and "functional" or "unit"
-        if level == 0:
-            print "Running %s tests at all levels" % kind
-        else:
-            print "Running %s tests at level %d" % (kind, level)
-
-    if args:
-        if len(args) > 1:
-            test_filter = args[1]
-        module_filter = args[0]
-    try:
-        if TRACE:
-            # If the trace module is used, we don't exit with a failure
-            # status on a false return value from main().
-            coverdir = os.path.join(os.getcwd(), "coverage")
-            import trace
-            ignoremods = ["os", "posixpath", "stat"]
-            tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
-                                 ignoremods=ignoremods,
-                                 trace=False, count=True)
-
-            tracer.runctx("main(module_filter, test_filter, libdir)",
-                          globals=globals(), locals=vars())
-            r = tracer.results()
-            r.write_results(show_missing=True, summary=True, coverdir=coverdir)
-        else:
-            bad = main(module_filter, test_filter, libdir)
-            if bad:
-                sys.exit(1)
-    except ImportError, err:
-        print err
-        print sys.path
-        raise
-
-
-if __name__ == "__main__":
-    process_args()
-- 
2.30.9