Commit 10e1326a authored by Jim Fulton, committed by GitHub

Merge pull request #101 from zopefoundation/reference-docs

Reference docs
parents 8a4d763a bed11cba
...@@ -78,9 +78,13 @@ def className(obj):
IPersistentDataManager,
ISynchronizer)
class Connection(ExportImport, object):
"""Connection to ZODB for loading and storing objects.
Connections manage object state in collaboration with transaction
managers. They're created by calling the
:meth:`~ZODB.DB.open` method on :py:class:`database
<ZODB.DB>` objects.
"""
_code_timestamp = 0
...
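For context, a minimal usage sketch (not part of the diff) showing how a connection is obtained from a database and used with the transaction manager; the ``data.fs`` path and the ``answer`` key are made up.

```python
# Minimal sketch: opening a connection and committing a change.
import transaction
import ZODB

db = ZODB.DB('data.fs')   # ZODB.DB accepts a storage or a file-storage path
conn = db.open()          # a Connection, joined to the thread's transaction manager
root = conn.root()        # root mapping of persistent objects
root['answer'] = 42
transaction.commit()      # the connection registered the change; commit stores it
conn.close()
db.close()
```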
...@@ -40,10 +40,52 @@ from .utils import load_current, maxtid
ZODB.interfaces.IStorageIteration,
)
class DemoStorage(ConflictResolvingStorage):
"""A storage that stores changes against a read-only base database
This storage was originally meant to support distribution of
application demonstrations with populated read-only databases (on
CDROM) and writable in-memory databases.
Demo storages are extremely convenient for testing where setup of a
base database can be shared by many tests.
Demo storages are also handy for staging applications where a
read-only snapshot of a production database (often accomplished
using a `beforestorage
<https://pypi.python.org/pypi/zc.beforestorage>`_) is combined
with a changes database implemented with a
:class:`~ZODB.FileStorage.FileStorage.FileStorage`.
"""
def __init__(self, name=None, base=None, changes=None,
close_base_on_close=None, close_changes_on_close=None):
"""Create a demo storage
:param str name: The storage name used by the
:meth:`~ZODB.interfaces.IStorage.getName` and
:meth:`~ZODB.interfaces.IStorage.sortKey` methods.
:param object base: base storage
:param object changes: changes storage
:param bool close_base_on_close: A flag indicating whether the base
database should be closed when the demo storage is closed.
:param bool close_changes_on_close: A flag indicating whether the
changes database should be closed when the demo storage is closed.
If a base database isn't provided, a
:class:`~ZODB.MappingStorage.MappingStorage` will be
constructed and used.
If ``close_base_on_close`` isn't specified, it will be ``True`` if
a base database was provided and ``False`` otherwise.
If a changes database isn't provided, a
:class:`~ZODB.MappingStorage.MappingStorage` will be
constructed and used and blob support will be provided using a
temporary blob directory.
If ``close_changes_on_close`` isn't specified, it will be ``True`` if
a changes database was provided and ``False`` otherwise.
"""
if close_base_on_close is None:
if base is None:
...@@ -51,6 +93,8 @@ class DemoStorage(ConflictResolvingStorage):
close_base_on_close = False
else:
close_base_on_close = True
elif base is None:
base = ZODB.MappingStorage.MappingStorage()
self.base = base
self.close_base_on_close = close_base_on_close
...@@ -285,10 +329,17 @@ class DemoStorage(ConflictResolvingStorage):
raise
def pop(self):
"""Close the changes database and return the base.
"""
self.changes.close()
return self.base
def push(self, changes=None):
"""Create a new demo storage using the storage as a base.
The given changes are used as the changes for the returned
storage and ``False`` is passed as ``close_base_on_close``.
"""
return self.__class__(base=self, changes=changes,
close_base_on_close=False)
...
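The layering described above is easiest to see in a short sketch. This example is not part of the diff; the storage names and the ``greeting`` key are made up, and ``db.transaction()`` is used only as a convenient commit wrapper.

```python
# Hedged sketch: a DemoStorage stacked on a shared base, as used in tests.
import ZODB
from ZODB.DemoStorage import DemoStorage
from ZODB.MappingStorage import MappingStorage

base = MappingStorage('base')      # stands in for a populated, read-only database
storage = DemoStorage(base=base)   # changes default to a fresh MappingStorage

# push() stacks another demo storage on top (with close_base_on_close=False);
# pop() closes that changes layer and hands back the storage it was stacked on.
layer = storage.push()
assert layer.pop() is storage

db = ZODB.DB(storage)              # writes go to the changes layer, not the base
with db.transaction() as conn:
    conn.root()['greeting'] = 'hello'
db.close()
```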
...@@ -140,7 +140,8 @@ class FileStorage(
ConflictResolvingStorage,
BaseStorage,
):
"""Storage that saves data in a file
"""
# Set True while a pack is in progress; undo is blocked for the duration.
_pack_is_in_progress = False
...@@ -148,6 +149,82 @@
def __init__(self, file_name, create=False, read_only=False, stop=None,
quota=None, pack_gc=True, pack_keep_old=True, packer=None,
blob_dir=None):
"""Create a file storage
:param str file_name: Path to store data file
:param bool create: Flag indicating whether a file should be
created even if it already exists.
:param bool read_only: Flag indicating whether the file is
read only. Only one process is able to open the file
non-read-only.
:param bytes stop: Time-travel transaction id
When the file is opened, data will be read up to the given
transaction id. Transaction ids correspond to times and
you can compute transaction ids for a given time using
:class:`~ZODB.TimeStamp.TimeStamp`.
:param int quota: File-size quota
:param bool pack_gc: Flag indicating whether garbage
collection should be performed when packing.
:param bool pack_keep_old: flag indicating whether old data
files should be retained after packing as a ``.old`` file.
:param callable packer: An alternative
:interface:`packer <ZODB.FileStorage.interfaces.IFileStoragePacker>`.
:param str blob_dir: A blob-directory path name.
Blobs will be supported if this option is provided.
A file storage stores data in a single file that behaves like
a traditional transaction log. New data records are appended
to the end of the file. Periodically, the file is packed to
free up space. When this is done, current records as of the
pack time or later are copied to a new file, which replaces
the old file.
FileStorages keep in-memory indexes mapping object oids to the
location of their current records in the file. Back pointers to
previous records allow access to non-current records from the
current records.
In addition to the data file, some ancillary files are
created. These can be lost without affecting data
integrity; however, losing the index file may cause extremely
slow startup. Each has a name that's a concatenation of the
original file and a suffix. The files are listed below by
suffix:
.index
Snapshot of the in-memory index. This is created on
shutdown, when packing, and after rebuilding an index when one
was not found. For large databases, creating a
file-storage object without an index file can take a very
long time because it's necessary to scan the data file to build
the index.
.lock
A lock file preventing multiple processes from opening a
file storage in non-read-only mode.
.tmp
A file used to store data being committed in the first phase
of two-phase commit.
.index_tmp
A temporary file used when saving the in-memory index to
avoid overwriting an existing index until a new index has
been fully saved.
.pack
A temporary file written while packing containing current
records as of and after the pack time.
.old
The previous database file after a pack.
When the database is packed, current records as of the pack
time and later are written to the ``.pack`` file. At the end
of packing, the ``.old`` file is removed, if it exists, and
the data file is renamed to the ``.old`` file, and finally the
``.pack`` file is renamed to the data file.
"""
if read_only:
self._is_read_only = True
...
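A minimal constructor sketch (not part of the diff) tying the parameters above to the ancillary files they produce; ``Data.fs`` and ``blobs`` are hypothetical paths.

```python
# Hedged sketch: creating a FileStorage with blob support.
from ZODB.FileStorage import FileStorage

storage = FileStorage(
    'Data.fs',            # data file; .index, .lock, and .tmp files live beside it
    blob_dir='blobs',     # providing a blob directory enables blob support
    pack_keep_old=True,   # keep Data.fs.old around after packing
)
try:
    print(storage.getName())   # the file name doubles as the storage name
finally:
    storage.close()
```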
...@@ -16,18 +16,27 @@ import zope.interface
class IFileStoragePacker(zope.interface.Interface):
def __call__(storage, referencesf, stop, gc):
r"""Pack the file storage into a new file
:param FileStorage storage: The storage object to be packed
:param callable referencesf: A function that extracts object
references from a pickle bytes string. This is usually
``ZODB.serialize.referencesf``.
:param bytes stop: A transaction id representing the time at
which to stop packing.
:param bool gc: A flag indicating whether garbage collection
should be performed.
The new file will have the same name as the old file with
``.pack`` appended. (The packer can get the old file name via
storage._file.name.) If blobs are supported, if the storage's
blob_dir attribute is not None or empty, then a .removed file
must be created in the blob directory. This file contains records of
the form::

(oid+serial).encode('hex')+'\n'

or, of the form::

oid.encode('hex')+'\n'
...
...@@ -32,8 +32,22 @@ import zope.interface
ZODB.interfaces.IStorageIteration,
)
class MappingStorage(object):
"""In-memory storage implementation
Note that this implementation is somewhat naive and inefficient
with regard to locking. It is primarily meant to
be a simple illustration of a storage implementation. It's also
useful for testing and exploration where scalability and efficiency
are unimportant.
"""
def __init__(self, name='MappingStorage'):
"""Create a mapping storage
The name parameter is used by the
:meth:`~ZODB.interfaces.IStorage.getName` and
:meth:`~ZODB.interfaces.IStorage.sortKey` methods.
"""
self.__name__ = name
self._data = {} # {oid->{tid->pickle}}
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
...
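A short sketch (not from the diff) of the intended use for testing and exploration; the storage name and the stored data are invented.

```python
# MappingStorage keeps everything in memory, making it a convenient
# throwaway backend for tests and experiments.
import ZODB
from ZODB.MappingStorage import MappingStorage

db = ZODB.DB(MappingStorage('scratch'))
with db.transaction() as conn:
    conn.root()['numbers'] = [1, 2, 3]
db.close()   # nothing is persisted; the data lives only in this process
```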
...@@ -18,7 +18,7 @@
<description>
If supplied, the file storage will provide blob support and this
is the name of a directory to hold blob data. The directory will
be created if it doesn't exist. If no value (or an empty value)
is provided, then no blob support will be provided. (You can still
use a BlobStorage to provide blob support.)
</description>
...@@ -69,7 +69,13 @@
<sectiontype name="mappingstorage" datatype=".MappingStorage"
implements="ZODB.storage">
<key name="name" default="Mapping Storage">
<description>
The storage name, used by the
:meth:`~ZODB.interfaces.IStorage.getName` and
:meth:`~ZODB.interfaces.IStorage.sortKey` methods.
</description>
</key>
</sectiontype>
<!-- The BDB storages probably need to be revised somewhat still.
...@@ -187,14 +193,14 @@
<key name="read-only-fallback" datatype="boolean" default="off">
<description>
A flag indicating whether a read-only remote storage should be
acceptable as a fall-back when no writable storages are
available. Defaults to false. At most one of read_only and
read_only_fallback should be true.
</description>
</key>
<key name="username" required="no">
<description>
The authentication user name of the server.
</description>
</key>
<key name="password" required="no">
...@@ -205,7 +211,7 @@
<key name="realm" required="no">
<description>
The authentication realm of the server. Some authentication
schemes use a realm to identify the logical set of user names
that are accepted by this server.
</description>
</key>
...@@ -225,7 +231,13 @@
<sectiontype name="demostorage" datatype=".DemoStorage"
implements="ZODB.storage">
<key name="name">
<description>
The storage name, used by the
:meth:`~ZODB.interfaces.IStorage.getName` and
:meth:`~ZODB.interfaces.IStorage.sortKey` methods.
</description>
</key>
<multisection type="ZODB.storage" name="*" attribute="factories" />
</sectiontype>
...@@ -233,11 +245,12 @@
<sectiontype name="zodb" datatype=".ZODBDatabase"
implements="ZODB.database">
<section type="ZODB.storage" name="*" attribute="storage"/>
<key name="cache-size" datatype="integer" default="5000">
<description>
Target size, in number of objects, of each connection's
object cache.
</description>
</key>
<key name="cache-size-bytes" datatype="byte-size" default="0">
<description>
Target size, in total estimated size for objects, of each connection's
...@@ -245,8 +258,14 @@
"0" means no limit.
</description>
</key>
<key name="large-record-size" datatype="byte-size" default="16MB">
<description>
When object records are saved
that are larger than this, a warning is issued,
suggesting that blobs should be used instead.
</description>
</key>
<key name="pool-size" datatype="integer" default="7">
<description>
The expected maximum number of simultaneously open connections.
There is no hard limit (as many connections as are requested
...@@ -255,21 +274,25 @@
and exceeding twice pool-size connections causes a critical
message to be logged.
</description>
</key>
<key name="pool-timeout" datatype="time-interval">
<description>
The minimum interval that an unused (non-historical)
connection should be kept.
</description>
</key>
<key name="historical-pool-size" datatype="integer" default="3">
<description>
The expected maximum total number of historical connections
simultaneously open.
</description>
</key>
<key name="historical-cache-size" datatype="integer" default="1000">
<description>
Target size, in number of objects, of each historical connection's
object cache.
</description>
</key>
<key name="historical-cache-size-bytes" datatype="byte-size" default="0"> <key name="historical-cache-size-bytes" datatype="byte-size" default="0">
<description> <description>
Target size, in total estimated size of objects, of each historical connection's Target size, in total estimated size of objects, of each historical connection's
...@@ -285,12 +308,12 @@ ...@@ -285,12 +308,12 @@
</key> </key>
<key name="database-name"> <key name="database-name">
<description> <description>
When multi-databases are in use, this is the name given to this
database in the collection. The name must be unique across all
databases in the collection. The collection must also be given
a mapping from its databases' names to their databases, but that
cannot be specified in a ZODB config file. Applications using
multi-databases typically supply a way to configure the mapping in
their own config files, using the "databases" parameter of a DB
constructor.
</description>
...
...@@ -42,13 +42,33 @@ def getStorageSchema():
return _s_schema
def databaseFromString(s):
"""Create a database from a database-configuration string.
The string must contain one or more :ref:`zodb
<database-text-configuration>` sections.
The database defined by the first section is returned.
If :ref:`more than one zodb section is provided
<multidatabase-text-configuration>`, a multi-database
configuration will be created and all of the databases will be
available in the returned database's ``databases`` attribute.
"""
return databaseFromFile(StringIO(s))
def databaseFromFile(f):
"""Create a database from a file object that provides configuration.
See :func:`databaseFromString`.
"""
config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
return databaseFromConfig(config.database)
def databaseFromURL(url):
"""Load a database from URL (or file name) that provides configuration.
See :func:`databaseFromString`.
"""
config, handler = ZConfig.loadConfig(getDbSchema(), url)
return databaseFromConfig(config.database)
...@@ -63,13 +83,20 @@ def databaseFromConfig(database_factories):
return first
def storageFromString(s):
"""Create a storage from a storage-configuration string.
"""
return storageFromFile(StringIO(s))
def storageFromFile(f):
"""Create a storage from a file object providing storage-configuration.
"""
config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
return storageFromConfig(config.storage)
def storageFromURL(url):
"""\
Create a storage from a URL (or file name) providing storage-configuration.
"""
config, handler = ZConfig.loadConfig(getStorageSchema(), url)
return storageFromConfig(config.storage)
...
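A hedged sketch (not part of the diff) of the helpers documented above, using ``mappingstorage`` sections so the configuration text stays self-contained; the ``cache-size`` value and the ``scratch`` name are arbitrary.

```python
# Build a database and a bare storage from configuration text.
import ZODB.config

db = ZODB.config.databaseFromString("""
<zodb>
  cache-size 1000
  <mappingstorage>
  </mappingstorage>
</zodb>
""")

storage = ZODB.config.storageFromString("""
<mappingstorage>
  name scratch
</mappingstorage>
""")

db.close()       # closes the database and its storage
storage.close()  # the bare storage must be closed explicitly
```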
...@@ -437,7 +437,6 @@ class IStorage(Interface):
"""A storage is responsible for storing and retrieving data of objects.
Consistency and locking
When transactions are committed, a storage assigns monotonically
increasing transaction identifiers (tids) to the transactions and
...@@ -472,6 +471,9 @@ class IStorage(Interface):
Finalize the storage, releasing any external resources. The
storage should not be used after this method is called.
Note that databases close their storages when they're closed, so
this method isn't generally called from application code.
"""
def getName():
...
"""Work around an issue with defining class attribute documentation.
See http://stackoverflow.com/questions/9153473/sphinx-values-for-attributes-reported-as-none/39276413
"""
class ValueDoc:

    def __init__(self, text):
        self.text = text

    def __repr__(self):
        return self.text
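A hedged sketch of how a helper like this might be used; it assumes the new class is importable as ``ZODB.valuedoc.ValueDoc``, and the ``packer`` attribute shown here is invented for illustration.

```python
# Assigning a ValueDoc instance as a class-attribute default makes Sphinx
# render the given text as the attribute's value instead of an opaque repr.
from ZODB.valuedoc import ValueDoc  # assumed module path for the new file

class ExampleStorage:
    #: Default packer used when none is passed to pack() (made-up attribute).
    packer = ValueDoc('the default file-storage packer')

print(repr(ExampleStorage.packer))   # -> the default file-storage packer
```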