Commit 25b9f257 authored by Jim Fulton

There is a new pool-timeout database configuration option to specify that
connections unused after the given time interval should be garbage
collected.  This will provide a means of dealing with extra connections
that are created in rare circumstances and that would consume an
unreasonable amount of memory.
parent 3b8c3bed
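
A minimal usage sketch of the new option (not part of the commit; the
MappingStorage and the 600-second value are illustrative, and the
pool_timeout keyword name is taken from the changed DB.__init__ signature in
the diff below):

    import ZODB
    import ZODB.MappingStorage

    # Open a database whose connection pool may garbage collect
    # connections that have sat unused for more than 600 seconds.
    storage = ZODB.MappingStorage.MappingStorage()
    db = ZODB.DB(storage, pool_size=7, pool_timeout=600)

    conn = db.open()
    # ... work with conn.root() ...
    conn.close()   # connections returned to the pool and left idle past
                   # pool_timeout become eligible for collection
    db.close()
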
@@ -63,6 +63,12 @@ New Features
   wastage. Now, when connections are placed on the stack, they sink
   below existing connections that have more active objects.
+- There is a new pool-timeout database configuration option to specify that
+  connections unused after the given time interval should be garbage
+  collected.  This will provide a means of dealing with extra
+  connections that are created in rare circumstances and that would
+  consume an unreasonable amount of memory.

 3.9.0a8 (2008-12-15)
 ====================
......
@@ -12,8 +12,7 @@
 #
 ##############################################################################
 """Database objects
-
-$Id$"""
+"""
 import warnings
@@ -70,7 +69,7 @@ class AbstractConnectionPool(object):
     connectionDebugInfo() can still gather statistics.
     """

-    def __init__(self, size, timeout=None):
+    def __init__(self, size, timeout):
         # The largest # of connections we expect to see alive simultaneously.
         self._size = size
@@ -95,8 +94,7 @@ class AbstractConnectionPool(object):
     def setTimeout(self, timeout):
         old = self._timeout
         self._timeout = timeout
-        if timeout is not None and old != timeout and (
-            old is None or old > timeout):
+        if timeout < old:
             self._reduce_size()

     def getSize(self):
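
A hedged note on the simplified comparison in the hunk above: because the
pool timeout now always holds a number (the old None default is gone),
lowering the timeout is the only case that can leave stale idle connections
behind, so only timeout < old needs to trigger _reduce_size(). A sketch
against the internal pool API, continuing from the db opened in the earlier
sketch (illustration only):

    # db.pool is the ConnectionPool created by DB.__init__.
    pool = db.pool
    pool.setTimeout(300)    # lower than the current timeout: idle
                            # connections older than 300s are reclaimed
    pool.setTimeout(3600)   # raising the timeout never shrinks the pool
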
@@ -112,7 +110,7 @@ class AbstractConnectionPool(object):
 class ConnectionPool(AbstractConnectionPool):

     # XXX WTF, passing time.time() as a default?
-    def __init__(self, size, timeout=time.time()):
+    def __init__(self, size, timeout=1<<31):
         super(ConnectionPool, self).__init__(size, timeout)

         # A stack of connections available to hand out. This is a subset
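
A quick arithmetic aside on the new default (not from the commit itself):
1<<31 seconds is roughly 68 years, so unless a timeout is configured the
pool effectively never expires connections, which is presumably what the
buggy time.time() default was aiming for:

    # 1 << 31 seconds expressed in years: about 68, i.e. effectively "never".
    seconds = 1 << 31                        # 2147483648
    years = seconds / (365.25 * 24 * 3600)   # ~68.05
    print(years)
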
@@ -245,7 +243,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
     # see the comments in ConnectionPool for method descriptions.

-    def __init__(self, size, timeout=time.time()):
+    def __init__(self, size, timeout=1<<31):
         super(KeyedConnectionPool, self).__init__(size, timeout)
         self.pools = {}
@@ -305,8 +303,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
         for pool in self.pools.itervalues():
             result.extend(pool.available)
         return tuple(result)

 def toTimeStamp(dt):
     utc_struct = dt.utctimetuple()
@@ -379,6 +376,7 @@ class DB(object):
     def __init__(self, storage,
                  pool_size=7,
+                 pool_timeout=1<<31,
                  cache_size=400,
                  cache_size_bytes=0,
                  historical_pool_size=3,
@@ -416,7 +414,7 @@ class DB(object):
         self._r = x.release

         # pools and cache sizes
-        self.pool = ConnectionPool(pool_size)
+        self.pool = ConnectionPool(pool_size, pool_timeout)
         self.historical_pool = KeyedConnectionPool(historical_pool_size,
                                                    historical_timeout)
         self._cache_size = cache_size
@@ -838,7 +836,7 @@ class DB(object):
         finally:
             self._r()

     def setHistoricalCacheSize(self, size):
         self._a()
         try:
             self._historical_cache_size = size
@@ -848,7 +846,7 @@ class DB(object):
         finally:
             self._r()

     def setHistoricalCacheSizeBytes(self, size):
         self._a()
         try:
             self._historical_cache_size_bytes = size
......
@@ -250,6 +250,11 @@
     and exceeding twice pool-size connections causes a critical
     message to be logged.
   </description>
+  <key name="pool-timeout" datatype="time-interval"/>
+  <description>
+    The minimum interval that an unused (non-historical)
+    connection should be kept.
+  </description>
   <key name="historical-pool-size" datatype="integer" default="3"/>
   <description>
     The expected maximum total number of historical connections
......
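
A hedged configuration sketch for the new key (it mirrors the doctest added
further down; everything except the pool-timeout line is illustrative). The
datatype is ZConfig's time-interval, so a plain number of seconds such as
600 should be accepted:

    import ZODB.config

    # Build a database from a configuration string that sets the new
    # pool-timeout option to 600 seconds.
    db = ZODB.config.databaseFromString('''
    <zodb>
      pool-timeout 600
      <mappingstorage/>
    </zodb>
    ''')
    print(db.pool._timeout)   # 600 (internal attribute, as in the test below)
    db.close()
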
@@ -92,6 +92,10 @@ class ZODBDatabase(BaseConfig):
     def open(self, databases=None):
         section = self.config
         storage = section.storage.open()
+        options = {}
+        if section.pool_timeout is not None:
+            options['pool_timeout'] = section.pool_timeout
         try:
             return ZODB.DB(
                 storage,
@@ -104,7 +108,7 @@ class ZODBDatabase(BaseConfig):
                 historical_timeout=section.historical_timeout,
                 database_name=section.database_name,
                 databases=databases,
-                )
+                **options)
         except:
             storage.close()
             raise
@@ -127,7 +131,7 @@ class DemoStorage(BaseConfig):
             base = factory.open()
         else:
             raise ValueError("Too many base storages defined!")

         from ZODB.DemoStorage import DemoStorage
         return DemoStorage(self.config.name, base=base, changes=changes)
@@ -139,7 +143,7 @@ class FileStorage(BaseConfig):
         if self.config.packer:
             m, name = self.config.packer.rsplit('.', 1)
             options['packer'] = getattr(__import__(m, {}, {}, ['*']), name)

         return FileStorage(self.config.path,
                            create=self.config.create,
                            read_only=self.config.read_only,
@@ -169,7 +173,7 @@ class ZEOClient(BaseConfig):
             options['blob_cache_size'] = self.config.blob_cache_size
         if self.config.blob_cache_size_check is not None:
             options['blob_cache_size_check'] = self.config.blob_cache_size_check

         return ClientStorage(
             L,
             blob_dir=self.config.blob_dir,
......
@@ -19,9 +19,10 @@ import ZEO.ClientStorage
 import ZODB.config
 import ZODB.POSException
 import ZODB.tests.util
+from zope.testing import doctest


 class ConfigTestBase(ZODB.tests.util.TestCase):

     def _opendb(self, s):
         return ZODB.config.databaseFromString(s)
@@ -128,11 +129,35 @@ class ZEOConfigTest(ConfigTestBase):
                          os.path.abspath('blobs'))
         self.assertRaises(ClientDisconnected, self._test, cfg)

+
+def db_connection_pool_timeout():
+    """
+    Test that the database pool timeout option works:
+
+    >>> db = ZODB.config.databaseFromString('''
+    ... <zodb>
+    ... <mappingstorage/>
+    ... </zodb>
+    ... ''')
+    >>> db.pool._timeout == 1<<31
+    True
+
+    >>> db = ZODB.config.databaseFromString('''
+    ... <zodb>
+    ... pool-timeout 600
+    ... <mappingstorage/>
+    ... </zodb>
+    ... ''')
+    >>> db.pool._timeout == 600
+    True
+    """
+
 def test_suite():
     suite = unittest.TestSuite()
     suite.addTest(unittest.makeSuite(ZODBConfigTest))
     suite.addTest(unittest.makeSuite(ZEOConfigTest))
+    suite.addTest(doctest.DocTestSuite())
     return suite
......