diff --git a/src/CHANGES.txt b/src/CHANGES.txt
index f9239a32351d7874947862fb0626e70412988119..d60195ee6e77ef36d5dc17e1fbe0eccc3a6827aa 100644
--- a/src/CHANGES.txt
+++ b/src/CHANGES.txt
@@ -8,6 +8,10 @@
 Bugs Fixed
 ----------
 
+- Sizes of newly stored objects weren't added to the object-cache
+  size estimation, so the byte-based size-limiting feature could let
+  the cache grow well past its limit when many objects were added.
+
 - Deleted records weren't removed when packing file storages.
 
 - Fixed intermittent failures in the MVCCMappingStorage tests.
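
Note on the entry above: the byte-based cache limit (the ``cache_size_bytes``
argument to ``ZODB.DB``, i.e. the ``cache-size-bytes`` config option) is
checked against the cache's running ``total_estimated_size``, which is the
total the per-object estimates feed. A minimal, hedged sketch of that
relationship; the printed numbers depend on pickle sizes and are not
asserted here:

    import transaction
    import ZODB
    import ZODB.MappingStorage
    import ZODB.tests.util

    # cache_size_bytes is the byte-based limit that cache garbage
    # collection compares against total_estimated_size.
    db = ZODB.DB(ZODB.MappingStorage.MappingStorage(), cache_size_bytes=10000)
    conn = db.open()

    # Store a batch of brand-new objects.  Before this fix their sizes
    # never reached total_estimated_size, so the limit could not react
    # to them.
    for i in range(100):
        conn.root()[i] = ZODB.tests.util.P('x' * 100)
    transaction.commit()

    print(conn._cache.total_estimated_size)  # now reflects the new objects
    conn.cacheGC()  # incremental cache GC; consults cache_size_bytes
    print(conn._cache.total_estimated_size)  # kept near or below the limit
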
diff --git a/src/ZODB/Connection.py b/src/ZODB/Connection.py
index b8f2a2f6a6280720edfa21f44697479693b1ed0d..07785b5b743f412663e00e783efc8bd257889c49 100644
--- a/src/ZODB/Connection.py
+++ b/src/ZODB/Connection.py
@@ -656,10 +656,7 @@ class Connection(ExportImport, object):
                 obj._p_invalidate()
             else:
                 s = self._storage.store(oid, serial, p, '', transaction)
-            self._cache.update_object_size_estimation(oid,
-                                                   len(p)
-                                                   )
-            obj._p_estimated_size = len(p)
+
             self._store_count += 1
             # Put the object in the cache before handling the
             # response, just in case the response contains the
@@ -674,6 +671,9 @@ class Connection(ExportImport, object):
                 else:
                     raise
 
+            self._cache.update_object_size_estimation(oid, len(p))
+            obj._p_estimated_size = len(p)
+
             self._handle_serial(s, oid)
 
     def _handle_serial(self, store_return, oid=None, change=1):
@@ -901,9 +901,7 @@ class Connection(ExportImport, object):
 
         self._reader.setGhostState(obj, p)
         obj._p_serial = serial
-        self._cache.update_object_size_estimation(obj._p_oid,
-                                               len(p)
-                                               )
+        self._cache.update_object_size_estimation(obj._p_oid, len(p))
         obj._p_estimated_size = len(p)
 
         # Blob support
@@ -1162,9 +1160,7 @@ class Connection(ExportImport, object):
             data, serial = src.load(oid, src)
             obj = self._cache.get(oid, None)
             if obj is not None:
-                self._cache.update_object_size_estimation(obj._p_oid,
-                                                       len(data)
-                                                       )
+                self._cache.update_object_size_estimation(obj._p_oid, len(data))
                 obj._p_estimated_size = len(data)
             if isinstance(self._reader.getGhost(data), Blob):
                 blobfilename = src.loadBlob(oid, serial)
diff --git a/src/ZODB/tests/testCache.py b/src/ZODB/tests/testCache.py
index 5cf7d6f357d0ad7dc0534f4068ea3609739e9760..2498c37de47e57d750962eec90ab712764a77f19 100644
--- a/src/ZODB/tests/testCache.py
+++ b/src/ZODB/tests/testCache.py
@@ -18,21 +18,20 @@ purposes. It acts like a memo for unpickling.  It also keeps recent
 objects in memory under the assumption that they may be used again.
 """
 
-import gc
-import time
-import unittest
-import threading
-
 from persistent.cPickleCache import PickleCache
+from persistent import Persistent
 from persistent.mapping import PersistentMapping
+from ZODB.tests.MinPO import MinPO
+from ZODB.utils import p64
+from zope.testing import doctest
+import gc
+import threading
+import time
 import transaction
+import unittest
 import ZODB
 import ZODB.MappingStorage
-from ZODB.tests.MinPO import MinPO
 import ZODB.tests.util
-from ZODB.utils import p64
-
-from persistent import Persistent
 
 class CacheTestBase(ZODB.tests.util.TestCase):
 
@@ -418,8 +417,64 @@ class CacheErrors(unittest.TestCase):
         else:
             self.fail("two objects with the same oid should have failed")
 
+def check_basic_cache_size_estimation():
+    """Make sure the basic accounting is correct:
+
+    >>> import ZODB.MappingStorage
+    >>> db = ZODB.MappingStorage.DB()
+    >>> conn = db.open()
+
+The cache is empty initially:
+
+    >>> conn._cache.total_estimated_size
+    0
+
+We force the root to be loaded and the cache grows:
+
+    >>> getattr(conn.root, 'z', None)
+    >>> conn._cache.total_estimated_size
+    128
+
+We add some data and the cache grows:
+
+    >>> conn.root.z = ZODB.tests.util.P('x'*100)
+    >>> import transaction
+    >>> transaction.commit()
+    >>> conn._cache.total_estimated_size
+    320
+
+Loading the objects in another connection gets the same sizes:
+
+    >>> conn2 = db.open()
+    >>> conn2._cache.total_estimated_size
+    0
+    >>> getattr(conn2.root, 'x', None)
+    >>> conn2._cache.total_estimated_size
+    128
+    >>> _ = conn2.root.z.name
+    >>> conn2._cache.total_estimated_size
+    320
+
+If we deactivate, the size goes down:
+
+    >>> conn2.root.z._p_deactivate()
+    >>> conn2._cache.total_estimated_size
+    128
+
+Loading data directly, rather than through traversal, updates the cache
+size correctly:
+
+    >>> conn3 = db.open()
+    >>> _ = conn3.get(conn2.root.z._p_oid).name
+    >>> conn3._cache.total_estimated_size
+    192
+
+    """
+
+
 def test_suite():
     s = unittest.makeSuite(DBMethods, 'check')
     s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
     s.addTest(unittest.makeSuite(CacheErrors, 'check'))
+    s.addTest(doctest.DocTestSuite())
     return s
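
With the ``doctest.DocTestSuite()`` line above, the new
``check_basic_cache_size_estimation`` doctest is collected alongside the
existing ``check*`` test classes. A quick, hedged way to exercise the whole
module (including the new doctest) outside the normal test runner:

    import unittest
    import ZODB.tests.testCache

    unittest.TextTestRunner(verbosity=2).run(ZODB.tests.testCache.test_suite())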