Commit 0632ffa6 authored by Jim Fulton

New Feature:

  The file-storage backup script, repozo, will now create a backup
  index file if an output file name is given via the --output/-o
  option.

(Merged the tseaver-repozo_index branch.)
parent f19d7a0c
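For orientation, here is a minimal sketch (hypothetical, not part of the commit) of the naming convention this change introduces, based only on the hunks below: every backup written to the repository gets a sibling "<backup-name>.index" file holding FileStorage's object-position index (fs._index), and a recovery run with --output copies the index of the most recent backup to "<output>.index" so the recovered Data.fs can be opened without rescanning the whole file.

# Hypothetical illustration only; not code from repozo.py.
import os
import shutil

def index_for(backup_path):
    # e.g. ".../2010-05-14-04-05-06.fs" -> ".../2010-05-14-04-05-06.index"
    root, _ext = os.path.splitext(backup_path)
    return root + '.index'

def restore_index(last_backup, output):
    # Mirrors the new do_recover() step: it only applies when an --output
    # file was given; recovery to stdout skips the index entirely.
    source_index = index_for(last_backup)
    target_index = '%s.index' % output
    if os.path.exists(source_index):
        shutil.copyfile(source_index, target_index)

The helper names index_for and restore_index are invented for illustration; the actual logic lives in do_full_backup, do_incremental_backup, and do_recover in the repozo.py hunks below.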
@@ -37,6 +37,10 @@ New Features
new option (large_record_size/large-record-size) to control the
record size at which the warning is issued.
- The file-storage backup script, repozo, will now create a backup
index file if an output file name is given via the --output/-o
option.
Bugs Fixed
----------
@@ -5,7 +5,7 @@
# Originally written by Anthony Baxter
# Significantly modified by Barry Warsaw
"""repozo.py -- incremental and full backups of a Data.fs file.
"""repozo.py -- incremental and full backups of a Data.fs file and index.
Usage: %(program)s [options]
Where:
@@ -66,9 +66,13 @@ Options for -R/--recover:
--output=filename
Write recovered ZODB to given file. By default, the file is
written to stdout.
Note: for the stdout case, the index file will **not** be restored
automatically.
"""
import os
import shutil
import sys
try:
# the hashlib package is available from Python 2.5
@@ -96,6 +100,11 @@ VERBOSE = False
class WouldOverwriteFiles(Exception):
pass
class NoFiles(Exception):
pass
def usage(code, msg=''):
outfp = sys.stderr
if code == 0:
@@ -394,14 +403,14 @@ def delete_old_backups(options):
# keep most recent full
if not full:
return
recentfull = full.pop(-1)
deletable.remove(recentfull)
root, ext = os.path.splitext(recentfull)
dat = root + '.dat'
if dat in deletable:
deletable.remove(dat)
for fname in deletable:
log('removing old backup file %s (and .dat)', fname)
root, ext = os.path.splitext(fname)
@@ -412,6 +421,10 @@ def delete_old_backups(options):
os.unlink(os.path.join(options.repository, fname))
def do_full_backup(options):
options.full = True
dest = os.path.join(options.repository, gen_filename(options))
if os.path.exists(dest):
raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
# Find the file position of the last completed transaction.
fs = FileStorage(options.file, read_only=True)
# Note that the FileStorage ctor calls read_index() which scans the file
@@ -420,11 +433,12 @@ def do_full_backup(options):
# because we only want to copy stuff from the beginning of the file to the
# last valid transaction record.
pos = fs.getSize()
# Save the storage index into the repository
index_file = os.path.join(options.repository,
gen_filename(options, '.index'))
log('writing index')
fs._index.save(pos, index_file)
fs.close()
options.full = True
dest = os.path.join(options.repository, gen_filename(options))
if os.path.exists(dest):
raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
log('writing full backup: %s bytes to %s', pos, dest)
sum = copyfile(options, dest, 0, pos)
# Write the data file for this full backup
@@ -439,6 +453,10 @@ def do_full_backup(options):
def do_incremental_backup(options, reposz, repofiles):
options.full = False
dest = os.path.join(options.repository, gen_filename(options))
if os.path.exists(dest):
raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
# Find the file position of the last completed transaction.
fs = FileStorage(options.file, read_only=True)
# Note that the FileStorage ctor calls read_index() which scans the file
@@ -447,11 +465,11 @@ def do_incremental_backup(options, reposz, repofiles):
# because we only want to copy stuff from the beginning of the file to the
# last valid transaction record.
pos = fs.getSize()
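# Save the storage index into the repository (mirrors do_full_backup above)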
log('writing index')
index_file = os.path.join(options.repository,
gen_filename(options, '.index'))
fs._index.save(pos, index_file)
fs.close()
options.full = False
dest = os.path.join(options.repository, gen_filename(options))
if os.path.exists(dest):
raise WouldOverwriteFiles('Cannot overwrite existing file: %s' % dest)
log('writing incremental: %s bytes to %s', pos-reposz, dest)
sum = copyfile(options, dest, reposz, pos - reposz)
# The first file in repofiles points to the last full backup. Use this to
@@ -552,10 +570,9 @@ def do_recover(options):
repofiles = find_files(options)
if not repofiles:
if options.date:
log('No files in repository before %s', options.date)
raise NoFiles('No files in repository before %s', options.date)
else:
log('No files in repository')
return
raise NoFiles('No files in repository')
if options.output is None:
log('Recovering file to stdout')
outfp = sys.stdout
@@ -567,6 +584,16 @@ def do_recover(options):
outfp.close()
log('Recovered %s bytes, md5: %s', reposz, reposum)
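# Restore the index file saved next to the most recent backup; recovery to
# stdout has no output path, so no index is restored (see the module docstring)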
if options.output is not None:
last_base = os.path.splitext(repofiles[-1])[0]
source_index = '%s.index' % last_base
target_index = '%s.index' % options.output
if os.path.exists(source_index):
log('Restoring index file %s to %s', source_index, target_index)
shutil.copyfile(source_index, target_index)
else:
log('No index file to restore: %s', source_index)
def main(argv=None):
if argv is None:
@@ -577,10 +604,14 @@ def main(argv=None):
do_backup(options)
except WouldOverwriteFiles, e:
print >> sys.stderr, str(e)
sys.exit(2)
sys.exit(1)
else:
assert options.mode == RECOVER
do_recover(options)
try:
do_recover(options)
except NoFiles, e:
print >> sys.stderr, str(e)
sys.exit(1)
if __name__ == '__main__':
@@ -74,6 +74,7 @@ class OurDB:
del tree[keys[0]]
transaction.commit()
self.pos = self.db.storage._pos
self.maxkey = self.db.storage._oid
self.close()
@@ -326,6 +327,11 @@ class Test_find_files(OptionsTestBase, unittest.TestCase):
f.close()
return fqn
def test_no_files(self):
options = self._makeOptions(date='2010-05-14-13-30-57')
found = self._callFUT(options)
self.assertEqual(found, [])
def test_explicit_date(self):
options = self._makeOptions(date='2010-05-14-13-30-57')
files = []
@@ -525,7 +531,9 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
self.assertRaises(WouldOverwriteFiles, self._callFUT, options)
def test_empty(self):
import struct
from ZODB.scripts.repozo import gen_filename
from ZODB.fsIndex import fsIndex
db = self._makeDB()
options = self._makeOptions(file=db._file_name,
gzip=False,
@@ -542,6 +550,15 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
self.assertEqual(open(datfile).read(),
'%s 0 %d %s\n' %
(target, len(original), md5(original).hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
ndx_info = fsIndex.load(ndxfile)
self.assertEqual(ndx_info['pos'], len(original))
index = ndx_info['index']
pZero = struct.pack(">Q", 0)
pOne = struct.pack(">Q", 1)
self.assertEqual(index.minKey(), pZero)
self.assertEqual(index.maxKey(), pOne)
class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
@@ -576,7 +593,9 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
self._callFUT, options, 0, repofiles)
def test_no_changes(self):
import struct
from ZODB.scripts.repozo import gen_filename
from ZODB.fsIndex import fsIndex
db = self._makeDB()
oldpos = db.pos
options = self._makeOptions(file=db._file_name,
@@ -603,9 +622,20 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
self.assertEqual(open(datfile).read(),
'%s %d %d %s\n' %
(target, oldpos, oldpos, md5('').hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
ndx_info = fsIndex.load(ndxfile)
self.assertEqual(ndx_info['pos'], oldpos)
index = ndx_info['index']
pZero = struct.pack(">Q", 0)
pOne = struct.pack(">Q", 1)
self.assertEqual(index.minKey(), pZero)
self.assertEqual(index.maxKey(), pOne)
def test_w_changes(self):
import struct
from ZODB.scripts.repozo import gen_filename
from ZODB.fsIndex import fsIndex
db = self._makeDB()
oldpos = db.pos
options = self._makeOptions(file=db._file_name,
@@ -637,7 +667,108 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
'%s %d %d %s\n' %
(target, oldpos, newpos,
md5(increment).hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
ndx_info = fsIndex.load(ndxfile)
self.assertEqual(ndx_info['pos'], newpos)
index = ndx_info['index']
pZero = struct.pack(">Q", 0)
self.assertEqual(index.minKey(), pZero)
self.assertEqual(index.maxKey(), db.maxkey)
class Test_do_recover(OptionsTestBase, unittest.TestCase):
def _callFUT(self, options):
from ZODB.scripts.repozo import do_recover
return do_recover(options)
def _makeFile(self, hour, min, sec, ext, text=None):
# call _makeOptions first!
name = '2010-05-14-%02d-%02d-%02d%s' % (hour, min, sec, ext)
if text is None:
text = name
fqn = os.path.join(self._repository_directory, name)
f = open(fqn, 'wb')
f.write(text)
f.flush()
f.close()
return fqn
def test_no_files(self):
from ZODB.scripts.repozo import NoFiles
options = self._makeOptions(date=None,
test_now=(2010, 5, 15, 13, 30, 57))
self.assertRaises(NoFiles, self._callFUT, options)
def test_no_files_before_explicit_date(self):
from ZODB.scripts.repozo import NoFiles
options = self._makeOptions(date='2010-05-13-13-30-57')
files = []
for h, m, s, e in [(2, 13, 14, '.fs'),
(2, 13, 14, '.dat'),
(3, 14, 15, '.deltafs'),
(4, 14, 15, '.deltafs'),
(5, 14, 15, '.deltafs'),
(12, 13, 14, '.fs'),
(12, 13, 14, '.dat'),
(13, 14, 15, '.deltafs'),
(14, 15, 16, '.deltafs'),
]:
files.append(self._makeFile(h, m, s, e))
self.assertRaises(NoFiles, self._callFUT, options)
def test_w_full_backup_latest_no_index(self):
import tempfile
dd = self._data_directory = tempfile.mkdtemp()
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.fs', 'BBB')
self._callFUT(options)
self.assertEqual(open(output, 'rb').read(), 'BBB')
def test_w_full_backup_latest_index(self):
import tempfile
dd = self._data_directory = tempfile.mkdtemp()
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.fs', 'BBB')
self._makeFile(4, 5, 6, '.index', 'CCC')
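# 'CCC' is a sentinel for the saved index; do_recover should copy it verbatim to '<output>.index'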
self._callFUT(options)
self.assertEqual(open(output, 'rb').read(), 'BBB')
self.assertEqual(open(index, 'rb').read(), 'CCC')
def test_w_incr_backup_latest_no_index(self):
import tempfile
dd = self._data_directory = tempfile.mkdtemp()
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBB')
self._callFUT(options)
self.assertEqual(open(output, 'rb').read(), 'AAABBB')
def test_w_incr_backup_latest_index(self):
import tempfile
dd = self._data_directory = tempfile.mkdtemp()
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBB')
self._makeFile(4, 5, 6, '.index', 'CCC')
self._callFUT(options)
self.assertEqual(open(output, 'rb').read(), 'AAABBB')
self.assertEqual(open(index, 'rb').read(), 'CCC')
class MonteCarloTests(unittest.TestCase):
@@ -754,5 +885,9 @@ def test_suite():
unittest.makeSuite(Test_delete_old_backups),
unittest.makeSuite(Test_do_full_backup),
unittest.makeSuite(Test_do_incremental_backup),
#unittest.makeSuite(Test_do_backup), #TODO
unittest.makeSuite(Test_do_recover),
# N.B.: this test takes forever to run (~40sec on a fast laptop),
# *and* it is non-deterministic.
unittest.makeSuite(MonteCarloTests),
])