Commit 275d2c0e authored by ben's avatar ben

Changed for 0.7.3 release


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@48 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent 9d159ffa
New in v0.7.3 (2002/04/29)
--------------------------
Fixed broken remote operation in v0.7.2 by applying (a variant of)
Daniel Robbins' patch. Also fixed associated bug in test set.
Fixed bug recognizing --[include|exclude]-filelist-stdin options, and
IndexError bug reading some filelists.
--force is no longer necessary if the target directory is empty.
--include/--exclude/etc now work for restoring as they do for backing up.
Raised verbosity level for traceback output - if long log error
messages are annoying you, set verbosity to 2. Will come up with a
better logging system later.
May have fixed a problem encountered by Matthew Farrellee and Kevin
Spicer wherein the _session_info_list information was stored on the
wrong computer. This could cause rdiff-backup to fail when running
after another backup that failed for a different reason. May backport
this fix to 0.6.0 later.
May have fixed a problem also noticed by Matthew Farrellee which can
cause rdiff-backup to exit when a directory changes into a
non-directory file while rdiff-backup is processing the directory.
(May also apply to 0.6.0).
Fixed a bug noticed by Jamie Heilman where restoring could fail if a
recent rdiff-backup process which produced the backup set was aborted
while processing a new directory. (May also apply to 0.6.0)
New in v0.7.2 (2002/04/11)
--------------------------
......
Accept a list of files??
Security audit
Don't produce stack trace which looks like crash/include file name in
logging stats
Add to above Dean Gaudet's suggestion: make errors look prettier (like tar).
Add new restore option - bring directory to state it was in, say, 5
days ago, etc.
Michael S. Muegel suggestion: "--char-translate source-char
replacement-string" for use between windows/unix conversions, e.g. ':'
to _colon_. Also distinguish new vs changed update in lvl 5 logging.
Bugs:
Jamie Heilman's race condition
\ No newline at end of file
......@@ -180,72 +180,4 @@ class DestructiveStepping:
lambda dsrpath, x, y: dsrpath.write_changes(),
initial_state)
def isexcluded(dsrp, source):
"""Return true if given DSRPath is excluded/ignored
If source = 1, treat as source file, otherwise treat as
destination file.
"""
return None # this code is for the test suites only, use Select instead
if Globals.exclude_device_files and dsrp.isdev(): return 1
if source: exclude_regexps = Globals.exclude_regexps
else: exclude_regexps = Globals.exclude_mirror_regexps
for regexp in exclude_regexps:
if regexp.match(dsrp.path):
Log("Excluding %s" % dsrp.path, 6)
return 1
return None
def Iterate_from(baserp, source, starting_index = None):
"""Iterate dsrps from baserp, skipping any matching exclude_regexps
includes only dsrps with indicies greater than starting_index
if starting_index is not None.
"""
def helper_starting_from(dsrpath):
"""Like helper, but only start iterating after starting_index"""
if dsrpath.index > starting_index:
# Past starting_index, revert to normal helper
for dsrp in helper(dsrpath): yield dsrp
elif dsrpath.index == starting_index[:len(dsrpath.index)]:
# May encounter starting index on this branch
if (not DestructiveStepping.isexcluded(dsrpath, source) and
not DestructiveStepping.initialize(dsrpath, source)):
if dsrpath.isdir():
dir_listing = dsrpath.listdir()
dir_listing.sort()
for filename in dir_listing:
for dsrp in helper_starting_from(
dsrpath.append(filename)):
yield dsrp
def helper(dsrpath):
if (not DestructiveStepping.isexcluded(dsrpath, source) and
not DestructiveStepping.initialize(dsrpath, source)):
yield dsrpath
if dsrpath.isdir():
dir_listing = dsrpath.listdir()
dir_listing.sort()
for filename in dir_listing:
for dsrp in helper(dsrpath.append(filename)):
yield dsrp
base_dsrpath = DSRPath(baserp.conn, baserp.base,
baserp.index, baserp.data)
if starting_index is None: return helper(base_dsrpath)
else: return helper_starting_from(base_dsrpath)
def Iterate_with_Finalizer(baserp, source):
"""Like Iterate_from, but finalize each dsrp afterwards"""
finalize = DestructiveStepping.Finalizer()
for dsrp in DestructiveStepping.Iterate_from(baserp, source):
yield dsrp
finalize(dsrp)
finalize.getresult()
MakeStatic(DestructiveStepping)
#!/usr/bin/env python
#
# rdiff-backup -- Mirror files while keeping incremental changes
# Version 0.7.2 released April 11, 2002
# Version 0.7.3 released April 29, 2002
# Copyright (C) 2001, 2002 Ben Escoto <bescoto@stanford.edu>
#
# This program is licensed under the GNU General Public License (GPL).
......@@ -14,6 +14,6 @@
# bugs or have any suggestions.
from __future__ import nested_scopes, generators
import os, stat, time, sys, getopt, re, cPickle, types, shutil, sha, marshal, traceback, popen2, tempfile, gzip
import os, stat, time, sys, getopt, re, cPickle, types, shutil, sha, marshal, traceback, popen2, tempfile, gzip, UserList
......@@ -73,6 +73,9 @@ class HighLevel:
if not isinstance(target_base, DSRPath):
target_base = DSRPath(target_base.conn, target_base.base,
target_base.index, target_base.data)
if not isinstance(mirror_base, DSRPath):
mirror_base = DSRPath(mirror_base.conn, mirror_base.base,
mirror_base.index, mirror_base.data)
Restore.RestoreRecursive(rest_time, mirror_base, rel_index,
baseinc_tup, target_base)
......@@ -272,9 +275,9 @@ class HLDestinationStruct:
try: dsrp = cls.check_skip_error(error_checked, dsrp)
except StopIteration: break
SaveState.checkpoint_inc_backup(ITR, finalizer, dsrp)
cls.check_skip_error(ITR.getresult, dsrp)
cls.check_skip_error(finalizer.getresult, dsrp)
except: cls.handle_last_error(dsrp, finalizer, ITR)
ITR.getresult()
finalizer.getresult()
if Globals.preserve_hardlinks: Hardlink.final_writedata()
SaveState.checkpoint_remove()
......@@ -288,6 +291,7 @@ class HLDestinationStruct:
(exp[0] in [2, # Means that a file is missing
5, # Reported by docv (see list)
13, # Permission denied IOError
20, # Means a directory changed to non-dir
26] # Requested by Campbell (see list) -
# happens on some NT systems
))):
......
......@@ -137,8 +137,8 @@ class Logger:
exc_info = sys.exc_info()
logging_func("Exception %s raised of class %s" %
(exc_info[1], exc_info[0]), 2)
logging_func("".join(traceback.format_tb(exc_info[2])), 2)
(exc_info[1], exc_info[0]), 3)
logging_func("".join(traceback.format_tb(exc_info[2])), 3)
Log = Logger()
......@@ -20,7 +20,6 @@ class Restore:
and rptarget is the rpath that will be written with the restored file.
"""
inclist = Restore.sortincseq(rest_time, inclist)
if not inclist and not (rpbase and rpbase.lstat()):
return # no increments were applicable
Log("Restoring %s with increments %s to %s" %
......@@ -58,7 +57,7 @@ class Restore:
i = i+1
incpairs = incpairs[:i+1]
# Return increments in reversed order
# Return increments in reversed order (latest first)
incpairs.reverse()
return map(lambda pair: pair[1], incpairs)
......@@ -106,44 +105,106 @@ class Restore:
"""
assert isinstance(target_base, DSRPath)
collated = RORPIter.CollateIterators(
DestructiveStepping.Iterate_from(mirror_base, None),
Restore.yield_inc_tuples(baseinc_tup))
baseinc_tup = IndexedTuple(baseinc_tup.index, (baseinc_tup[0],
Restore.sortincseq(rest_time, baseinc_tup[1])))
collated = Restore.yield_collated_tuples((), mirror_base,
baseinc_tup, target_base, rest_time)
mirror_finalizer = DestructiveStepping.Finalizer()
target_finalizer = DestructiveStepping.Finalizer()
for mirror, inc_tup in collated:
if not inc_tup:
inclist = []
target = target_base.new_index(mirror.index)
else:
inclist = inc_tup[1]
target = target_base.new_index(inc_tup.index)
for mirror, inc_tup, target in collated:
inclist = inc_tup and inc_tup[1] or []
DestructiveStepping.initialize(target, None)
Restore.RestoreFile(rest_time, mirror, mirror_rel_index,
inclist, target)
target_finalizer(target)
if mirror: mirror_finalizer(mirror)
target_finalizer.getresult()
mirror_finalizer.getresult()
mirror_finalizer.getresult()
def yield_collated_tuples(index, mirrorrp, inc_tup, target, rest_time):
"""Iterate collated tuples starting with given args
def yield_inc_tuples(inc_tuple):
"""Iterate increment tuples starting with inc_tuple
A collated tuple is an IndexedTuple (mirrorrp, inc_tuple, target).
inc_tuple is itself an IndexedTuple. target is an rpath where
the created file should go.
An increment tuple is an IndexedTuple (pair). The first will
be the rpath of a directory, and the second is a list of all
the increments associated with that directory. If there are
increments that do not correspond to a directory, the first
element will be None. All the rpaths involved correspond to
files in the increment directory.
In this case the "mirror" directory is treated as the source,
and we are actually copying stuff onto what Select considers
the source directory.
"""
oldindex, rpath = inc_tuple.index, inc_tuple[0]
yield inc_tuple
if not rpath or not rpath.isdir(): return
select_result = Globals.select_mirror.Select(target)
if select_result == 0: return
inc_base = inc_tup and inc_tup[0]
if mirrorrp and (not Globals.select_source.Select(mirrorrp) or
DestructiveStepping.initialize(mirrorrp, None)):
mirrorrp = None
collated_tuple = IndexedTuple(index, (mirrorrp, inc_tup, target))
if mirrorrp and mirrorrp.isdir() or inc_base and inc_base.isdir():
depth_tuples = Restore.yield_collated_tuples_dir(index, mirrorrp,
inc_tup, target, rest_time)
else: depth_tuples = None
if select_result == 1:
yield collated_tuple
if depth_tuples:
for tup in depth_tuples: yield tup
elif select_result == 2:
if depth_tuples:
try: first = depth_tuples.next()
except StopIteration: return # no tuples found inside, skip
yield collated_tuple
yield first
for tup in depth_tuples: yield tup
def yield_collated_tuples_dir(index, mirrorrp, inc_tup, target, rest_time):
"""Yield collated tuples from inside given args"""
if not Restore.check_dir_exists(mirrorrp, inc_tup): return
if mirrorrp and mirrorrp.isdir():
dirlist = mirrorrp.listdir()
dirlist.sort()
mirror_list = map(lambda x: IndexedTuple(x, (mirrorrp.append(x),)),
dirlist)
else: mirror_list = []
inc_list = Restore.get_inc_tuples(inc_tup, rest_time)
for indexed_tup in RORPIter.CollateIterators(iter(mirror_list),
iter(inc_list)):
filename = indexed_tup.index
new_inc_tup = indexed_tup[1]
new_mirrorrp = indexed_tup[0] and indexed_tup[0][0]
for new_col_tup in Restore.yield_collated_tuples(
index + (filename,), new_mirrorrp, new_inc_tup,
target.append(filename), rest_time): yield new_col_tup
def check_dir_exists(mirrorrp, inc_tuple):
"""Return true if target should be a directory"""
if inc_tuple and inc_tuple[1]:
# Incs say dir if last (earliest) one is a dir increment
return inc_tuple[1][-1].getinctype() == "dir"
elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror
else: return None
def get_inc_tuples(inc_tuple, rest_time):
"""Return list of inc tuples in given rpath of increment directory
An increment tuple is an IndexedTuple (pair). The second
element in the pair is a list of increments with the same
base. The first element is the rpath of the corresponding
base. Usually this base is a directory, otherwise it is
ignored. If there are increments whose corresponding base
doesn't exist, the first element will be None. All the rpaths
involved correspond to files in the increment directory.
"""
if not inc_tuple: return []
oldindex, incdir = inc_tuple.index, inc_tuple[0]
if not incdir.isdir(): return []
inc_list_dict = {} # Index tuple lists by index
dirlist = rpath.listdir()
dirlist = incdir.listdir()
def affirm_dict_indexed(index):
"""Make sure the inc_list_dict has given index"""
......@@ -152,7 +213,7 @@ class Restore:
def add_to_dict(filename):
"""Add filename to the inc tuple dictionary"""
rp = rpath.append(filename)
rp = incdir.append(filename)
if rp.isincfile():
basename = rp.getincbase_str()
affirm_dict_indexed(basename)
......@@ -161,20 +222,22 @@ class Restore:
affirm_dict_indexed(filename)
inc_list_dict[filename][0] = rp
def list2tuple(index):
"""Return inc_tuple version of dictionary entry by index"""
inclist = inc_list_dict[index]
if not inclist[1]: return None # no increments, so ignore
return IndexedTuple(oldindex + (index,), inclist)
def index2tuple(index):
"""Return inc_tuple version of dictionary entry by index
Also runs sortincseq to sort the increments and remove
irrelevant ones. This is done here so we can avoid
descending into .missing directories.
"""
incbase, inclist = inc_list_dict[index]
inclist = Restore.sortincseq(rest_time, inclist)
if not inclist: return None # no relevant increments, so ignore
return IndexedTuple(index, (incbase, inclist))
for filename in dirlist: add_to_dict(filename)
keys = inc_list_dict.keys()
keys.sort()
for index in keys:
new_inc_tuple = list2tuple(index)
if not new_inc_tuple: continue
elif new_inc_tuple[0]: # corresponds to directory
for i in Restore.yield_inc_tuples(new_inc_tuple): yield i
else: yield new_inc_tuple
return filter(lambda x: x, map(index2tuple, keys))
MakeStatic(Restore)
......@@ -394,6 +394,7 @@ class Resume:
specified.
"""
assert Globals.isbackup_writer
if Time.prevtime > later_than: return Time.prevtime # usual case
for si in cls.get_sis_covering_index(index):
......@@ -416,6 +417,7 @@ class Resume:
def SetSessionInfo(cls):
"""Read data directory and initialize _session_info"""
assert Globals.isbackup_writer
silist = []
rp_quad_dict = cls.group_rps_by_time(cls.get_relevant_rps())
times = rp_quad_dict.keys()
......
execfile("robust.py")
from __future__ import generators
import tempfile
import tempfile, UserList
#######################################################################
#
......@@ -224,7 +224,7 @@ MakeStatic(RORPIter)
class IndexedTuple:
class IndexedTuple(UserList.UserList):
"""Like a tuple, but has .index
This is used by CollateIterator above, and can be passed to the
......@@ -238,9 +238,15 @@ class IndexedTuple:
def __len__(self): return len(self.data)
def __getitem__(self, key):
"""This only works for numerical keys (faster that way)"""
"""This only works for numerical keys (easier this way)"""
return self.data[key]
def __lt__(self, other): return self.__cmp__(other) == -1
def __le__(self, other): return self.__cmp__(other) != 1
def __ne__(self, other): return not self.__eq__(other)
def __gt__(self, other): return self.__cmp__(other) == 1
def __ge__(self, other): return self.__cmp__(other) != -1
def __cmp__(self, other):
assert isinstance(other, IndexedTuple)
if self.index < other.index: return -1
......@@ -255,6 +261,4 @@ class IndexedTuple:
else: return None
def __str__(self):
assert len(self.data) == 2
return "(%s, %s).%s" % (str(self.data[0]), str(self.data[1]),
str(self.index))
return "(%s).%s" % (", ".join(map(str, self.data)), self.index)
......@@ -150,7 +150,7 @@ class Select:
finalize.getresult()
def Select(self, dsrp):
"""Run through the selection functions and return dominant value"""
"""Run through the selection functions and return dominant val 0/1/2"""
for sf in self.selection_functions:
result = sf(dsrp)
if result is not None: return result
......@@ -186,7 +186,7 @@ class Select:
1, arg[0]))
elif opt == "--include-regexp":
self.add_selection_func(self.regexp_get_sf(arg, 1))
else: assert 0, "Bad option %s" % opt
else: assert 0, "Bad selection option %s" % opt
except SelectError, e: self.parse_catch_error(e)
self.parse_last_excludes()
......@@ -254,8 +254,8 @@ probably isn't what you meant.""" %
i = [0] # We have to put index in list because of stupid scoping rules
def selection_function(dsrp):
if i[0] > len(tuple_list): return inc_default
while 1:
if i[0] >= len(tuple_list): return None
include, move_on = \
self.filelist_pair_match(dsrp, tuple_list[i[0]])
if move_on:
......@@ -313,9 +313,9 @@ probably isn't what you meant.""" %
def filelist_pair_match(self, dsrp, pair):
"""Matches a filelist tuple against a dsrp
Returns a pair (include, move_on, definitive). include is
None if the tuple doesn't match either way, and 0/1 if the
tuple excludes or includes the dsrp.
Returns a pair (include, move_on). include is None if the
tuple doesn't match either way, and 0/1 if the tuple excludes
or includes the dsrp.
move_on is true if the tuple cannot match a later index, and
so we should move on to the next tuple in the index.
......
......@@ -180,72 +180,4 @@ class DestructiveStepping:
lambda dsrpath, x, y: dsrpath.write_changes(),
initial_state)
def isexcluded(dsrp, source):
"""Return true if given DSRPath is excluded/ignored
If source = 1, treat as source file, otherwise treat as
destination file.
"""
return None # this code is for the test suites only, use Select instead
if Globals.exclude_device_files and dsrp.isdev(): return 1
if source: exclude_regexps = Globals.exclude_regexps
else: exclude_regexps = Globals.exclude_mirror_regexps
for regexp in exclude_regexps:
if regexp.match(dsrp.path):
Log("Excluding %s" % dsrp.path, 6)
return 1
return None
def Iterate_from(baserp, source, starting_index = None):
"""Iterate dsrps from baserp, skipping any matching exclude_regexps
includes only dsrps with indicies greater than starting_index
if starting_index is not None.
"""
def helper_starting_from(dsrpath):
"""Like helper, but only start iterating after starting_index"""
if dsrpath.index > starting_index:
# Past starting_index, revert to normal helper
for dsrp in helper(dsrpath): yield dsrp
elif dsrpath.index == starting_index[:len(dsrpath.index)]:
# May encounter starting index on this branch
if (not DestructiveStepping.isexcluded(dsrpath, source) and
not DestructiveStepping.initialize(dsrpath, source)):
if dsrpath.isdir():
dir_listing = dsrpath.listdir()
dir_listing.sort()
for filename in dir_listing:
for dsrp in helper_starting_from(
dsrpath.append(filename)):
yield dsrp
def helper(dsrpath):
if (not DestructiveStepping.isexcluded(dsrpath, source) and
not DestructiveStepping.initialize(dsrpath, source)):
yield dsrpath
if dsrpath.isdir():
dir_listing = dsrpath.listdir()
dir_listing.sort()
for filename in dir_listing:
for dsrp in helper(dsrpath.append(filename)):
yield dsrp
base_dsrpath = DSRPath(baserp.conn, baserp.base,
baserp.index, baserp.data)
if starting_index is None: return helper(base_dsrpath)
else: return helper_starting_from(base_dsrpath)
def Iterate_with_Finalizer(baserp, source):
"""Like Iterate_from, but finalize each dsrp afterwards"""
finalize = DestructiveStepping.Finalizer()
for dsrp in DestructiveStepping.Iterate_from(baserp, source):
yield dsrp
finalize(dsrp)
finalize.getresult()
MakeStatic(DestructiveStepping)
......@@ -8,7 +8,7 @@ import re, os
class Globals:
# The current version of rdiff-backup
version = "0.7.2"
version = "0.7.3"
# If this is set, use this value in seconds as the current time
# instead of reading it from the clock.
......
#!/usr/bin/env python
#
# rdiff-backup -- Mirror files while keeping incremental changes
# Version 0.7.2 released April 11, 2002
# Version 0.7.3 released April 29, 2002
# Copyright (C) 2001, 2002 Ben Escoto <bescoto@stanford.edu>
#
# This program is licensed under the GNU General Public License (GPL).
......@@ -14,6 +14,6 @@
# bugs or have any suggestions.
from __future__ import nested_scopes, generators
import os, stat, time, sys, getopt, re, cPickle, types, shutil, sha, marshal, traceback, popen2, tempfile, gzip
import os, stat, time, sys, getopt, re, cPickle, types, shutil, sha, marshal, traceback, popen2, tempfile, gzip, UserList
......@@ -73,6 +73,9 @@ class HighLevel:
if not isinstance(target_base, DSRPath):
target_base = DSRPath(target_base.conn, target_base.base,
target_base.index, target_base.data)
if not isinstance(mirror_base, DSRPath):
mirror_base = DSRPath(mirror_base.conn, mirror_base.base,
mirror_base.index, mirror_base.data)
Restore.RestoreRecursive(rest_time, mirror_base, rel_index,
baseinc_tup, target_base)
......@@ -272,9 +275,9 @@ class HLDestinationStruct:
try: dsrp = cls.check_skip_error(error_checked, dsrp)
except StopIteration: break
SaveState.checkpoint_inc_backup(ITR, finalizer, dsrp)
cls.check_skip_error(ITR.getresult, dsrp)
cls.check_skip_error(finalizer.getresult, dsrp)
except: cls.handle_last_error(dsrp, finalizer, ITR)
ITR.getresult()
finalizer.getresult()
if Globals.preserve_hardlinks: Hardlink.final_writedata()
SaveState.checkpoint_remove()
......@@ -288,6 +291,7 @@ class HLDestinationStruct:
(exp[0] in [2, # Means that a file is missing
5, # Reported by docv (see list)
13, # Permission denied IOError
20, # Means a directory changed to non-dir
26] # Requested by Campbell (see list) -
# happens on some NT systems
))):
......
......@@ -137,8 +137,8 @@ class Logger:
exc_info = sys.exc_info()
logging_func("Exception %s raised of class %s" %
(exc_info[1], exc_info[0]), 2)
logging_func("".join(traceback.format_tb(exc_info[2])), 2)
(exc_info[1], exc_info[0]), 3)
logging_func("".join(traceback.format_tb(exc_info[2])), 3)
Log = Logger()
......@@ -53,7 +53,8 @@ class Main:
elif opt == "--exclude-filelist":
self.select_opts.append((opt, (arg, sel_fl(arg))))
elif opt == "--exclude-filelist-stdin":
self.select_opts.append((opt, ("standard input", sys.stdin)))
self.select_opts.append(("--exclude-filelist",
("standard input", sys.stdin)))
elif opt == "--exclude-mirror":
self.select_mirror_opts.append(("--exclude", arg))
elif opt == "--exclude-regexp": self.select_opts.append((opt, arg))
......@@ -62,7 +63,8 @@ class Main:
elif opt == "--include-filelist":
self.select_opts.append((opt, (arg, sel_fl(arg))))
elif opt == "--include-filelist-stdin":
self.select_opts.append((opt, ("standard input", sys.stdin)))
self.select_opts.append(("--include-filelist",
("standard input", sys.stdin)))
elif opt == "--include-regexp":
self.select_opts.append((opt, arg))
elif opt == "-l" or opt == "--list-increments":
......@@ -125,7 +127,7 @@ class Main:
sys.exit(1)
def misc_setup(self, rps):
"""Set default change ownership flag, umask, Select objects"""
"""Set default change ownership flag, umask, relay regexps"""
if ((len(rps) == 2 and rps[1].conn.os.getuid() == 0) or
(len(rps) < 2 and os.getuid() == 0)):
# Allow change_ownership if destination connection is root
......@@ -134,10 +136,9 @@ class Main:
for rp in rps: rp.setdata() # Update with userinfo
os.umask(077)
rps[0].conn.Globals.set_select(1, rps[0], self.select_opts)
if len(rps) == 2:
rps[1].conn.Globals.set_select(None, rps[1],
self.select_mirror_opts)
# This is because I originally didn't think compiled regexps
# could be pickled, and so must be compiled on remote side.
Globals.postset_regexp('no_compression_regexp',
Globals.no_compression_regexp_string)
......@@ -190,9 +191,10 @@ rdiff-backup with the --force option if you want to mirror anyway.""" %
def Backup(self, rpin, rpout):
"""Backup, possibly incrementally, src_path to dest_path."""
SetConnections.BackupInitConnections(rpin.conn, rpout.conn)
self.backup_init_select(rpin, rpout)
self.backup_init_dirs(rpin, rpout)
Time.setcurtime(Globals.current_time)
RSI = Resume.ResumeCheck()
RSI = Globals.backup_writer.Resume.ResumeCheck()
if self.prevtime:
Time.setprevtime(self.prevtime)
SaveState.init_filenames(1)
......@@ -202,6 +204,11 @@ rdiff-backup with the --force option if you want to mirror anyway.""" %
HighLevel.Mirror(rpin, rpout, 1, RSI)
self.backup_touch_curmirror(rpin, rpout)
def backup_init_select(self, rpin, rpout):
"""Create Select objects on source and dest connections"""
rpin.conn.Globals.set_select(1, rpin, self.select_opts)
rpout.conn.Globals.set_select(None, rpout, self.select_mirror_opts)
def backup_init_dirs(self, rpin, rpout):
"""Make sure rpin and rpout are valid, init data dir and logging"""
if rpout.lstat() and not rpout.isdir():
......@@ -223,8 +230,11 @@ rdiff-backup with the --force option if you want to mirror anyway.""" %
"increments"))
self.prevtime = self.backup_get_mirrortime()
if rpout.lstat() and not self.datadir.lstat() and not self.force:
Log.FatalError(
if rpout.lstat():
if rpout.isdir() and not rpout.listdir(): # rpout is empty dir
rpout.chmod(0700) # just make sure permissions aren't too lax
elif not self.datadir.lstat() and not self.force:
Log.FatalError(
"""Destination directory %s exists, but does not look like a
rdiff-backup directory. Running rdiff-backup like this could mess up
what is currently in it. If you want to overwrite it, run
......@@ -292,6 +302,7 @@ went wrong during your last backup? Using """ + mirrorrps[-1].path, 2)
"""Main restoring function - take src_path to dest_path"""
Log("Starting Restore", 5)
rpin, rpout = self.restore_check_paths(src_rp, dest_rp)
self.restore_init_select(rpin, rpout)
inc_tup = self.restore_get_inctup(rpin)
mirror_base, mirror_rel_index = self.restore_get_mirror(rpin)
rtime = Time.stringtotime(rpin.getinctime())
......@@ -315,6 +326,17 @@ Try restoring from an increment file (the filenames look like
"Will not overwrite." % rpout.path)
return rpin, rpout
def restore_init_select(self, rpin, rpout):
"""Initialize Select
Unlike the backup selections, here they are on the local
connection, because the backup operation is pipelined in a way
the restore operation isn't.
"""
Globals.set_select(1, rpin, self.select_mirror_opts)
Globals.set_select(None, rpout, self.select_opts)
def restore_get_inctup(self, rpin):
"""Return increment tuple (incrp, list of incs)"""
rpin_dir = rpin.dirsplit()[0]
......
......@@ -20,7 +20,6 @@ class Restore:
and rptarget is the rpath that will be written with the restored file.
"""
inclist = Restore.sortincseq(rest_time, inclist)
if not inclist and not (rpbase and rpbase.lstat()):
return # no increments were applicable
Log("Restoring %s with increments %s to %s" %
......@@ -58,7 +57,7 @@ class Restore:
i = i+1
incpairs = incpairs[:i+1]
# Return increments in reversed order
# Return increments in reversed order (latest first)
incpairs.reverse()
return map(lambda pair: pair[1], incpairs)
......@@ -106,44 +105,106 @@ class Restore:
"""
assert isinstance(target_base, DSRPath)
collated = RORPIter.CollateIterators(
DestructiveStepping.Iterate_from(mirror_base, None),
Restore.yield_inc_tuples(baseinc_tup))
baseinc_tup = IndexedTuple(baseinc_tup.index, (baseinc_tup[0],
Restore.sortincseq(rest_time, baseinc_tup[1])))
collated = Restore.yield_collated_tuples((), mirror_base,
baseinc_tup, target_base, rest_time)
mirror_finalizer = DestructiveStepping.Finalizer()
target_finalizer = DestructiveStepping.Finalizer()
for mirror, inc_tup in collated:
if not inc_tup:
inclist = []
target = target_base.new_index(mirror.index)
else:
inclist = inc_tup[1]
target = target_base.new_index(inc_tup.index)
for mirror, inc_tup, target in collated:
inclist = inc_tup and inc_tup[1] or []
DestructiveStepping.initialize(target, None)
Restore.RestoreFile(rest_time, mirror, mirror_rel_index,
inclist, target)
target_finalizer(target)
if mirror: mirror_finalizer(mirror)
target_finalizer.getresult()
mirror_finalizer.getresult()
mirror_finalizer.getresult()
def yield_collated_tuples(index, mirrorrp, inc_tup, target, rest_time):
"""Iterate collated tuples starting with given args
def yield_inc_tuples(inc_tuple):
"""Iterate increment tuples starting with inc_tuple
A collated tuple is an IndexedTuple (mirrorrp, inc_tuple, target).
inc_tuple is itself an IndexedTuple. target is an rpath where
the created file should go.
An increment tuple is an IndexedTuple (pair). The first will
be the rpath of a directory, and the second is a list of all
the increments associated with that directory. If there are
increments that do not correspond to a directory, the first
element will be None. All the rpaths involved correspond to
files in the increment directory.
In this case the "mirror" directory is treated as the source,
and we are actually copying stuff onto what Select considers
the source directory.
"""
oldindex, rpath = inc_tuple.index, inc_tuple[0]
yield inc_tuple
if not rpath or not rpath.isdir(): return
select_result = Globals.select_mirror.Select(target)
if select_result == 0: return
inc_base = inc_tup and inc_tup[0]
if mirrorrp and (not Globals.select_source.Select(mirrorrp) or
DestructiveStepping.initialize(mirrorrp, None)):
mirrorrp = None
collated_tuple = IndexedTuple(index, (mirrorrp, inc_tup, target))
if mirrorrp and mirrorrp.isdir() or inc_base and inc_base.isdir():
depth_tuples = Restore.yield_collated_tuples_dir(index, mirrorrp,
inc_tup, target, rest_time)
else: depth_tuples = None
if select_result == 1:
yield collated_tuple
if depth_tuples:
for tup in depth_tuples: yield tup
elif select_result == 2:
if depth_tuples:
try: first = depth_tuples.next()
except StopIteration: return # no tuples found inside, skip
yield collated_tuple
yield first
for tup in depth_tuples: yield tup
def yield_collated_tuples_dir(index, mirrorrp, inc_tup, target, rest_time):
"""Yield collated tuples from inside given args"""
if not Restore.check_dir_exists(mirrorrp, inc_tup): return
if mirrorrp and mirrorrp.isdir():
dirlist = mirrorrp.listdir()
dirlist.sort()
mirror_list = map(lambda x: IndexedTuple(x, (mirrorrp.append(x),)),
dirlist)
else: mirror_list = []
inc_list = Restore.get_inc_tuples(inc_tup, rest_time)
for indexed_tup in RORPIter.CollateIterators(iter(mirror_list),
iter(inc_list)):
filename = indexed_tup.index
new_inc_tup = indexed_tup[1]
new_mirrorrp = indexed_tup[0] and indexed_tup[0][0]
for new_col_tup in Restore.yield_collated_tuples(
index + (filename,), new_mirrorrp, new_inc_tup,
target.append(filename), rest_time): yield new_col_tup
def check_dir_exists(mirrorrp, inc_tuple):
"""Return true if target should be a directory"""
if inc_tuple and inc_tuple[1]:
# Incs say dir if last (earliest) one is a dir increment
return inc_tuple[1][-1].getinctype() == "dir"
elif mirrorrp: return mirrorrp.isdir() # if no incs, copy mirror
else: return None
def get_inc_tuples(inc_tuple, rest_time):
"""Return list of inc tuples in given rpath of increment directory
An increment tuple is an IndexedTuple (pair). The second
element in the pair is a list of increments with the same
base. The first element is the rpath of the corresponding
base. Usually this base is a directory, otherwise it is
ignored. If there are increments whose corresponding base
doesn't exist, the first element will be None. All the rpaths
involved correspond to files in the increment directory.
"""
if not inc_tuple: return []
oldindex, incdir = inc_tuple.index, inc_tuple[0]
if not incdir.isdir(): return []
inc_list_dict = {} # Index tuple lists by index
dirlist = rpath.listdir()
dirlist = incdir.listdir()
def affirm_dict_indexed(index):
"""Make sure the inc_list_dict has given index"""
......@@ -152,7 +213,7 @@ class Restore:
def add_to_dict(filename):
"""Add filename to the inc tuple dictionary"""
rp = rpath.append(filename)
rp = incdir.append(filename)
if rp.isincfile():
basename = rp.getincbase_str()
affirm_dict_indexed(basename)
......@@ -161,20 +222,22 @@ class Restore:
affirm_dict_indexed(filename)
inc_list_dict[filename][0] = rp
def list2tuple(index):
"""Return inc_tuple version of dictionary entry by index"""
inclist = inc_list_dict[index]
if not inclist[1]: return None # no increments, so ignore
return IndexedTuple(oldindex + (index,), inclist)
def index2tuple(index):
"""Return inc_tuple version of dictionary entry by index
Also runs sortincseq to sort the increments and remove
irrelevant ones. This is done here so we can avoid
descending into .missing directories.
"""
incbase, inclist = inc_list_dict[index]
inclist = Restore.sortincseq(rest_time, inclist)
if not inclist: return None # no relevant increments, so ignore
return IndexedTuple(index, (incbase, inclist))
for filename in dirlist: add_to_dict(filename)
keys = inc_list_dict.keys()
keys.sort()
for index in keys:
new_inc_tuple = list2tuple(index)
if not new_inc_tuple: continue
elif new_inc_tuple[0]: # corresponds to directory
for i in Restore.yield_inc_tuples(new_inc_tuple): yield i
else: yield new_inc_tuple
return filter(lambda x: x, map(index2tuple, keys))
MakeStatic(Restore)
......@@ -394,6 +394,7 @@ class Resume:
specified.
"""
assert Globals.isbackup_writer
if Time.prevtime > later_than: return Time.prevtime # usual case
for si in cls.get_sis_covering_index(index):
......@@ -416,6 +417,7 @@ class Resume:
def SetSessionInfo(cls):
"""Read data directory and initialize _session_info"""
assert Globals.isbackup_writer
silist = []
rp_quad_dict = cls.group_rps_by_time(cls.get_relevant_rps())
times = rp_quad_dict.keys()
......
execfile("robust.py")
from __future__ import generators
import tempfile
import tempfile, UserList
#######################################################################
#
......@@ -224,7 +224,7 @@ MakeStatic(RORPIter)
class IndexedTuple:
class IndexedTuple(UserList.UserList):
"""Like a tuple, but has .index
This is used by CollateIterator above, and can be passed to the
......@@ -238,9 +238,15 @@ class IndexedTuple:
def __len__(self): return len(self.data)
def __getitem__(self, key):
"""This only works for numerical keys (faster that way)"""
"""This only works for numerical keys (easier this way)"""
return self.data[key]
# Rich comparison operators; each delegates to __cmp__, which yields
# -1/0/1 for less/equal/greater.
# NOTE(review): __cmp__ appears to contain an `else: return None` branch;
# for such incomparable pairs __le__ and __ge__ would both evaluate true
# (None != 1 and None != -1) while __lt__/__gt__ are false -- confirm
# __cmp__'s full contract before relying on ordering here.
def __lt__(self, other): return self.__cmp__(other) == -1
def __le__(self, other): return self.__cmp__(other) != 1
def __ne__(self, other): return not self.__eq__(other)
def __gt__(self, other): return self.__cmp__(other) == 1
def __ge__(self, other): return self.__cmp__(other) != -1
def __cmp__(self, other):
assert isinstance(other, IndexedTuple)
if self.index < other.index: return -1
......@@ -255,6 +261,4 @@ class IndexedTuple:
else: return None
def __str__(self):
assert len(self.data) == 2
return "(%s, %s).%s" % (str(self.data[0]), str(self.data[1]),
str(self.index))
return "(%s).%s" % (", ".join(map(str, self.data)), self.index)
......@@ -150,7 +150,7 @@ class Select:
finalize.getresult()
def Select(self, dsrp):
"""Run through the selection functions and return dominant value"""
"""Run through the selection functions and return dominant val 0/1/2"""
for sf in self.selection_functions:
result = sf(dsrp)
if result is not None: return result
......@@ -186,7 +186,7 @@ class Select:
1, arg[0]))
elif opt == "--include-regexp":
self.add_selection_func(self.regexp_get_sf(arg, 1))
else: assert 0, "Bad option %s" % opt
else: assert 0, "Bad selection option %s" % opt
except SelectError, e: self.parse_catch_error(e)
self.parse_last_excludes()
......@@ -254,8 +254,8 @@ probably isn't what you meant.""" %
i = [0] # We have to put index in list because of stupid scoping rules
def selection_function(dsrp):
if i[0] > len(tuple_list): return inc_default
while 1:
if i[0] >= len(tuple_list): return None
include, move_on = \
self.filelist_pair_match(dsrp, tuple_list[i[0]])
if move_on:
......@@ -313,9 +313,9 @@ probably isn't what you meant.""" %
def filelist_pair_match(self, dsrp, pair):
"""Matches a filelist tuple against a dsrp
Returns a pair (include, move_on, definitive). include is
None if the tuple doesn't match either way, and 0/1 if the
tuple excludes or includes the dsrp.
Returns a pair (include, move_on). include is None if the
tuple doesn't match either way, and 0/1 if the tuple excludes
or includes the dsrp.
move_on is true if the tuple cannot match a later index, and
so we should move on to the next tuple in the index.
......
......@@ -177,7 +177,6 @@ information.""" % (exception, remote_cmd))
cls.UpdateGlobal("backup_reader", reading_conn)
cls.UpdateGlobal("backup_writer", writing_conn)
def CloseConnections(cls):
"""Close all connections. Run by client"""
assert not Globals.server
......
......@@ -7,9 +7,12 @@ the server. Otherwise will start the server without a chdir.
"""
execfile("commontest.py")
rbexec("setconnections.py")
import os, sys
#execfile("commontest.py")
#rbexec("setconnections.py")
if len(sys.argv) > 1: os.chdir(sys.argv[1])
PipeConnection(sys.stdin, sys.stdout).Server()
#PipeConnection(sys.stdin, sys.stdout).Server()
os.system("/home/ben/prog/python/rdiff-backup/src/rdiff-backup --server")
......@@ -13,6 +13,10 @@ def rbexec(src_file):
execfile(src_file, globals())
os.chdir(AbsCurdir)
def Myrm(dirstring):
    """Recursively delete the paths in dirstring via the myrm helper."""
    # Build the shell command once; MiscDir points at the test-support dir.
    rm_command = "%s/myrm %s" % (MiscDir, dirstring)
    # A nonzero exit status aborts the test run immediately.
    assert os.system(rm_command) == 0
def Make():
"""Make sure the rdiff-backup script in the source dir is up-to-date"""
os.chdir(SourceDir)
......@@ -91,6 +95,7 @@ def InternalMirror(source_local, dest_local, src_dir, dest_dir,
rpin, rpout = SetConnections.InitRPs([src_dir, dest_dir], remote_schema)
_get_main().misc_setup([rpin, rpout])
_get_main().backup_init_select(rpin, rpout)
if not rpout.lstat(): rpout.mkdir()
if checkpointing: # rdiff-backup-data must exist to checkpoint
data_dir = rpout.append("rdiff-backup-data")
......@@ -118,21 +123,20 @@ def InternalRestore(mirror_local, dest_local, mirror_dir, dest_dir, time):
mirror_rp, dest_rp = SetConnections.InitRPs([mirror_dir, dest_dir],
remote_schema)
def get_increment_rp(time):
"""Return increment rp matching time"""
data_rp = mirror_rp.append("rdiff-backup-data")
for filename in data_rp.listdir():
rp = data_rp.append(filename)
if (rp.isincfile() and rp.getincbase_str() == "increments" and
Time.stringtotime(rp.getinctime()) == time):
return rp
assert None, ("No increments.XXX.dir found in directory "
"%s with that time" % data_rp.path)
_get_main().Restore(get_increment_rp(time), dest_rp)
_get_main().Restore(get_increment_rp(mirror_rp, time), dest_rp)
_get_main().cleanup()
def get_increment_rp(mirror_rp, time):
    """Return increment rp matching time in seconds

    mirror_rp -- rpath of the mirror (backup destination) root
    time -- desired increment time, in seconds since the epoch

    Scans mirror_rp/rdiff-backup-data for an increment file whose base
    name is "increments" and whose embedded timestamp equals time.
    Raises AssertionError when no such increment exists.
    """
    data_rp = mirror_rp.append("rdiff-backup-data")
    for filename in data_rp.listdir():
        rp = data_rp.append(filename)
        if (rp.isincfile() and rp.getincbase_str() == "increments" and
            Time.stringtotime(rp.getinctime()) == time):
            return rp
    # The original used `assert None, msg`, which is silently stripped
    # under `python -O`; raise explicitly so the failure cannot vanish.
    # Same exception type (AssertionError), so callers are unaffected.
    raise AssertionError("No increments.XXX.dir found in directory "
                         "%s with that time" % data_rp.path)
def _reset_connections(src_rp, dest_rp):
"""Reset some global connection information"""
Globals.isbackup_reader = Globals.isbackup_writer = None
......@@ -148,7 +152,8 @@ def _get_main():
Globals.Main = Main()
return Globals.Main
def CompareRecursive(src_rp, dest_rp, compare_hardlinks = 1):
def CompareRecursive(src_rp, dest_rp, compare_hardlinks = 1,
equality_func = None):
"""Compare src_rp and dest_rp, which can be directories
This only compares file attributes, not the actual data. This
......@@ -182,6 +187,7 @@ def CompareRecursive(src_rp, dest_rp, compare_hardlinks = 1):
dsiter1 = Hardlink.add_rorp_iter(dsiter1, 1)
dsiter2 = Hardlink.add_rorp_iter(dsiter2, None)
result = Iter.equal(dsiter1, dsiter2, 1, hardlink_equal)
elif equality_func: result = Iter.equal(dsiter1, dsiter2, 1, equality_func)
else: result = Iter.equal(dsiter1, dsiter2, 1)
for i in dsiter1: pass # make sure all files processed anyway
......
......@@ -65,12 +65,15 @@ class PathSetter(unittest.TestCase):
print "executing " + cmdstr
assert not os.system(cmdstr)
def runtest(self):
# Deleting previous output
def delete_tmpdirs(self):
    """Remove any temp directories created by previous tests"""
    # Single myrm invocation covering every scratch path tests create.
    cleanup_cmd = (MiscDir + '/myrm testfiles/output* '
                   'testfiles/restoretarget* testfiles/vft_out '
                   'timbar.pyc')
    assert os.system(cleanup_cmd) == 0
def runtest(self):
self.delete_tmpdirs()
# Backing up increment1
self.exec_rb('testfiles/increment1', 'testfiles/output')
assert CompareRecursive(Local.inc1rp, Local.rpout)
......@@ -147,4 +150,85 @@ class Final(PathSetter):
self.runtest()
class FinalSelection(PathSetter):
    """Test selection options"""
    def testSelLocal(self):
        """Quick backup testing a few selection options"""
        self.delete_tmpdirs()
        # Test --include option: only various_file_types should be backed up.
        assert not \
               os.system(self.rb_schema +
                         "--current-time 10000 "
                         "--include testfiles/increment2/various_file_types "
                         "--exclude '**' "
                         "testfiles/increment2 testfiles/output")
        assert os.lstat("testfiles/output/various_file_types/regular_file")
        # test.py was excluded by '**', so it must not exist in the output.
        self.assertRaises(OSError, os.lstat, "testfiles/output/test.py")
        # Now try reading list of files
        # (--include-filelist-stdin: the include list is piped in below).
        fp = os.popen(self.rb_schema +
                      "--current-time 20000 "
                      "--include-filelist-stdin --exclude '**' "
                      "testfiles/increment2 testfiles/output", "w")
        fp.write("""
testfiles/increment2/test.py
testfiles/increment2/changed_dir""")
        # fp.close() returns the command's exit status (None on success).
        assert not fp.close()
        assert os.lstat("testfiles/output/changed_dir")
        assert os.lstat("testfiles/output/test.py")
        # Paths not on the filelist must have been excluded this session.
        self.assertRaises(OSError, os.lstat,
                          "testfiles/output/various_file_types")
        self.assertRaises(OSError, os.lstat,
                          "testfiles/output/changed_dir/foo")
        # Test selective restoring: restore only one file from the
        # time-10000 increment created above.
        mirror_rp = RPath(Globals.local_connection, "testfiles/output")
        restore_filename = get_increment_rp(mirror_rp, 10000).path
        assert not os.system(self.rb_schema +
                             "--include testfiles/restoretarget1/various_file_types/"
                             "regular_file "
                             "--exclude '**' " +
                             restore_filename + " testfiles/restoretarget1")
        assert os.lstat("testfiles/restoretarget1/various_file_types/"
                        "regular_file")
        self.assertRaises(OSError, os.lstat, "testfiles/restoretarget1/tester")
        self.assertRaises(OSError, os.lstat,
                          "testfiles/restoretarget1/various_file_types/executable")
        # Restore with a stdin filelist; the leading "- " marks an exclude.
        fp = os.popen(self.rb_schema +
                      "--include-filelist-stdin " + restore_filename +
                      " testfiles/restoretarget2", "w")
        fp.write("""
- testfiles/restoretarget2/various_file_types/executable""")
        assert not fp.close()
        assert os.lstat("testfiles/restoretarget2/various_file_types/"
                        "regular_file")
        self.assertRaises(OSError, os.lstat,
                          "testfiles/restoretarget2/various_file_types/executable")
class FinalCorrupt(PathSetter):
    """Back up onto destinations that already hold conflicting increments."""
    def testBackupOverlay(self):
        """Test backing up onto a directory already backed up for that time

        This will test to see if rdiff-backup will ignore files who
        already have an increment where it wants to put something.

        Just make sure rdiff-backup doesn't exit with an error.
        """
        self.delete_tmpdirs()
        # Seed the destination with a pre-made "corrupt" backup fixture.
        assert not os.system("cp -a testfiles/corruptbackup testfiles/output")
        self.set_connections(None, None, None, None)
        self.exec_rb('testfiles/corruptbackup_source', 'testfiles/output')
    def testBackupOverlayRemote(self):
        """Like above but destination is remote"""
        self.delete_tmpdirs()
        assert not os.system("cp -a testfiles/corruptbackup testfiles/output")
        # "test1/" / '../' set up the remote-destination connection schema
        # -- presumably matching PathSetter.set_connections; confirm there.
        self.set_connections(None, None, "test1/", '../')
        self.exec_rb('testfiles/corruptbackup_source', 'testfiles/output')
if __name__ == "__main__": unittest.main()
......@@ -37,7 +37,7 @@ Globals.no_compression_regexp = \
class inctest(unittest.TestCase):
"""Test the incrementRP function"""
def setUp(self):
pass
Globals.set('isbackup_writer',1)
def testreg(self):
"""Test increment of regular files"""
......
......@@ -192,21 +192,26 @@ class IncrementTest2(PathSetter):
"""After setting connections, etc., run actual test using this"""
Time.setcurtime()
SaveState.init_filenames(1)
_get_main().backup_init_select(Local.inc1rp, Local.rpout)
HighLevel.Mirror(self.inc1rp, self.rpout)
assert CompareRecursive(Local.inc1rp, Local.rpout)
Time.setcurtime()
Time.setprevtime(999500000)
_get_main().backup_init_select(self.inc2rp, self.rpout)
HighLevel.Mirror_and_increment(self.inc2rp, self.rpout, self.rpout_inc)
assert CompareRecursive(Local.inc2rp, Local.rpout)
Time.setcurtime()
Time.setprevtime(999510000)
_get_main().backup_init_select(self.inc3rp, self.rpout)
HighLevel.Mirror_and_increment(self.inc3rp, self.rpout, self.rpout_inc)
assert CompareRecursive(Local.inc3rp, Local.rpout)
Time.setcurtime()
Time.setprevtime(999520000)
_get_main().backup_init_select(self.inc4rp, self.rpout)
HighLevel.Mirror_and_increment(self.inc4rp, self.rpout, self.rpout_inc)
assert CompareRecursive(Local.inc4rp, Local.rpout)
......@@ -415,6 +420,7 @@ class MirrorTest(PathSetter):
def Mirror(self, rpin, rpout, checkpoint = 1):
"""Like HighLevel.Mirror, but run misc_setup first"""
_get_main().misc_setup([rpin, rpout])
_get_main().backup_init_select(rpin, rpout)
HighLevel.Mirror(rpin, rpout, checkpoint)
if __name__ == "__main__": unittest.main()
import unittest
execfile("commontest.py")
rbexec("restore.py")
rbexec("main.py")
Log.setverbosity(7)
lc = Globals.local_connection
......@@ -26,12 +27,15 @@ class RestoreTest(unittest.TestCase):
rpbase = RPath(lc, self.prefix + basename)
rptarget = RPath(lc, "testfiles/outfile")
if rptarget.lstat(): rptarget.delete()
for pair in tuples:
print "Processing file " + pair[0].path
if rptarget.lstat(): rptarget.delete()
rest_time = Time.stringtotime(pair[0].getinctime())
Restore.RestoreFile(rest_time, rpbase, (), incs, rptarget)
sorted_incs = Restore.sortincseq(rest_time, incs)
Restore.RestoreFile(rest_time, rpbase, (), sorted_incs, rptarget)
rptarget.setdata()
if not rptarget.lstat(): assert not pair[1].lstat()
elif not pair[1].lstat(): assert not rptarget.lstat()
else:
assert RPath.cmp(rptarget, pair[1]), \
"%s %s" % (rptarget.path, pair[1].path)
......@@ -39,9 +43,56 @@ class RestoreTest(unittest.TestCase):
"%s %s" % (rptarget.path, pair[1].path)
rptarget.delete()
def testsortincseq(self):
    """Test the Restore.sortincseq function

    This test just makes sure that it comes up with the right
    number of increments for each base name - given a list of
    increments, we should eventually get sorted sequences that
    end in each one (each one will be the last increment once).
    """
    for basename in ['ocaml', 'mf']:
        tuples, incs = self.maketesttuples(basename)
        completed_dict = {}
        # Direct iteration instead of `for i in range(len(tuples))`;
        # the index was only used to fetch tuples[i].
        for pair in tuples:
            rest_time = Time.stringtotime(pair[0].getinctime())
            sorted_incs = Restore.sortincseq(rest_time, incs)
            key = sorted_incs[-1].path
            # Each increment may terminate a sorted sequence only once
            # (`in` replaces the deprecated dict.has_key).
            assert key not in completed_dict
            completed_dict[key] = 1
        # Every increment must have been the final element exactly once.
        for inc in incs: assert completed_dict[inc.path] == 1
def testRestorefiles(self):
    """Testing restoration of files one at a time"""
    # Explicit loop; the list map() would have built was discarded anyway.
    for basename in ("ocaml", "mf"):
        self.restoreonefiletest(basename)
def testRestoreDir(self):
    """Test restoring from a real backup set"""
    # Clear the destination, then restore restoretest3 at time 20000.
    Myrm("testfiles/output")
    InternalRestore(1, 1, "testfiles/restoretest3",
                    "testfiles/output", 20000)
    # The restored tree must match the original increment2 source.
    original = RPath(Globals.local_connection, "testfiles/increment2")
    restored = RPath(Globals.local_connection, "testfiles/output")
    assert CompareRecursive(original, restored)
def testRestoreCorrupt(self):
    """Test restoring a partially corrupt archive

    The problem here is that a directory is missing from what is
    to be restored, but because the previous backup was aborted in
    the middle, some of the files in that directory weren't marked
    as .missing.
    """
    Myrm("testfiles/output")
    InternalRestore(1, 1, "testfiles/restoretest4", "testfiles/output",
                    10000)
    # The output root must exist, but neither leftover path may be restored.
    assert os.lstat("testfiles/output")
    self.assertRaises(OSError, os.lstat, "testfiles/output/tmp")
    self.assertRaises(OSError, os.lstat,
                      "testfiles/output/rdiff-backup")
if __name__ == "__main__": unittest.main()
......@@ -70,17 +70,11 @@ class RORPIterTest(unittest.TestCase):
def compare_no_times(self, src_rp, dest_rp):
"""Compare but disregard directories attributes"""
dsiter1, dsiter2 = map(DestructiveStepping.Iterate_with_Finalizer,
[src_rp, dest_rp], [1, None])
def equal(src_rorp, dest_rorp):
return ((src_rorp.isdir() and dest_rorp.isdir()) or
src_rorp == dest_rorp)
result = Iter.equal(dsiter1, dsiter2, 1, equal)
for i in dsiter1: pass # make sure all files processed anyway
for i in dsiter2: pass
return result
return CompareRecursive(src_rp, dest_rp, None, equal)
class IndexedTupleTest(unittest.TestCase):
......
......@@ -79,7 +79,7 @@ class MatchingTest(unittest.TestCase):
testfiles/select/1/2
testfiles/select/1
testfiles/select/1/2/3
testfiles/select/3/3/3""")
testfiles/select/3/3/2""")
sf = self.Select.filelist_get_sf(fp, 1, "test")
assert sf(self.root) == 1
assert sf(self.makeext("1")) == 1
......@@ -88,6 +88,7 @@ testfiles/select/3/3/3""")
assert sf(self.makeext("2/2")) == None
assert sf(self.makeext("3")) == 1
assert sf(self.makeext("3/3")) == 1
assert sf(self.makeext("3/3/3")) == None
def testFilelistExclude(self):
"""Test included filelist"""
......@@ -95,7 +96,7 @@ testfiles/select/3/3/3""")
testfiles/select/1/2
testfiles/select/1
testfiles/select/1/2/3
testfiles/select/3/3/3""")
testfiles/select/3/3/2""")
sf = self.Select.filelist_get_sf(fp, 0, "test")
assert sf(self.root) == None
assert sf(self.makeext("1")) == 0
......@@ -103,7 +104,8 @@ testfiles/select/3/3/3""")
assert sf(self.makeext("1/2/3")) == 0
assert sf(self.makeext("2/2")) == None
assert sf(self.makeext("3")) == None
assert sf(self.makeext("3/3/3")) == 0
assert sf(self.makeext("3/3/2")) == 0
assert sf(self.makeext("3/3/3")) == None
def testFilelistInclude2(self):
"""testFilelistInclude2 - with modifiers"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment