Commit ad0fe5ab authored by bescoto

Still refactoring for 0.11.1 - prepare to rename highlevel


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@253 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent e95885f3
New in v0.11.1 (2002/12/??)
---------------------------
**Warning** Various features have been removed from this version, so
this is not a safe upgrade.  Also, this version has less error
checking and, if it crashes, may be more prone to leave the
destination directory in an inconsistent state.  I plan to look at
these issues in the next version.  Finally, this version is quite
different from previous ones, so you cannot run version 0.11.1 on one
end of a connection and any previous version on the other side.
The following features have been removed:
--mirror-only option: If you just want to mirror something, use
rsync.  (Or you could use rdiff-backup, delete the rdiff-backup-data
directory afterwards, and update the root mtime; see the sketch after
this list.)
--change-source-perms option: This feature was pretty complicated
to implement, and if something happened to rdiff-backup during a
transfer, the old permissions could not be restored.
All "resume" related functionality, like --checkpoint-interval:
This was complicated to implement, and didn't seem to work all
that well.
Directory statistics file: Although the session statistics file is
still generated, the directory statistics file no longer is,
because the new code structure makes it less convenient.
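As a concrete sketch of the --mirror-only workaround mentioned above
(a minimal sketch; the helper name and paths are hypothetical, and it
assumes a completed rdiff-backup run into dest):

    # Hypothetical sketch: mirror with rdiff-backup, then strip the
    # rdiff-backup-data directory and copy the source root's mtime over.
    import os, shutil

    def strip_backup_data(source, dest):
        shutil.rmtree(os.path.join(dest, "rdiff-backup-data"))
        st = os.lstat(source)
        os.utime(dest, (st.st_atime, st.st_mtime))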
Extensive refactoring. A lot of rdiff-backup's code was structured as
if it was still in one file, so it didn't make enough use of Python's
module system.
Now rdiff-backup writes metadata (uid, gid, mtime, etc.) to a
compressed text file in the rdiff-backup-data directory. Here are
some ramifications:
A user does not need root access on the destination side to record
file ownership information.
Some files may be recognized as not having changed based on this
metadata, so it may not be necessary to traverse the whole mirror
directory. This can reduce file access on the destination side.
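To make the new metadata file concrete, here is a hedged sketch of
dumping one; the snapshot's exact filename and record layout belong to
the new metadata module, so both are illustrative here:

    # Illustrative only: print the text records (uid, gid, mtime, ...)
    # stored in a compressed metadata snapshot.
    import gzip

    def dump_metadata(path):
        fp = gzip.open(path, "rb")
        for line in fp.readlines(): print line[:-1]
        fp.close()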
Fixed bug with the --{include|exclude}-globbing-filelist options
(reported by Claus Herwig).

Added --list-changed-since option to list the files changed since the
given date, and added Bud Bruegger's patch to that.  The format and
information this option provides will probably change in the near
future.
Removed --mirror-only and --change-source-perms options.
Removed all "resume" related functionality, like
--checkpoint-interval.
New in v0.11.0 (2002/10/05)
...
...@@ -150,9 +150,9 @@ ssh_compression = 1
# If true, print statistics after successful backup
print_statistics = None

-# On the reader and writer connections, the following will be
-# replaced by the source and mirror Select objects respectively.
-select_source, select_mirror = None, None
+# On the writer connection, the following will be set to the mirror
+# Select iterator.
+select_mirror = None

# On the backup writer connection, holds the root incrementing branch
# object.  Access is provided to increment error counts.
...@@ -246,18 +246,4 @@ def postset_regexp_local(name, re_string, flags):
    if flags: globals()[name] = re.compile(re_string, flags)
    else: globals()[name] = re.compile(re_string)
def set_select(source, Sel_Obj, rpath, tuplelist, quote_mode, *filelists):
"""Initialize select object using tuplelist
    Note that each list in filelists must be passed as a separate
    argument, so each is recognized as a file by the connection.
    Otherwise we will get an error because a list containing files
    can't be pickled.
"""
global select_source, select_mirror
sel = Sel_Obj(rpath, quote_mode)
sel.ParseArgs(tuplelist, filelists)
if source: select_source = sel
else: select_mirror = sel
...@@ -36,18 +36,88 @@ import Globals, Time, TempFile, rpath, log, robust
# In all of these lists of indicies are the values.  The keys in
# _inode_ ones are (inode, devloc) pairs.
-_src_inode_indicies = {}
-_dest_inode_indicies = {}
+_src_inode_indicies = None
+_dest_inode_indicies = None

# The keys for these two are just indicies.  They share values
# with the earlier dictionaries.
-_src_index_indicies = {}
-_dest_index_indicies = {}
+_src_index_indicies = None
+_dest_index_indicies = None

# When a linked file is restored, its path is added to this dict,
# so it can be found when later paths being restored are linked to
# it.
-_restore_index_path = {}
+_restore_index_path = None
def initialize_dictionaries():
"""Set all the hard link dictionaries to empty"""
global _src_inode_indicies, _dest_inode_indicies
global _src_index_indicies, _dest_index_indicies, _restore_index_path
_src_inode_indicies = {}
_dest_inode_indicies = {}
_src_index_indicies = {}
_dest_index_indicies = {}
_restore_index_path = {}
def clear_dictionaries():
"""Delete all dictionaries"""
global _src_inode_indicies, _dest_inode_indicies
global _src_index_indicies, _dest_index_indicies, _restore_index_path
_src_inode_indicies = _dest_inode_indicies = None
_src_index_indicies = _dest_index_indicies = _restore_index_path = None
# The keys of this dictionary are (inode, devloc) pairs on the source
# side. The values are (numlinks, index) pairs, where numlinks are
# the number of files currently linked to this spot, and index is the
# index of the first file so linked.
_src_inode_index_dict = {}
_dest_inode_index_dict = {}
#def rorp_eq(src_rorp, dest_rorp):
# """Return true if source and dest rorp are equal as far as hardlinking
#
# This also processes the src_rorp, adding it if necessary to the
# inode dictionary.
#
# """
# if not src_rorp.isreg(): return 1 # only reg files can be hard linked
# if src_rorp.getnumlinks() == 1: return dest_rorp.getnumlinks() == 1
#
# src_linked_index = process_rorp(src_rorp, _src_inode_index_dict)
# if dest_rorp.getnumlinks() == 1: return 0
# dest_linked_index = process_rorp(dest_rorp, _dest_inode_index_dict)
# return src_linked_index == dest_linked_index
def process_rorp(rorp, inode_dict):
    """Add inode info and return index rorp is linked to, or None"""
    key_pair = (rorp.getinode(), rorp.getdevloc())
    try: num, linked_index = inode_dict[key_pair]
    except KeyError:
        inode_dict[key_pair] = (1, rorp.index)
        return None
    if num+1 == rorp.getnumlinks(): del inode_dict[key_pair]
    else: inode_dict[key_pair] = (num+1, linked_index)
    return linked_index
def get_linked_index(src_rorp):
    """Return the index a src_rorp is linked to, or None

    Also deletes the src_rorp's entry in the dictionary if we have
    accumulated all the hard link references.

    """
    key_pair = (src_rorp.getinode(), src_rorp.getdevloc())
    try: num, linked_index = _src_inode_index_dict[key_pair]
    except KeyError: return None
    if num == src_rorp.getnumlinks():
        del _src_inode_index_dict[key_pair]
    return linked_index
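A small illustration of the bookkeeping above (the stub class is
hypothetical, mimicking only the rorp methods these functions call):
the first rorp seen on an (inode, devloc) spot records its index,
later rorps on the same spot get that index back, and the entry is
dropped once all links have been seen.

    class _FakeRorp: # hypothetical stand-in for a real rorp
        def __init__(self, index, inode, numlinks):
            self.index, self.inode, self.numlinks = index, inode, numlinks
        def getinode(self): return self.inode
        def getdevloc(self): return 0
        def getnumlinks(self): return self.numlinks

    d = {}
    print process_rorp(_FakeRorp(("a",), 57, 2), d) # None - first link seen
    print process_rorp(_FakeRorp(("b",), 57, 2), d) # ("a",), and d is empty again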
def get_inode_key(rorp):
    """Return rorp's key for _inode_ dictionaries"""
...@@ -100,7 +170,6 @@ def rorp_eq(src_rorp, dest_rorp):
    indicies.

    """
if not src_rorp.index == dest_rorp.index: return None
    if (not src_rorp.isreg() or not dest_rorp.isreg() or
            src_rorp.getnumlinks() == dest_rorp.getnumlinks() == 1):
        return 1 # Hard links don't apply
...
...@@ -23,7 +23,8 @@ from __future__ import generators
import getopt, sys, re, os
from log import Log
-import Globals, Time, SetConnections, selection, robust, rpath, \
-       manage, highlevel, connection, restore, FilenameMapping, Security
+import Globals, Time, SetConnections, selection, robust, rpath, \
+       manage, highlevel, connection, restore, FilenameMapping, \
+       Security, Hardlink

action = None
...@@ -108,8 +109,7 @@ def parse_cmdlineoptions(arglist):
        elif opt == "--no-hard-links": Globals.set('preserve_hardlinks', 0)
        elif opt == "--null-separator": Globals.set("null_separator", 1)
        elif opt == "--parsable-output": Globals.set('parsable_output', 1)
-        elif opt == "--print-statistics":
-            Globals.set('print_statistics', 1)
+        elif opt == "--print-statistics": Globals.set('print_statistics', 1)
        elif opt == "--quoting-char":
            Globals.set('quoting_char', arg)
            Globals.set('quoting_enabled', 1)
...@@ -195,13 +195,11 @@ def misc_setup(rps):
    Time.setcurtime(Globals.current_time)
    FilenameMapping.set_init_quote_vals()
    SetConnections.UpdateGlobal("client_conn", Globals.local_connection)
# This is because I originally didn't think compiled regexps
# could be pickled, and so must be compiled on remote side.
    Globals.postset_regexp('no_compression_regexp',
                           Globals.no_compression_regexp_string)
-    for conn in Globals.connections: robust.install_signal_handlers()
+    for conn in Globals.connections:
+        conn.robust.install_signal_handlers()
+        conn.Hardlink.initialize_dictionaries()
def take_action(rps):
    """Do whatever action says"""
...@@ -238,20 +236,18 @@ def Main(arglist):
def Backup(rpin, rpout):
    """Backup, possibly incrementally, src_path to dest_path."""
    SetConnections.BackupInitConnections(rpin.conn, rpout.conn)
-    backup_init_select(rpin, rpout)
+    backup_set_select(rpin)
    backup_init_dirs(rpin, rpout)
    if prevtime:
        Time.setprevtime(prevtime)
-        highlevel.HighLevel.Mirror_and_increment(rpin, rpout, incdir)
-    else: highlevel.HighLevel.Mirror(rpin, rpout, incdir)
+        highlevel.Mirror_and_increment(rpin, rpout, incdir)
+    else: highlevel.Mirror(rpin, rpout)
    rpout.conn.Main.backup_touch_curmirror_local(rpin, rpout)

-def backup_init_select(rpin, rpout):
-    """Create Select objects on source and dest connections"""
-    rpin.conn.Globals.set_select(1, selection.Select,
-                                 rpin, select_opts, None, *select_files)
-    rpout.conn.Globals.set_select(0, selection.Select,
-                                  rpout, select_mirror_opts, 1)
+def backup_set_select(rpin):
+    """Create Select objects on source connection"""
+    rpin.conn.highlevel.HLSourceStruct.set_source_select(rpin, select_opts,
+                                                         *select_files)
def backup_init_dirs(rpin, rpout):
    """Make sure rpin and rpout are valid, init data dir and logging"""
...@@ -277,10 +273,14 @@ def backup_init_dirs(rpin, rpout):
    if rpout.isdir() and not rpout.listdir(): # rpout is empty dir
        rpout.chmod(0700) # just make sure permissions aren't too lax
    elif not datadir.lstat() and not force: Log.FatalError(
"""Destination directory %s exists, but does not look like a """Destination directory
rdiff-backup directory. Running rdiff-backup like this could mess up
what is currently in it. If you want to update or overwrite it, run %s
rdiff-backup with the --force option.""" % rpout.path)
exists, but does not look like a rdiff-backup directory. Running
rdiff-backup like this could mess up what is currently in it. If you
want to update or overwrite it, run rdiff-backup with the --force
option.""" % rpout.path)
    if not rpout.lstat():
        try: rpout.mkdir()
...@@ -410,9 +410,10 @@ def restore_init_select(rpin, rpout):
    the restore operation isn't.

    """
-    Globals.set_select(1, selection.Select, rpin, select_mirror_opts, None)
-    Globals.set_select(0, selection.Select,
-                       rpout, select_opts, None, *select_files)
+    restore._select_mirror = selection.Select(rpin)
+    restore._select_mirror.ParseArgs(select_mirror_opts, [])
+    restore._select_mirror.parse_rbdir_exclude()
+    restore._select_source = selection.Select(rpout)
def restore_get_root(rpin):
    """Return (mirror root, index) and set the data dir
...@@ -540,7 +541,6 @@ def ListChangedSince(rp):
    root_rid = restore.RestoreIncrementData(index, inc_rpath, inc_list)
    for rid in get_rids_recursive(root_rid):
        if rid.inc_list:
-            if not rid.index: path = "."
-            else: path = "/".join(rid.index)
-            print "%-11s: %s" % (determineChangeType(rid.inc_list), path)
+            print "%-11s: %s" % (determineChangeType(rid.inc_list),
+                                 rid.get_indexpath())
...@@ -42,7 +42,7 @@ def get_delta_sigfileobj(sig_fileobj, rp_new):
def get_delta_sigrp(rp_signature, rp_new):
    """Take signature rp and new rp, return delta file object"""
    Log("Getting delta of %s with signature %s" %
-        (rp_new.path, rp_signature.path), 7)
+        (rp_new.path, rp_signature.get_indexpath()), 7)
    return librsync.DeltaFile(rp_signature.open("rb"), rp_new.open("rb"))

def write_delta_action(basis, new, delta, compress = None):
...
# Copyright 2002 Ben Escoto
#
# This file is part of rdiff-backup.
#
# rdiff-backup is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# rdiff-backup is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rdiff-backup; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""Manage temp files"""
import os, tempfile
from log import Log
import Globals, rpath
# This is a connection-specific list of temp files, to be cleaned
# up before rdiff-backup exits.
_tempfiles = []
# To make collisions less likely, this gets put in the file name
# and incremented whenever a new file is requested.
_tfindex = 0
def new(rp_base, same_dir = 1):
    """Return new tempfile that isn't in use.

    If same_dir, tempfile will be in same directory as rp_base.
    Otherwise, use tempfile module to get filename.

    """
    conn = rp_base.conn
    if conn is not Globals.local_connection:
        return conn.TempFile.new(rp_base, same_dir)

    def find_unused(conn, dir):
        """Find an unused tempfile with connection conn in directory dir"""
        global _tfindex
        while 1:
            if _tfindex > 100000000:
                Log("Resetting index", 2)
                _tfindex = 0
            tf = TempFile(conn, os.path.join(dir,
                                             "rdiff-backup.tmp.%d" % _tfindex))
            _tfindex = _tfindex+1
            if not tf.lstat(): return tf

    if same_dir: tf = find_unused(conn, rp_base.dirsplit()[0])
    else: tf = TempFile(conn, tempfile.mktemp())
    _tempfiles.append(tf)
    return tf
def remove_listing(tempfile):
"""Remove listing of tempfile"""
if Globals.local_connection is not tempfile.conn:
tempfile.conn.TempFile.remove_listing(tempfile)
elif tempfile in _tempfiles: _tempfiles.remove(tempfile)
def delete_all():
"""Delete all remaining tempfiles"""
for tf in _tempfiles[:]: tf.delete()
class TempFile(rpath.RPath):
"""Like an RPath, but keep track of which ones are still here"""
def rename(self, rp_dest):
"""Rename temp file to permanent location, possibly overwriting"""
if not self.lstat(): # "Moving" empty file, so just delete
if rp_dest.lstat(): rp_dest.delete()
remove_listing(self)
return
if self.isdir() and not rp_dest.isdir():
# Cannot move a directory directly over another file
rp_dest.delete()
rpath.rename(self, rp_dest)
# Sometimes this just seems to fail silently, as in one
# hardlinked twin is moved over the other. So check to make
# sure below.
self.setdata()
if self.lstat():
rp_dest.delete()
rpath.rename(self, rp_dest)
self.setdata()
if self.lstat(): raise OSError("Cannot rename tmp file correctly")
remove_listing(self)
def delete(self):
rpath.RPath.delete(self)
remove_listing(self)
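A hedged usage sketch of this module (the file name is illustrative;
new(), setdata() and rename() are defined above): write replacement
contents into a tempfile beside the target, then rename it into place.

    rp = rpath.RPath(Globals.local_connection, "testfiles/somefile")
    tf = new(rp)              # unused rdiff-backup.tmp.N in the same directory
    fp = open(tf.path, "wb")
    fp.write("new contents\n")
    fp.close()
    tf.setdata()              # refresh cached lstat data after writing
    tf.rename(rp)             # replace the target and drop the listing entry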
...@@ -285,10 +285,128 @@ class IncrementITRB(statistics.ITRB):
        self.add_file_stats(branch)
class PatchITRB(statistics.ITRB):
"""Patch an rpath with the given diff iters (use with IterTreeReducer)
The main complication here involves directories. We have to
finish processing the directory after what's in the directory, as
the directory may have inappropriate permissions to alter the
contents or the dir's mtime could change as we change the
contents.
"""
def __init__(self, basis_root_rp):
"""Set basis_root_rp, the base of the tree to be incremented"""
self.basis_root_rp = basis_root_rp
assert basis_root_rp.conn is Globals.local_connection
#statistics.ITRB.__init__(self)
self.dir_replacement, self.dir_update = None, None
self.cached_rp = None
def get_rp_from_root(self, index):
"""Return RPath by adding index to self.basis_root_rp"""
if not self.cached_rp or self.cached_rp.index != index:
self.cached_rp = self.basis_root_rp.new_index(index)
return self.cached_rp
def can_fast_process(self, index, diff_rorp):
"""True if diff_rorp and mirror are not directories"""
rp = self.get_rp_from_root(index)
return not diff_rorp.isdir() and not rp.isdir()
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp (case where neither is directory)"""
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
self.patch_to_temp(rp, diff_rorp, tf)
tf.rename(rp)
def patch_to_temp(self, basis_rp, diff_rorp, new):
"""Patch basis_rp, writing output in new, which doesn't exist yet"""
if diff_rorp.isflaglinked():
Hardlink.link_rp(diff_rorp, new, self.basis_root_rp)
elif diff_rorp.get_attached_filetype() == 'snapshot':
rpath.copy(diff_rorp, new)
else:
assert diff_rorp.get_attached_filetype() == 'diff'
Rdiff.patch_local(basis_rp, diff_rorp, new)
if new.lstat(): rpath.copy_attribs(diff_rorp, new)
def start_process(self, index, diff_rorp):
"""Start processing directory - record information for later"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir()
if diff_rorp.isdir(): self.prepare_dir(diff_rorp, base_rp)
else: self.set_dir_replacement(diff_rorp, base_rp)
def set_dir_replacement(self, diff_rorp, base_rp):
"""Set self.dir_replacement, which holds data until done with dir
This is used when base_rp is a dir, and diff_rorp is not.
"""
assert diff_rorp.get_attached_filetype() == 'snapshot'
self.dir_replacement = TempFile.new(base_rp)
rpath.copy_with_attribs(diff_rorp, self.dir_replacement)
def prepare_dir(self, diff_rorp, base_rp):
"""Prepare base_rp to turn into a directory"""
self.dir_update = diff_rorp.getRORPath() # make copy in case changes
if not base_rp.isdir():
if base_rp.lstat(): base_rp.delete()
base_rp.mkdir()
base_rp.chmod(0700)
def end_process(self):
"""Finish processing directory"""
if self.dir_update:
assert self.base_rp.isdir()
rpath.copy_attribs(self.dir_update, self.base_rp)
else:
assert self.dir_replacement and self.base_rp.isdir()
self.base_rp.rmdir()
self.dir_replacement.rename(self.base_rp)
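For orientation, a hedged sketch of how a PatchITRB would typically be
driven; IterTreeReducer lives alongside ITRBranch in rorpiter, but its
exact constructor and call signature are assumed here:

    # Assumed driving pattern: feed each indexed diff rorp into an
    # IterTreeReducer wrapping PatchITRB, then flush pending directories.
    ITR = rorpiter.IterTreeReducer(PatchITRB, [basis_root_rp])
    for diff_rorp in diff_iter: # diff_iter assumed to come from the source side
        ITR(diff_rorp.index, diff_rorp)
    ITR.Finish() # runs end_process on any directories still open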
class IncrementITRB(PatchITRB):
"""Patch an rpath with the given diff iters and write increments
Like PatchITRB, but this time also write increments.
"""
def __init__(self, basis_root_rp, inc_root_rp):
self.inc_root_rp = inc_root_rp
self.cached_incrp = None
PatchITRB.__init__(self, basis_root_rp)
def get_incrp(self, index):
"""Return inc RPath by adding index to self.basis_root_rp"""
if not self.cached_incrp or self.cached_incrp.index != index:
self.cached_incrp = self.inc_root_rp.new_index(index)
return self.cached_incrp
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp and write increment (neither is dir)"""
rp = self.get_rp_from_root(index)
tf = TempFile.new(rp)
self.patch_to_temp(rp, diff_rorp, tf)
Increment(tf, rp, self.get_incrp(index))
tf.rename(rp)
def start_process(self, index, diff_rorp):
"""Start processing directory"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir()
if diff_rorp.isdir():
Increment(diff_rorp, base_rp, self.get_incrp(index))
self.prepare_dir(diff_rorp, base_rp)
else:
self.set_dir_replacement(diff_rorp, base_rp)
Increment(self.dir_replacement, base_rp, self.get_incrp(index))
class MirrorITRB(statistics.ITRB):
    """Like IncrementITR, but only patch mirror directory, don't increment"""
# This is always None since no increments will be created
incrp = None
    def __init__(self, inc_rpath):
        """Set inc_rpath, an rpath of the base of the inc tree"""
        self.inc_rpath = inc_rpath
...@@ -330,5 +448,3 @@ class MirrorITRB(statistics.ITRB):
        if Globals.sleep_ratio is not None: Time.sleep(Globals.sleep_ratio)
        self.add_file_stats(branch)
...@@ -56,7 +56,7 @@ field names and values.
from __future__ import generators
import re, gzip
-import log, Globals, rpath, Time
+import log, Globals, rpath, Time, robust

class ParsingError(Exception):
    """This is raised when bad or unparsable data is received"""
...@@ -259,7 +259,7 @@ class rorp_extractor:
metadata_rp = None
metadata_fileobj = None

def OpenMetadata(rp = None, compress = 1):
-    """Open the Metadata file for writing"""
+    """Open the Metadata file for writing, return metadata fileobj"""
    global metadata_rp, metadata_fileobj
    assert not metadata_fileobj, "Metadata file already open"
    if rp: metadata_rp = rp
...@@ -276,13 +276,13 @@ def WriteMetadata(rorp):
def CloseMetadata():
    """Close the metadata file"""
-    global metadata_fileobj
+    global metadata_rp, metadata_fileobj
    result = metadata_fileobj.close()
    metadata_fileobj = None
    metadata_rp.setdata()
    return result
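Putting the three calls together, a hedged usage sketch of this
writing API (source_iter, an iterator of rorps over the source tree,
is assumed):

    OpenMetadata(rp)        # open the metadata file rp for writing
    for rorp in source_iter:
        WriteMetadata(rorp) # one text record per file
    CloseMetadata()         # flush, close, and re-stat the metadata rp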
-def GetMetadata(rp = None, restrict_index = None, compressed = None):
+def GetMetadata(rp, restrict_index = None, compressed = None):
    """Return iterator of metadata from given metadata file rp"""
    if compressed is None:
        if rp.isincfile():
...@@ -294,16 +294,17 @@ def GetMetadata(rp = None, restrict_index = None, compressed = None):
    if restrict_index is None: return rorp_extractor(fileobj).iterate()
    else: return rorp_extractor(fileobj).iterate_starting_with(restrict_index)
-def GetMetadata_at_time(rpdir, time, restrict_index = None, rplist = None):
-    """Scan through rpdir, finding metadata file at given time, iterate
+def GetMetadata_at_time(rbdir, time, restrict_index = None, rblist = None):
+    """Scan through rbdir, finding metadata file at given time, iterate

-    If rplist is given, use that instead of listing rpdir.  Time here
+    If rblist is given, use that instead of listing rbdir.  Time here
    is exact, we don't take the next one older or anything.  Returns
    None if no matching metadata found.

    """
-    if rplist is None: rplist = map(lambda x: rpdir.append(x), rpdir.listdir())
-    for rp in rplist:
+    if rblist is None: rblist = map(lambda x: rbdir.append(x),
+                                    robust.listrp(rbdir))
+    for rp in rblist:
        if (rp.isincfile() and rp.getinctype() == "data" and
                rp.getincbase_str() == "mirror_metadata"):
            if Time.stringtotime(rp.getinctime()) == time:
...
...@@ -26,6 +26,11 @@ import Globals, Time, Rdiff, Hardlink, FilenameMapping, SetConnections, \
       rorpiter, selection, destructive_stepping, rpath, lazy
# This should be set to selection.Select objects over the source and
# mirror directories respectively.
_select_source = None
_select_mirror = None
class RestoreError(Exception): pass

def Restore(inc_rpath, mirror, target, rest_time):
...@@ -151,10 +156,10 @@ def yield_rcds(index, mirrorrp, rid, target, rest_time, mirror_time):
    the source directory.

    """
-    select_result = Globals.select_mirror.Select(target)
+    select_result = _select_mirror.Select(target)
    if select_result == 0: return

-    if mirrorrp and not Globals.select_source.Select(mirrorrp):
+    if mirrorrp and not _select_source.Select(mirrorrp):
        mirrorrp = None
    rcd = RestoreCombinedData(rid, mirrorrp, target)
...
...@@ -29,8 +29,9 @@ files), where files is the number of files attached (usually 1 or
"""

from __future__ import generators
-import tempfile, UserList, types, librsync, Globals, Rdiff, \
-       Hardlink, robust, log, static, rpath, iterfile, TempFile
+import os, tempfile, UserList, types
+import librsync, Globals, Rdiff, Hardlink, robust, log, static, \
+       rpath, iterfile, TempFile

class RORPIterException(Exception): pass
...@@ -60,39 +61,6 @@ def FromFile(fileobj):
    """Recover rorp iterator from file interface"""
    return FromRaw(iterfile.IterWrappingFile(fileobj))
def IterateRPaths(base_rp):
"""Return an iterator yielding RPaths with given base rp"""
yield base_rp
if base_rp.isdir():
dirlisting = base_rp.listdir()
dirlisting.sort()
for filename in dirlisting:
for rp in IterateRPaths(base_rp.append(filename)):
yield rp
def Signatures(rp_iter):
"""Yield signatures of rpaths in given rp_iter"""
def error_handler(exc, rp):
log.Log("Error generating signature for %s" % rp.path)
return None
for rp in rp_iter:
if rp.isplaceholder(): yield rp
else:
rorp = rp.getRORPath()
if rp.isreg():
if rp.isflaglinked(): rorp.flaglinked()
else:
fp = robust.check_common_error(
error_handler, Rdiff.get_signature, (rp,))
if fp: rorp.setfile(fp)
else: continue
yield rorp
def GetSignatureIter(base_rp):
"""Return a signature iterator recurring over the base_rp"""
return Signatures(IterateRPaths(base_rp))
def CollateIterators(*rorp_iters):
    """Collate RORPath iterators by index
...@@ -151,28 +119,28 @@ def Collate2Iters(riter1, riter2):
        if not relem1:
            try: relem1 = riter1.next()
            except StopIteration:
-                if relem2: yield IndexedTuple(index2, (None, relem2))
+                if relem2: yield (None, relem2)
                for relem2 in riter2:
-                    yield IndexedTuple(relem2.index, (None, relem2))
+                    yield (None, relem2)
                break
            index1 = relem1.index
        if not relem2:
            try: relem2 = riter2.next()
            except StopIteration:
-                if relem1: yield IndexedTuple(index1, (relem1, None))
+                if relem1: yield (relem1, None)
                for relem1 in riter1:
-                    yield IndexedTuple(relem1.index, (relem1, None))
+                    yield (relem1, None)
                break
            index2 = relem2.index

        if index1 < index2:
-            yield IndexedTuple(index1, (relem1, None))
+            yield (relem1, None)
            relem1 = None
        elif index1 == index2:
-            yield IndexedTuple(index1, (relem1, relem2))
+            yield (relem1, relem2)
            relem1, relem2 = None, None
        else: # index2 is less
-            yield IndexedTuple(index2, (None, relem2))
+            yield (None, relem2)
            relem2 = None
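An illustration of the new pair-yielding behavior (the stub class is
hypothetical; real callers pass rorp iterators): elements with
matching indicies are paired, and gaps become None.

    class _Indexed: # hypothetical stand-in carrying only .index
        def __init__(self, index): self.index = index

    a = iter([_Indexed((1,)), _Indexed((2,))])
    b = iter([_Indexed((2,)), _Indexed((3,))])
    for pair in Collate2Iters(a, b): print pair
    # (1,) only in a -> (elem, None); (2,) in both -> (elem, elem);
    # (3,) only in b -> (None, elem)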
def getnext(iter):
...@@ -181,6 +149,21 @@ def getnext(iter):
    except StopIteration: raise RORPIterException("Unexpected end to iter")
    return next
def get_dissimilar_indicies(src_init_iter, dest_init_iter):
"""Get dissimilar indicies given two rorpiters
Returns an iterator which enumerates the indicies of the rorps
which are different on the source and destination ends.
"""
collated = Collate2Iters(src_init_iter, dest_init_iter)
for src_rorp, dest_rorp in collated:
if not src_rorp: yield dest_rorp.index
elif not dest_rorp: yield src_rorp.index
elif not src_rorp == dest_rorp: yield dest_rorp.index
elif (Globals.preserve_hardlinks and not
Hardlink.rorp_eq(src_rorp, dest_rorp)): yield dest_rorp.index
def GetDiffIter(sig_iter, new_iter):
    """Return delta iterator from sig_iter to new_iter
...@@ -225,13 +208,6 @@ def diffonce(sig_rorp, new_rp):
        return diff_rorp
    else: return new_rp.getRORPath()
def PatchIter(base_rp, diff_iter):
"""Patch the appropriate rps in basis_iter using diff_iter"""
basis_iter = IterateRPaths(base_rp)
collated_iter = CollateIterators(basis_iter, diff_iter)
for basisrp, diff_rorp in collated_iter:
patchonce_action(base_rp, basisrp, diff_rorp).execute()
def patchonce_action(base_rp, basisrp, diff_rorp):
    """Return action patching basisrp using diff_rorp"""
    assert diff_rorp, "Missing diff index %s" % basisrp.index
...@@ -293,91 +269,6 @@ class IndexedTuple(UserList.UserList):
        return "(%s).%s" % (", ".join(map(str, self.data)), self.index)
class DirHandler:
"""Handle directories when entering and exiting in mirror
The problem is that we may need to write to a directory that may
have only read and exec permissions. Also, when leaving a
directory tree, we may have modified the directory and thus
changed the mod and access times. These need to be updated when
leaving.
"""
def __init__(self, rootrp):
"""DirHandler initializer - call with root rpath of mirror dir"""
self.rootrp = rootrp
assert rootrp.index == ()
self.cur_dir_index = None # Current directory we have descended into
self.last_index = None # last index processed
# This dictionary maps indicies to (rpath, (atime, mtime),
# perms) triples. Either or both of the time pair and perms
# can be None, which means not to update the times or the
# perms when leaving. We don't have to update the perms if we
# didn't have to change them in the first place. If a
# directory is explicitly given, then we don't have to update
# anything because it will be done by the normal process.
self.index_dict = {}
def process_old_directories(self, new_dir_index):
"""Update times/permissions for directories we are leaving
Returns greatest index of the current index that has been seen
before (i.e. no need to process up to then as new dir).
"""
if self.cur_dir_index is None: return -1 # no previous directory
i = len(self.cur_dir_index)
while 1:
if new_dir_index[:i] == self.cur_dir_index[:i]:
return i
self.process_old_dir(self.cur_dir_index[:i])
i-=1
def process_old_dir(self, dir_index):
"""Process outstanding changes for given dir index"""
rpath, times, perms = self.index_dict[dir_index]
if times: apply(rpath.settime, times)
if perms: rpath.chmod(perms)
def init_new_dirs(self, rpath, new_dir_index, common_dir_index):
"""Initialize any new directories
Record the time, and change permissions if no write access.
Use rpath if it is given to access permissions and times.
"""
for i in range(common_dir_index, len(new_dir_index)):
process_index = new_dir_index[:i]
if rpath.index == process_index:
self.index_dict[process_index] = (None, None, None)
else:
new_rpath = self.rootrp.new_index(process_index)
if new_rpath.hasfullperms(): perms = None
else: perms = new_rpath.getperms()
times = (new_rpath.getatime(), new_rpath.getmtime())
self.index_dict[process_index] = new_rpath, times, perms
def __call__(self, rpath):
"""Given rpath, process containing directories"""
if rpath.isdir(): new_dir_index = rpath.index
elif not rpath.index: return # no directory contains root
else: new_dir_index = rpath.index[:-1]
common_dir_index = self.process_old_directories(new_dir_index)
self.init_new_dirs(rpath, new_dir_index, common_dir_index)
self.cur_dir_index = new_dir_index
def Finish(self):
"""Process any remaining directories"""
indicies = self.index_dict.keys()
indicies.sort()
assert len(indicies) >= 1, indicies
indicies.reverse()
map(self.process_old_dir, indicies)
def FillInIter(rpiter, rootrp):
    """Given ordered rpiter and rootrp, fill in missing indicies with rpaths
...@@ -396,13 +287,15 @@ def FillInIter(rpiter, rootrp):
    del first_rp
    old_index = cur_index
-    # Now do the others (1,2,3) (1,4,5)
+    # Now do all the other elements
    for rp in rpiter:
        cur_index = rp.index
        if not cur_index[:-1] == old_index[:-1]: # Handle special case quickly
            for i in range(1, len(cur_index)): # i==0 case already handled
                if cur_index[:i] != old_index[:i]:
-                    yield rootrp.new_index(cur_index[:i])
+                    filler_rp = rootrp.new_index(cur_index[:i])
+                    assert filler_rp.isdir(), "This shouldn't be possible"
+                    yield filler_rp
        yield rp
        old_index = cur_index
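For instance (stub class hypothetical, pretending every index names a
directory), filling in an iterator with indicies (), (1, 2), (2, 5) is
expected to also yield the missing parents (1,) and (2,):

    class _Stub: # hypothetical rpath stand-in
        def __init__(self, index): self.index = index
        def isdir(self): return 1
        def new_index(self, index): return _Stub(index)

    root = _Stub(())
    rps = iter([_Stub(()), _Stub((1, 2)), _Stub((2, 5))])
    print [rp.index for rp in FillInIter(rps, root)]
    # expected: [(), (1,), (1, 2), (2,), (2, 5)]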
...@@ -514,9 +407,6 @@ class ITRBranch:
    subclasses this one will probably fill in these functions to do
    more.
It is important that this class be pickable, so keep that in mind
when subclassing (this is used to resume failed sessions).
""" """
base_index = index = None base_index = index = None
finished = None finished = None
...@@ -566,25 +456,3 @@ class ITRBranch: ...@@ -566,25 +456,3 @@ class ITRBranch:
(os.path.join(*index),), 2) (os.path.join(*index),), 2)
class DestructiveSteppingFinalizer(ITRBranch):
"""Finalizer that can work on an iterator of dsrpaths
The reason we have to use an IterTreeReducer is that some files
should be updated immediately, but for directories we sometimes
need to update all the files in the directory before finally
coming back to it.
"""
dsrpath = None
def start_process(self, index, dsrpath):
self.dsrpath = dsrpath
def end_process(self):
if self.dsrpath: self.dsrpath.write_changes()
def can_fast_process(self, index, dsrpath):
return not self.dsrpath.isdir()
def fast_process(self, index, dsrpath):
if self.dsrpath: self.dsrpath.write_changes()
...@@ -71,8 +71,8 @@ def cmpfileobj(fp1, fp2):
def check_for_files(*rps):
    """Make sure that all the rps exist, raise error if not"""
    for rp in rps:
-        if not rp.lstat():
-            raise RPathException("File %s does not exist" % rp.path)
+        if not rp.lstat(): raise RPathException("File %s does not exist"
+                                                % rp.get_indexpath())

def move(rpin, rpout):
    """Move rpin to rpout, renaming if possible"""
...@@ -85,7 +85,8 @@ def copy(rpin, rpout):
    """Copy RPath rpin to rpout.  Works for symlinks, dirs, etc."""
    log.Log("Regular copying %s to %s" % (rpin.index, rpout.path), 6)
    if not rpin.lstat():
-        raise RPathException, ("File %s does not exist" % rpin.index)
+        if rpout.lstat(): rpout.delete()
+        return

    if rpout.lstat():
        if rpin.isreg() or not cmp(rpin, rpout):
...@@ -181,7 +182,7 @@ def cmp_attribs(rp1, rp2):
def copy_with_attribs(rpin, rpout):
    """Copy file and then copy over attributes"""
    copy(rpin, rpout)
-    copy_attribs(rpin, rpout)
+    if rpin.lstat(): copy_attribs(rpin, rpout)

def quick_cmp_with_attribs(rp1, rp2):
    """Quicker version of cmp_with_attribs
...@@ -204,9 +205,11 @@ def rename(rp_source, rp_dest):
    assert rp_source.conn is rp_dest.conn
    log.Log(lambda: "Renaming %s to %s" %
            (rp_source.path, rp_dest.path), 7)
-    rp_source.conn.os.rename(rp_source.path, rp_dest.path)
-    rp_dest.data = rp_source.data
-    rp_source.data = {'type': None}
+    if not rp_source.lstat(): rp_dest.delete()
+    else:
+        rp_source.conn.os.rename(rp_source.path, rp_dest.path)
+        rp_dest.data = rp_source.data
+        rp_source.data = {'type': None}

def tupled_lstat(filename):
    """Like os.lstat, but return only a tuple, or None if os.error
...@@ -286,8 +289,6 @@ class RORPath:
                  (not Globals.change_ownership or self.issym())):
                # Don't compare gid/uid for symlinks or if not change_ownership
                pass
elif key == 'mtime':
log.Log("%s differs only in mtime, skipping" % (self.path,), 2)
            elif key == 'atime' and not Globals.preserve_atime: pass
            elif key == 'devloc' or key == 'inode' or key == 'nlink': pass
            elif key == 'size' and not self.isreg(): pass
...@@ -319,28 +320,10 @@ class RORPath:
        """Reproduce RORPath from __getstate__ output"""
        self.index, self.data = rorp_state

-    def get_rorpath(self):
+    def getRORPath(self):
        """Return new rorpath based on self"""
        return RORPath(self.index, self.data.copy())
def make_placeholder(self):
"""Make rorp into a placeholder
This object doesn't contain any information about the file,
but, when passed along, may show where the previous stages are
in their processing. It is the RORPath equivalent of fiber.
This placeholder size, in conjunction with the placeholder
threshold in Highlevel .. generate_dissimilar seem to yield an
OK tradeoff between unnecessary placeholders and lots of
memory usage, but I'm not sure exactly why.
"""
self.data = {'placeholder': " "*500}
def isplaceholder(self):
"""True if the object is a placeholder"""
return self.data.has_key('placeholder')
    def lstat(self):
        """Returns type of file
...@@ -863,12 +846,6 @@ class RPath(RORPath):
        else: raise RPathException
        self.setdata()
def getRORPath(self, include_contents = None):
"""Return read only version of self"""
rorp = RORPath(self.index, self.data)
if include_contents: rorp.setfile(self.open("rb"))
return rorp
class RPathFileHook:
    """Look like a file, but add closing hook"""
......
...@@ -131,7 +131,7 @@ def InternalRestore(mirror_local, dest_local, mirror_dir, dest_dir, time):
        % (SourceDir, dest_dir)
    mirror_rp, dest_rp = cmd_schemas2rps([mirror_dir, dest_dir], remote_schema)
-    Time.setcurtime()
+    Main.misc_setup([mirror_rp, dest_rp])
    inc = get_increment_rp(mirror_rp, time)
    if inc: Main.Restore(get_increment_rp(mirror_rp, time), dest_rp)
    else: # use alternate syntax
...
import os, unittest
from commontest import *
-from rdiff_backup.rpath import *
-from rdiff_backup import Globals, Hardlink
+from rdiff_backup import Globals, Hardlink, selection, rpath

Log.setverbosity(7)

class HardlinkTest(unittest.TestCase):
    """Test cases for Hard links"""
-    outputrp = RPath(Globals.local_connection, "testfiles/output")
-    hardlink_dir1 = RPath(Globals.local_connection, "testfiles/hardlinks/dir1")
-    hardlink_dir1copy = \
-        RPath(Globals.local_connection, "testfiles/hardlinks/dir1copy")
-    hardlink_dir2 = RPath(Globals.local_connection, "testfiles/hardlinks/dir2")
-    hardlink_dir3 = RPath(Globals.local_connection, "testfiles/hardlinks/dir3")
+    outputrp = rpath.RPath(Globals.local_connection, "testfiles/output")
+    hardlink_dir1 = rpath.RPath(Globals.local_connection,
+                                "testfiles/hardlinks/dir1")
+    hardlink_dir1copy = rpath.RPath(Globals.local_connection,
+                                    "testfiles/hardlinks/dir1copy")
+    hardlink_dir2 = rpath.RPath(Globals.local_connection,
+                                "testfiles/hardlinks/dir2")
+    hardlink_dir3 = rpath.RPath(Globals.local_connection,
+                                "testfiles/hardlinks/dir3")
    def reset_output(self):
        """Erase and recreate testfiles/output directory"""
...@@ -73,7 +76,7 @@ class HardlinkTest(unittest.TestCase):
        """See if the partial inode dictionary is correct"""
        Globals.preserve_hardlinks = 1
        reset_hardlink_dicts()
-        for dsrp in Select(DSRPath(1, self.hardlink_dir3)).set_iter():
+        for dsrp in selection.Select(self.hardlink_dir3).set_iter():
            Hardlink.add_rorp(dsrp, 1)

        assert len(Hardlink._src_inode_indicies.keys()) == 3, \
...@@ -90,7 +93,7 @@ class HardlinkTest(unittest.TestCase):
        """Same as testBuildingDict but test destination building"""
        Globals.preserve_hardlinks = 1
        reset_hardlink_dicts()
-        for dsrp in Select(DSRPath(None, self.hardlink_dir3)).set_iter():
+        for dsrp in selection.Select(self.hardlink_dir3).set_iter():
            Hardlink.add_rorp(dsrp, None)

        assert len(Hardlink._dest_inode_indicies.keys()) == 3, \
...@@ -106,7 +109,7 @@ class HardlinkTest(unittest.TestCase):
    def testCompletedDict(self):
        """See if the hardlink dictionaries are built correctly"""
        reset_hardlink_dicts()
-        for dsrp in Select(DSRPath(1, self.hardlink_dir1)).set_iter():
+        for dsrp in selection.Select(self.hardlink_dir1).set_iter():
            Hardlink.add_rorp(dsrp, 1)

        assert Hardlink._src_inode_indicies == {}, \
            Hardlink._src_inode_indicies
...@@ -119,7 +122,7 @@ class HardlinkTest(unittest.TestCase):
        assert Hardlink._src_index_indicies == dict

        reset_hardlink_dicts()
-        for dsrp in Select(DSRPath(1, self.hardlink_dir2)).set_iter():
+        for dsrp in selection.Select(self.hardlink_dir2).set_iter():
            Hardlink.add_rorp(dsrp, 1)

        assert Hardlink._src_inode_indicies == {}, \
            Hardlink._src_inode_indicies
...
...@@ -166,8 +166,7 @@ class IncrementTest2(PathSetter):
        recovery_out = self.get_dest_rp('testfiles/recovery_out_backup')
        recovery_inc = self.get_dest_rp('testfiles/recovery_out_backup/'
                                        'rdiff-backup-data/increments')
-        HighLevel.Mirror_and_increment(recovery_in, recovery_out,
-                                       recovery_inc)
+        highlevel.Mirror_and_increment(recovery_in, recovery_out, recovery_inc)
        # Should probably check integrity of increments, but for now
        # allow if it doesn't during the Mirror_and_increment
...@@ -183,56 +182,54 @@ class IncrementTest2(PathSetter):
        recovery_out = self.get_dest_rp('testfiles/recovery_out_backup')
        recovery_inc = self.get_dest_rp('testfiles/recovery_out_backup/'
                                        'rdiff-backup-data/increments')
-        HighLevel.Mirror_and_increment(recovery_in, recovery_out,
-                                       recovery_inc)
+        highlevel.Mirror_and_increment(recovery_in, recovery_out, recovery_inc)
        # Should probably check integrity of increments, but for now
        # allow if it doesn't during the Mirror_and_increment
    def runtest(self):
        """After setting connections, etc., run actual test using this"""
        Time.setcurtime()
SaveState.init_filenames()
-        Main.backup_init_select(Local.inc1rp, Local.rpout)
-        HighLevel.Mirror(self.inc1rp, self.rpout)
+        Main.backup_set_select(Local.inc1rp)
+        highlevel.Mirror(self.inc1rp, self.rpout)
        assert CompareRecursive(Local.inc1rp, Local.rpout)

        Time.setcurtime()
        Time.setprevtime(999500000)
-        Main.backup_init_select(self.inc2rp, self.rpout)
-        HighLevel.Mirror_and_increment(self.inc2rp, self.rpout, self.rpout_inc)
+        Main.backup_set_select(self.inc2rp)
+        highlevel.Mirror_and_increment(self.inc2rp, self.rpout, self.rpout_inc)
        assert CompareRecursive(Local.inc2rp, Local.rpout)

        Time.setcurtime()
        Time.setprevtime(999510000)
-        Main.backup_init_select(self.inc3rp, self.rpout)
-        HighLevel.Mirror_and_increment(self.inc3rp, self.rpout, self.rpout_inc)
+        Main.backup_set_select(self.inc3rp)
+        highlevel.Mirror_and_increment(self.inc3rp, self.rpout, self.rpout_inc)
        assert CompareRecursive(Local.inc3rp, Local.rpout)

        Time.setcurtime()
        Time.setprevtime(999520000)
-        Main.backup_init_select(self.inc4rp, self.rpout)
-        HighLevel.Mirror_and_increment(self.inc4rp, self.rpout, self.rpout_inc)
+        Main.backup_set_select(self.inc4rp)
+        highlevel.Mirror_and_increment(self.inc4rp, self.rpout, self.rpout_inc)
        assert CompareRecursive(Local.inc4rp, Local.rpout)

        print "Restoring to self.inc4"
-        HighLevel.Restore(999530000, self.rpout, self.get_inctup(),
-                          self.rpout4)
+        highlevel.Restore(999530000, self.rpout, self.get_inctup(),
+                          self.rpout4)
        assert CompareRecursive(Local.inc4rp, Local.rpout4)

        print "Restoring to self.inc3"
-        HighLevel.Restore(999520000, self.rpout, self.get_inctup(),
-                          self.rpout3)
+        highlevel.Restore(999520000, self.rpout, self.get_inctup(),
+                          self.rpout3)
        assert CompareRecursive(Local.inc3rp, Local.rpout3)

        print "Restoring to self.inc2"
-        HighLevel.Restore(999510000, self.rpout, self.get_inctup(),
-                          self.rpout2)
+        highlevel.Restore(999510000, self.rpout, self.get_inctup(),
+                          self.rpout2)
        assert CompareRecursive(Local.inc2rp, Local.rpout2)

        print "Restoring to self.inc1"
-        HighLevel.Restore(999500000, self.rpout, self.get_inctup(),
-                          self.rpout1)
+        highlevel.Restore(999500000, self.rpout, self.get_inctup(),
+                          self.rpout1)
        assert CompareRecursive(Local.inc1rp, Local.rpout1)
...@@ -296,7 +293,6 @@ class MirrorTest(PathSetter):
        self.setPathnames(None, None, None, None)
        Globals.change_source_perms = None
        Time.setcurtime()
SaveState.init_filenames()
        self.Mirror(self.one_unreadable, self.one_unreadable_out)
        Globals.change_source_perms = 1
        self.Mirror(self.one_unreadable, self.one_unreadable_out)
...@@ -307,7 +303,6 @@ class MirrorTest(PathSetter):
        self.setPathnames('test1', '../', 'test2/tmp', '../../')
        Globals.change_source_perms = None
        Time.setcurtime()
SaveState.init_filenames()
        self.Mirror(self.one_unreadable, self.one_unreadable_out)
        Globals.change_source_perms = 1
        self.Mirror(self.one_unreadable, self.one_unreadable_out)
...@@ -322,7 +317,7 @@ class MirrorTest(PathSetter):
        Globals.change_ownership = 1
        self.refresh(self.rootfiles, self.rootfiles_out,
                     Local.rootfiles, Local.rootfiles_out) # add uid/gid info
-        HighLevel.Mirror(self.rootfiles, self.rootfiles_out)
+        highlevel.Mirror(self.rootfiles, self.rootfiles_out)
        assert CompareRecursive(Local.rootfiles, Local.rootfiles_out)
        Globals.change_ownership = None
        self.refresh(self.rootfiles, self.rootfiles_out,
...@@ -335,7 +330,7 @@ class MirrorTest(PathSetter):
            conn.Globals.set('change_ownership', 1)
        self.refresh(self.rootfiles, self.rootfiles_out,
                     Local.rootfiles, Local.rootfiles_out) # add uid/gid info
-        HighLevel.Mirror(self.rootfiles, self.rootfiles_out)
+        highlevel.Mirror(self.rootfiles, self.rootfiles_out)
        assert CompareRecursive(Local.rootfiles, Local.rootfiles_out)
        for conn in Globals.connections:
            conn.Globals.set('change_ownership', None)
...@@ -377,7 +372,6 @@ class MirrorTest(PathSetter):
    def runtest(self):
        Time.setcurtime()
SaveState.init_filenames()
        assert self.rbdir.lstat()
        self.Mirror(self.inc1rp, self.rpout)
        assert CompareRecursive(Local.inc1rp, Local.rpout)
...@@ -388,11 +382,11 @@ class MirrorTest(PathSetter):
        assert CompareRecursive(Local.inc2rp, Local.rpout)

    def run_partial_test(self):
-        os.system("cp -a testfiles/increment3 testfiles/output")
+        assert not os.system("rm -rf testfiles/output")
+        assert not os.system("cp -a testfiles/increment3 testfiles/output")
        self.reset_rps()

        Time.setcurtime()
SaveState.init_filenames()
        self.Mirror(self.inc1rp, self.rpout)
        #rpath.RPath.copy_attribs(self.inc1rp, self.rpout)
        assert CompareRecursive(Local.inc1rp, Local.rpout)
...@@ -401,10 +395,13 @@ class MirrorTest(PathSetter):
        assert CompareRecursive(Local.inc2rp, Local.rpout)

    def Mirror(self, rpin, rpout):
-        """Like HighLevel.Mirror, but run misc_setup first"""
+        """Like highlevel.Mirror, but run misc_setup first"""
+        Main.force = 1
        Main.misc_setup([rpin, rpout])
-        Main.backup_init_select(rpin, rpout)
-        HighLevel.Mirror(rpin, rpout,
-                         rpout.append_path("rdiff-backup-data/increments"))
+        Main.backup_set_select(rpin)
+        Main.backup_init_dirs(rpin, rpout)
+        highlevel.Mirror(rpin, rpout)
+        Log.close_logfile()
+        Hardlink.clear_dictionaries()

if __name__ == "__main__": unittest.main()
...@@ -33,6 +33,7 @@ class RestoreTest(unittest.TestCase):
        tuples, incs = self.maketesttuples(basename)
        rpbase = rpath.RPath(lc, self.prefix + basename)
        rptarget = rpath.RPath(lc, "testfiles/outfile")
Hardlink.initialize_dictionaries()
        for pair in tuples:
            print "Processing file " + pair[0].path
...