Commit f1e93191 authored by ben's avatar ben

Various CPU optimizations


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@126 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent 977188ed
......@@ -40,11 +40,7 @@ class LocalConnection(Connection):
def __getattr__(self, name):
try: return globals()[name]
except KeyError:
builtins = globals()["__builtins__"]
try:
if type(builtins) is types.ModuleType:
return builtins.__dict__[name]
else: return builtins[name]
try: return __builtins__.__dict__[name]
except KeyError: raise NameError, name
def __setattr__(self, name, value):
......
......@@ -105,7 +105,7 @@ class HLSourceStruct:
if dest_sig.isplaceholder(): yield dest_sig
else:
diff = Robust.check_common_error(
error_handler, RORPIter.diffonce, dest_sig, dsrp)
error_handler, RORPIter.diffonce, [dest_sig, dsrp])
if diff: yield diff
if dsrp: finalizer(dsrp.index, dsrp)
finalizer.Finish()
......@@ -237,10 +237,10 @@ class HLDestinationStruct:
Log("Error: %s processing file %s" % (exc, filename), 2)
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
dsrp = Robust.check_common_error(error_handler, patch,
diff_rorp, dsrp)
[diff_rorp, dsrp])
finalizer(dsrp.index, dsrp)
finalizer.Finish()
......@@ -253,7 +253,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, diff_rorp.index)
if diff_rorp and diff_rorp.isplaceholder(): diff_rorp = None
......@@ -279,7 +279,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
index = indexed_tuple.index
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, index)
......
......@@ -196,9 +196,9 @@ class IterTreeReducer:
iterator nature of the connection between hosts and the temporal
order in which the files are processed.
There are four stub functions below: start_process, end_process,
branch_process, and check_for_errors. A class that subclasses
this one will probably fill in these functions to do more.
There are three stub functions below: start_process, end_process,
and branch_process. A class that subclasses this one will
probably fill in these functions to do more.
It is important that this class be pickable, so keep that in mind
when subclassing (this is used to resume failed sessions).
......@@ -207,26 +207,49 @@ class IterTreeReducer:
def __init__(self, *args):
"""ITR initializer"""
self.init_args = args
self.index = None
self.subinstance = None
self.base_index = self.index = None
self.subinstances = [self]
self.finished = None
self.caught_exception, self.start_successful = None, None
self.caught_exception = None
def intree(self, index):
"""Return true if index is still in current tree"""
return self.base_index == index[:len(self.base_index)]
def finish_subinstances(self, index):
"""Run Finish() on all subinstances index has passed
def set_subinstance(self):
"""Return subinstance of same type as self"""
self.subinstance = self.__class__(*self.init_args)
When we pass out of a subinstance's tree, delete it and
process it with the parent. The innermost subinstances will
be the last in the list. Return None if we are out of the
entire tree, and 1 otherwise.
def process_w_subinstance(self, args):
"""Give object to subinstance, if necessary update branch_val"""
if not self.subinstance: self.set_subinstance()
if not self.subinstance(*args):
self.branch_process(self.subinstance)
self.set_subinstance()
assert self.subinstance(*args)
"""
subinstances = self.subinstances
while 1:
to_be_finished = subinstances[-1]
base_index = to_be_finished.base_index
if base_index != index[:len(base_index)]:
# out of the tree, finish with to_be_finished
to_be_finished.call_end_proc()
del subinstances[-1]
if not subinstances: return None
subinstances[-1].branch_process(to_be_finished)
else: return 1
def call_end_proc(self):
"""Runs the end_process on self, checking for errors"""
if self.finished: self.caught_exception = 1
if self.caught_exception: self.log_pref_error(self.base_index)
Robust.check_common_error(self.on_error, self.end_process)
self.finished = 1
def add_subinstance(self):
"""Return subinstance of same type as self, add to subinstances"""
subinst = self.__class__(*self.init_args)
self.subinstances.append(subinst)
return subinst
def process_w_subinstance(self, index, subinst, args):
"""Run start_process on latest subinstance"""
Robust.check_common_error(self.on_error, subinst.start_process, args)
subinst.base_index = index
def start_process(self, *args):
"""Do some initial processing (stub)"""
......@@ -238,31 +261,20 @@ class IterTreeReducer:
def branch_process(self, subinstance):
"""Process a branch right after it is finished (stub)"""
assert subinstance.finished
pass
def check_for_errors(self, function, *args):
"""start/end_process is called by this function
Usually it will distinguish between two types of errors. Some
are serious and will be reraised, others are caught and simply
invalidate the current instance by setting
self.caught_exception.
"""
try: return apply(function, args)
except: raise
def on_error(self, exc, *args):
"""This will be run on any exception in start/end-process"""
pass
def Finish(self):
"""Call at end of sequence to tie everything up"""
if not self.start_successful or self.finished:
self.caught_exception = 1
if self.caught_exception: self.log_prev_error(self.index)
else:
if self.subinstance:
self.subinstance.Finish()
self.branch_process(self.subinstance)
self.check_for_errors(self.end_process)
self.finished = 1
while 1:
to_be_finished = self.subinstances.pop()
to_be_finished.call_end_proc()
if not self.subinstances: break
self.subinstances[-1].branch_process(to_be_finished)
def log_prev_error(self, index):
"""Call function if no pending exception"""
......@@ -280,24 +292,22 @@ class IterTreeReducer:
"""
index = args[0]
assert type(index) is types.TupleType, type(index)
if self.index is None:
self.check_for_errors(self.start_process, *args)
self.start_successful = 1
self.index = self.base_index = index
if self.base_index is None:
self.process_w_subinstance(index, self, args)
self.index = index
return 1
if index <= self.index:
Log("Warning: oldindex %s >= newindex %s" % (self.index, index), 2)
return 1
if not self.intree(index):
self.Finish()
return None
if self.finish_subinstances(index) is None:
return None # We are no longer in the main tree
if self.caught_exception: self.log_prev_error(index)
else: self.process_w_subinstance(args)
else:
subinst = self.add_subinstance()
self.process_w_subinstance(index, subinst, args)
self.index = index
return 1
......@@ -313,7 +323,4 @@ class ErrorITR(IterTreeReducer):
else: filename = "."
Log("Error '%s' processing %s" % (exc, filename), 2)
def check_for_errors(self, function, *args):
"""Catch some non-fatal errors"""
return Robust.check_common_error(self.on_error, function, *args)
......@@ -81,7 +81,21 @@ class Logger:
message)
def __call__(self, message, verbosity):
"""Log message that has verbosity importance"""
"""Log message that has verbosity importance
message can be a string, which is logged as-is, or a function,
which is then called and should return the string to be
logged. We do it this way in case producing the string would
take a significant amount of CPU.
"""
if verbosity > self.verbosity and verbosity > self.term_verbosity:
return
if not type(message) is types.StringType:
assert type(message) is types.FunctionType
message = message()
if verbosity <= self.verbosity: self.log_to_file(message)
if verbosity <= self.term_verbosity:
self.log_to_term(message, verbosity)
......
......@@ -13,8 +13,7 @@ import profile, pstats
profile.run("Globals.Main.Main(%s)" % repr(sys.argv[1:]), "profile-output")
p = pstats.Stats("profile-output")
p.sort_stats('time')
p.print_stats(20)
p.sort_stats('cumulative')
p.print_stats(20)
p.print_stats(40)
#p.print_callers(20)
......@@ -234,7 +234,7 @@ class Robust:
tf.setdata()
return Robust.make_tf_robustaction(init, tf, rp)
def check_common_error(error_handler, function, *args):
def check_common_error(error_handler, function, args = []):
"""Apply function to args, if error, run error_handler on exception
This only catches certain exceptions which seems innocent
......
......@@ -69,7 +69,7 @@ class RORPIter:
if rp.isflaglinked(): rorp.flaglinked()
else:
fp = Robust.check_common_error(
error_handler, Rdiff.get_signature, rp)
error_handler, Rdiff.get_signature, (rp,))
if fp: rorp.setfile(fp)
else: continue
yield rorp
......
......@@ -163,7 +163,7 @@ class RPathStatic:
def rename(rp_source, rp_dest):
"""Rename rp_source to rp_dest"""
assert rp_source.conn is rp_dest.conn
Log("Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
Log(lambda: "Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
rp_source.conn.os.rename(rp_source.path, rp_dest.path)
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
......@@ -422,7 +422,7 @@ class RPath(RORPath):
self.conn = connection
self.index = index
self.base = base
self.path = apply(os.path.join, (base,) + self.index)
if base is not None: self.path = "/".join((base,) + index)
self.file = None
if data or base is None: self.data = data
else: self.setdata()
......@@ -445,7 +445,7 @@ class RPath(RORPath):
"""Reproduce RPath from __getstate__ output"""
self.conn = Globals.local_connection
self.index, self.base, self.data = rpath_state
self.path = apply(os.path.join, (self.base,) + self.index)
self.path = "/".join((self.base,) + self.index)
def setdata(self):
"""Create the data dictionary"""
......@@ -456,8 +456,7 @@ class RPath(RORPath):
data = {}
mode = statblock[stat.ST_MODE]
if stat.S_ISREG(mode):
type = 'reg'
if stat.S_ISREG(mode): type = 'reg'
elif stat.S_ISDIR(mode): type = 'dir'
elif stat.S_ISCHR(mode):
type = 'dev'
......@@ -509,7 +508,7 @@ class RPath(RORPath):
def quote_path(self):
"""Set path from quoted version of index"""
quoted_list = [FilenameMapping.quote(path) for path in self.index]
self.path = apply(os.path.join, [self.base] + quoted_list)
self.path = "/".join([self.base] + quoted_list)
self.setdata()
def chmod(self, permissions):
......@@ -526,7 +525,7 @@ class RPath(RORPath):
def setmtime(self, modtime):
"""Set only modtime (access time to present)"""
Log("Setting time of %s to %d" % (self.path, modtime), 7)
Log(lambda: "Setting time of %s to %d" % (self.path, modtime), 7)
self.conn.os.utime(self.path, (time.time(), modtime))
self.data['mtime'] = modtime
......@@ -594,7 +593,7 @@ class RPath(RORPath):
def isowner(self):
"""Return true if current process is owner of rp or root"""
uid = self.conn.Globals.get('process_uid')
uid = self.conn.os.getuid()
return uid == 0 or uid == self.data['uid']
def isgroup(self):
......@@ -666,8 +665,7 @@ class RPath(RORPath):
def append_path(self, ext, new_index = ()):
"""Like append, but add ext to path instead of to index"""
assert not self.index # doesn't make sense if index isn't ()
return self.__class__(self.conn, os.path.join(self.base, ext),
new_index)
return self.__class__(self.conn, "/".join((self.base, ext)), new_index)
def new_index(self, index):
"""Return similar RPath but with new index"""
......
......@@ -117,13 +117,15 @@ class Select:
if dsrpath.isdir():
for dsrp in self.iterate_in_dir(dsrpath, rec_func, sel_func):
yield dsrp
elif s == 2 and dsrpath.isdir(): # Directory is merely scanned
iid = self.iterate_in_dir(dsrpath, rec_func, sel_func)
try: first = iid.next()
except StopIteration: return # no files inside; skip dsrp
yield dsrpath
yield first
for dsrp in iid: yield dsrp
elif s == 2:
if dsrpath.isdir(): # Directory is merely scanned
iid = self.iterate_in_dir(dsrpath, rec_func, sel_func)
try: first = iid.next()
except StopIteration: return # no files inside; skip dsrp
yield dsrpath
yield first
for dsrp in iid: yield dsrp
else: assert 0, "Invalid selection result %s" % (str(s),)
def iterate_in_dir(self, dsrpath, rec_func, sel_func):
"""Iterate the dsrps in directory dsrpath."""
......@@ -137,7 +139,7 @@ class Select:
else:
for filename in Robust.listrp(dsrpath):
new_dsrp = Robust.check_common_error(
error_handler, dsrpath.append, filename)
error_handler, dsrpath.append, [filename])
if new_dsrp:
for dsrp in rec_func(new_dsrp, rec_func, sel_func):
yield dsrp
......
......@@ -32,21 +32,19 @@ class StatsObj:
('ChangedSourceSize', 1), ('ChangedMirrorSize', 1),
('IncrementFiles', None), ('IncrementFileSize', 1))
# Set all stats to None, indicating info not available
for attr in stat_attrs: locals()[attr] = None
# This is used in get_byte_summary_string below
byte_abbrev_list = ((1024*1024*1024*1024, "TB"),
(1024*1024*1024, "GB"),
(1024*1024, "MB"),
(1024, "KB"))
def __init__(self):
"""Set attributes to None"""
for attr in self.stat_attrs: self.__dict__[attr] = None
def get_stat(self, attribute):
"""Get a statistic"""
try: return self.__dict__[attribute]
except KeyError:
# this may be a hack, but seems no good way to get attrs in python
return eval("self.%s" % attribute)
return self.__dict__[attribute]
def set_stat(self, attr, value):
"""Set attribute to given value"""
......@@ -54,7 +52,7 @@ class StatsObj:
def increment_stat(self, attr):
"""Add 1 to value of attribute"""
self.__dict__[attr] = self.get_stat(attr) + 1
self.__dict__[attr] += 1
def get_stats_line(self, index, use_repr = 1):
"""Return one line abbreviated version of full stats string"""
......@@ -217,8 +215,12 @@ class StatsITR(IterTreeReducer, StatsObj):
This is subclassed by the mirroring and incrementing ITRs.
"""
# zero out file statistics
for attr in StatsObj.stat_file_attrs: locals()[attr] = 0
def __init__(self, *args):
"""StatsITR initializer - zero out statistics"""
attr_dict = self.__dict__
for attr in StatsObj.stat_file_attrs: attr_dict[attr] = 0
self.ElapsedTime = self.Filename = None
IterTreeReducer.__init__(self, *args)
def start_stats(self, mirror_dsrp):
"""Record status of mirror dsrp
......@@ -272,8 +274,7 @@ class StatsITR(IterTreeReducer, StatsObj):
def add_file_stats(self, subinstance):
"""Add all file statistics from subinstance to current totals"""
for attr in self.stat_file_attrs:
self.set_stat(attr,
self.get_stat(attr) + subinstance.get_stat(attr))
self.__dict__[attr] += subinstance.__dict__[attr]
class Stats:
......
......@@ -40,11 +40,7 @@ class LocalConnection(Connection):
def __getattr__(self, name):
try: return globals()[name]
except KeyError:
builtins = globals()["__builtins__"]
try:
if type(builtins) is types.ModuleType:
return builtins.__dict__[name]
else: return builtins[name]
try: return __builtins__.__dict__[name]
except KeyError: raise NameError, name
def __setattr__(self, name, value):
......
......@@ -105,7 +105,7 @@ class HLSourceStruct:
if dest_sig.isplaceholder(): yield dest_sig
else:
diff = Robust.check_common_error(
error_handler, RORPIter.diffonce, dest_sig, dsrp)
error_handler, RORPIter.diffonce, [dest_sig, dsrp])
if diff: yield diff
if dsrp: finalizer(dsrp.index, dsrp)
finalizer.Finish()
......@@ -237,10 +237,10 @@ class HLDestinationStruct:
Log("Error: %s processing file %s" % (exc, filename), 2)
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
dsrp = Robust.check_common_error(error_handler, patch,
diff_rorp, dsrp)
[diff_rorp, dsrp])
finalizer(dsrp.index, dsrp)
finalizer.Finish()
......@@ -253,7 +253,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, diff_rorp.index)
if diff_rorp and diff_rorp.isplaceholder(): diff_rorp = None
......@@ -279,7 +279,7 @@ class HLDestinationStruct:
try:
for indexed_tuple in collated:
Log("Processing %s" % str(indexed_tuple), 7)
Log(lambda: "Processing %s" % str(indexed_tuple), 7)
diff_rorp, dsrp = indexed_tuple
index = indexed_tuple.index
if not dsrp: dsrp = cls.get_dsrp(dest_rpath, index)
......
......@@ -196,9 +196,9 @@ class IterTreeReducer:
iterator nature of the connection between hosts and the temporal
order in which the files are processed.
There are four stub functions below: start_process, end_process,
branch_process, and check_for_errors. A class that subclasses
this one will probably fill in these functions to do more.
There are three stub functions below: start_process, end_process,
and branch_process. A class that subclasses this one will
probably fill in these functions to do more.
It is important that this class be pickable, so keep that in mind
when subclassing (this is used to resume failed sessions).
......@@ -207,26 +207,49 @@ class IterTreeReducer:
def __init__(self, *args):
"""ITR initializer"""
self.init_args = args
self.index = None
self.subinstance = None
self.base_index = self.index = None
self.subinstances = [self]
self.finished = None
self.caught_exception, self.start_successful = None, None
self.caught_exception = None
def intree(self, index):
"""Return true if index is still in current tree"""
return self.base_index == index[:len(self.base_index)]
def finish_subinstances(self, index):
"""Run Finish() on all subinstances index has passed
def set_subinstance(self):
"""Return subinstance of same type as self"""
self.subinstance = self.__class__(*self.init_args)
When we pass out of a subinstance's tree, delete it and
process it with the parent. The innermost subinstances will
be the last in the list. Return None if we are out of the
entire tree, and 1 otherwise.
def process_w_subinstance(self, args):
"""Give object to subinstance, if necessary update branch_val"""
if not self.subinstance: self.set_subinstance()
if not self.subinstance(*args):
self.branch_process(self.subinstance)
self.set_subinstance()
assert self.subinstance(*args)
"""
subinstances = self.subinstances
while 1:
to_be_finished = subinstances[-1]
base_index = to_be_finished.base_index
if base_index != index[:len(base_index)]:
# out of the tree, finish with to_be_finished
to_be_finished.call_end_proc()
del subinstances[-1]
if not subinstances: return None
subinstances[-1].branch_process(to_be_finished)
else: return 1
def call_end_proc(self):
"""Runs the end_process on self, checking for errors"""
if self.finished: self.caught_exception = 1
if self.caught_exception: self.log_pref_error(self.base_index)
Robust.check_common_error(self.on_error, self.end_process)
self.finished = 1
def add_subinstance(self):
"""Return subinstance of same type as self, add to subinstances"""
subinst = self.__class__(*self.init_args)
self.subinstances.append(subinst)
return subinst
def process_w_subinstance(self, index, subinst, args):
"""Run start_process on latest subinstance"""
Robust.check_common_error(self.on_error, subinst.start_process, args)
subinst.base_index = index
def start_process(self, *args):
"""Do some initial processing (stub)"""
......@@ -238,31 +261,20 @@ class IterTreeReducer:
def branch_process(self, subinstance):
"""Process a branch right after it is finished (stub)"""
assert subinstance.finished
pass
def check_for_errors(self, function, *args):
"""start/end_process is called by this function
Usually it will distinguish between two types of errors. Some
are serious and will be reraised, others are caught and simply
invalidate the current instance by setting
self.caught_exception.
"""
try: return apply(function, args)
except: raise
def on_error(self, exc, *args):
"""This will be run on any exception in start/end-process"""
pass
def Finish(self):
"""Call at end of sequence to tie everything up"""
if not self.start_successful or self.finished:
self.caught_exception = 1
if self.caught_exception: self.log_prev_error(self.index)
else:
if self.subinstance:
self.subinstance.Finish()
self.branch_process(self.subinstance)
self.check_for_errors(self.end_process)
self.finished = 1
while 1:
to_be_finished = self.subinstances.pop()
to_be_finished.call_end_proc()
if not self.subinstances: break
self.subinstances[-1].branch_process(to_be_finished)
def log_prev_error(self, index):
"""Call function if no pending exception"""
......@@ -280,24 +292,22 @@ class IterTreeReducer:
"""
index = args[0]
assert type(index) is types.TupleType, type(index)
if self.index is None:
self.check_for_errors(self.start_process, *args)
self.start_successful = 1
self.index = self.base_index = index
if self.base_index is None:
self.process_w_subinstance(index, self, args)
self.index = index
return 1
if index <= self.index:
Log("Warning: oldindex %s >= newindex %s" % (self.index, index), 2)
return 1
if not self.intree(index):
self.Finish()
return None
if self.finish_subinstances(index) is None:
return None # We are no longer in the main tree
if self.caught_exception: self.log_prev_error(index)
else: self.process_w_subinstance(args)
else:
subinst = self.add_subinstance()
self.process_w_subinstance(index, subinst, args)
self.index = index
return 1
......@@ -313,7 +323,4 @@ class ErrorITR(IterTreeReducer):
else: filename = "."
Log("Error '%s' processing %s" % (exc, filename), 2)
def check_for_errors(self, function, *args):
"""Catch some non-fatal errors"""
return Robust.check_common_error(self.on_error, function, *args)
......@@ -81,7 +81,21 @@ class Logger:
message)
def __call__(self, message, verbosity):
"""Log message that has verbosity importance"""
"""Log message that has verbosity importance
message can be a string, which is logged as-is, or a function,
which is then called and should return the string to be
logged. We do it this way in case producing the string would
take a significant amount of CPU.
"""
if verbosity > self.verbosity and verbosity > self.term_verbosity:
return
if not type(message) is types.StringType:
assert type(message) is types.FunctionType
message = message()
if verbosity <= self.verbosity: self.log_to_file(message)
if verbosity <= self.term_verbosity:
self.log_to_term(message, verbosity)
......
......@@ -13,8 +13,7 @@ import profile, pstats
profile.run("Globals.Main.Main(%s)" % repr(sys.argv[1:]), "profile-output")
p = pstats.Stats("profile-output")
p.sort_stats('time')
p.print_stats(20)
p.sort_stats('cumulative')
p.print_stats(20)
p.print_stats(40)
#p.print_callers(20)
......@@ -234,7 +234,7 @@ class Robust:
tf.setdata()
return Robust.make_tf_robustaction(init, tf, rp)
def check_common_error(error_handler, function, *args):
def check_common_error(error_handler, function, args = []):
"""Apply function to args, if error, run error_handler on exception
This only catches certain exceptions which seems innocent
......
......@@ -69,7 +69,7 @@ class RORPIter:
if rp.isflaglinked(): rorp.flaglinked()
else:
fp = Robust.check_common_error(
error_handler, Rdiff.get_signature, rp)
error_handler, Rdiff.get_signature, (rp,))
if fp: rorp.setfile(fp)
else: continue
yield rorp
......
......@@ -163,7 +163,7 @@ class RPathStatic:
def rename(rp_source, rp_dest):
"""Rename rp_source to rp_dest"""
assert rp_source.conn is rp_dest.conn
Log("Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
Log(lambda: "Renaming %s to %s" % (rp_source.path, rp_dest.path), 7)
rp_source.conn.os.rename(rp_source.path, rp_dest.path)
rp_dest.data = rp_source.data
rp_source.data = {'type': None}
......@@ -422,7 +422,7 @@ class RPath(RORPath):
self.conn = connection
self.index = index
self.base = base
self.path = apply(os.path.join, (base,) + self.index)
if base is not None: self.path = "/".join((base,) + index)
self.file = None
if data or base is None: self.data = data
else: self.setdata()
......@@ -445,7 +445,7 @@ class RPath(RORPath):
"""Reproduce RPath from __getstate__ output"""
self.conn = Globals.local_connection
self.index, self.base, self.data = rpath_state
self.path = apply(os.path.join, (self.base,) + self.index)
self.path = "/".join((self.base,) + self.index)
def setdata(self):
"""Create the data dictionary"""
......@@ -456,8 +456,7 @@ class RPath(RORPath):
data = {}
mode = statblock[stat.ST_MODE]
if stat.S_ISREG(mode):
type = 'reg'
if stat.S_ISREG(mode): type = 'reg'
elif stat.S_ISDIR(mode): type = 'dir'
elif stat.S_ISCHR(mode):
type = 'dev'
......@@ -509,7 +508,7 @@ class RPath(RORPath):
def quote_path(self):
"""Set path from quoted version of index"""
quoted_list = [FilenameMapping.quote(path) for path in self.index]
self.path = apply(os.path.join, [self.base] + quoted_list)
self.path = "/".join([self.base] + quoted_list)
self.setdata()
def chmod(self, permissions):
......@@ -526,7 +525,7 @@ class RPath(RORPath):
def setmtime(self, modtime):
"""Set only modtime (access time to present)"""
Log("Setting time of %s to %d" % (self.path, modtime), 7)
Log(lambda: "Setting time of %s to %d" % (self.path, modtime), 7)
self.conn.os.utime(self.path, (time.time(), modtime))
self.data['mtime'] = modtime
......@@ -594,7 +593,7 @@ class RPath(RORPath):
def isowner(self):
"""Return true if current process is owner of rp or root"""
uid = self.conn.Globals.get('process_uid')
uid = self.conn.os.getuid()
return uid == 0 or uid == self.data['uid']
def isgroup(self):
......@@ -666,8 +665,7 @@ class RPath(RORPath):
def append_path(self, ext, new_index = ()):
"""Like append, but add ext to path instead of to index"""
assert not self.index # doesn't make sense if index isn't ()
return self.__class__(self.conn, os.path.join(self.base, ext),
new_index)
return self.__class__(self.conn, "/".join((self.base, ext)), new_index)
def new_index(self, index):
"""Return similar RPath but with new index"""
......
......@@ -117,13 +117,15 @@ class Select:
if dsrpath.isdir():
for dsrp in self.iterate_in_dir(dsrpath, rec_func, sel_func):
yield dsrp
elif s == 2 and dsrpath.isdir(): # Directory is merely scanned
iid = self.iterate_in_dir(dsrpath, rec_func, sel_func)
try: first = iid.next()
except StopIteration: return # no files inside; skip dsrp
yield dsrpath
yield first
for dsrp in iid: yield dsrp
elif s == 2:
if dsrpath.isdir(): # Directory is merely scanned
iid = self.iterate_in_dir(dsrpath, rec_func, sel_func)
try: first = iid.next()
except StopIteration: return # no files inside; skip dsrp
yield dsrpath
yield first
for dsrp in iid: yield dsrp
else: assert 0, "Invalid selection result %s" % (str(s),)
def iterate_in_dir(self, dsrpath, rec_func, sel_func):
"""Iterate the dsrps in directory dsrpath."""
......@@ -137,7 +139,7 @@ class Select:
else:
for filename in Robust.listrp(dsrpath):
new_dsrp = Robust.check_common_error(
error_handler, dsrpath.append, filename)
error_handler, dsrpath.append, [filename])
if new_dsrp:
for dsrp in rec_func(new_dsrp, rec_func, sel_func):
yield dsrp
......
......@@ -32,21 +32,19 @@ class StatsObj:
('ChangedSourceSize', 1), ('ChangedMirrorSize', 1),
('IncrementFiles', None), ('IncrementFileSize', 1))
# Set all stats to None, indicating info not available
for attr in stat_attrs: locals()[attr] = None
# This is used in get_byte_summary_string below
byte_abbrev_list = ((1024*1024*1024*1024, "TB"),
(1024*1024*1024, "GB"),
(1024*1024, "MB"),
(1024, "KB"))
def __init__(self):
"""Set attributes to None"""
for attr in self.stat_attrs: self.__dict__[attr] = None
def get_stat(self, attribute):
"""Get a statistic"""
try: return self.__dict__[attribute]
except KeyError:
# this may be a hack, but seems no good way to get attrs in python
return eval("self.%s" % attribute)
return self.__dict__[attribute]
def set_stat(self, attr, value):
"""Set attribute to given value"""
......@@ -54,7 +52,7 @@ class StatsObj:
def increment_stat(self, attr):
"""Add 1 to value of attribute"""
self.__dict__[attr] = self.get_stat(attr) + 1
self.__dict__[attr] += 1
def get_stats_line(self, index, use_repr = 1):
"""Return one line abbreviated version of full stats string"""
......@@ -217,8 +215,12 @@ class StatsITR(IterTreeReducer, StatsObj):
This is subclassed by the mirroring and incrementing ITRs.
"""
# zero out file statistics
for attr in StatsObj.stat_file_attrs: locals()[attr] = 0
def __init__(self, *args):
"""StatsITR initializer - zero out statistics"""
attr_dict = self.__dict__
for attr in StatsObj.stat_file_attrs: attr_dict[attr] = 0
self.ElapsedTime = self.Filename = None
IterTreeReducer.__init__(self, *args)
def start_stats(self, mirror_dsrp):
"""Record status of mirror dsrp
......@@ -272,8 +274,7 @@ class StatsITR(IterTreeReducer, StatsObj):
def add_file_stats(self, subinstance):
"""Add all file statistics from subinstance to current totals"""
for attr in self.stat_file_attrs:
self.set_stat(attr,
self.get_stat(attr) + subinstance.get_stat(attr))
self.__dict__[attr] += subinstance.__dict__[attr]
class Stats:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment