Commit 7c562fd1 authored by 's avatar

Untabified.

parent c58c4d18
......@@ -26,55 +26,55 @@ import stat # v
# a file-like object that captures output, and
# makes sure to flush it always... this could
# be connected to:
# o stdio file
# o low-level file
# o socket channel
# o syslog output...
# o stdio file
# o low-level file
# o socket channel
# o syslog output...
class file_logger:
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
self.file = sys.stdout
else:
self.file = open (file, mode)
else:
self.file = file
self.do_flush = flush
def __repr__ (self):
return '<file logger: %s>' % self.file
def write (self, data):
self.file.write (data)
self.maybe_flush()
def writeline (self, line):
self.file.writeline (line)
self.maybe_flush()
def writelines (self, lines):
self.file.writelines (lines)
self.maybe_flush()
def maybe_flush (self):
if self.do_flush:
self.file.flush()
def flush (self):
self.file.flush()
def softspace (self, *args):
pass
def log (self, message):
if message[-1] not in ('\r', '\n'):
self.write (message + '\n')
else:
self.write (message)
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
self.file = sys.stdout
else:
self.file = open (file, mode)
else:
self.file = file
self.do_flush = flush
def __repr__ (self):
return '<file logger: %s>' % self.file
def write (self, data):
self.file.write (data)
self.maybe_flush()
def writeline (self, line):
self.file.writeline (line)
self.maybe_flush()
def writelines (self, lines):
self.file.writelines (lines)
self.maybe_flush()
def maybe_flush (self):
if self.do_flush:
self.file.flush()
def flush (self):
self.file.flush()
def softspace (self, *args):
pass
def log (self, message):
if message[-1] not in ('\r', '\n'):
self.write (message + '\n')
else:
self.write (message)
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
......@@ -83,64 +83,64 @@ class file_logger:
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
# If freq is non-None we back up "daily", "weekly", or "monthly".
# Else if maxsize is non-None we back up whenever the log gets
# to big. If both are None we never back up.
def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
self.filename = file
self.mode = mode
self.file = open (file, mode)
self.freq = freq
self.maxsize = maxsize
self.rotate_when = self.next_backup(self.freq)
self.do_flush = flush
def __repr__ (self):
return '<rotating-file logger: %s>' % self.file
# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
def next_backup (self, freq):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
if freq == 'daily':
return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
elif freq == 'weekly':
return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1) # wd(monday)==0
elif freq == 'monthly':
return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
else:
return None # not a date-based backup
def maybe_flush (self): # rotate first if necessary
self.maybe_rotate()
if self.do_flush: # from file_logger()
self.file.flush()
def maybe_rotate (self):
if self.freq and time.time() > self.rotate_when:
self.rotate()
self.rotate_when = self.next_backup(self.freq)
elif self.maxsize: # rotate when we get too big
try:
if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
self.rotate()
except os.error: # file not found, probably
self.rotate() # will create a new file
def rotate (self):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
try:
self.file.close()
newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
try:
open(newname, "r").close() # check if file exists
newname = newname + "-%02d%02d%02d" % (hr, min, sec)
except: # YEARMODY is unique
pass
os.rename(self.filename, newname)
self.file = open(self.filename, self.mode)
except:
pass
# If freq is non-None we back up "daily", "weekly", or "monthly".
# Else if maxsize is non-None we back up whenever the log gets
# to big. If both are None we never back up.
def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
self.filename = file
self.mode = mode
self.file = open (file, mode)
self.freq = freq
self.maxsize = maxsize
self.rotate_when = self.next_backup(self.freq)
self.do_flush = flush
def __repr__ (self):
return '<rotating-file logger: %s>' % self.file
# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
def next_backup (self, freq):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
if freq == 'daily':
return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
elif freq == 'weekly':
return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1) # wd(monday)==0
elif freq == 'monthly':
return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
else:
return None # not a date-based backup
def maybe_flush (self): # rotate first if necessary
self.maybe_rotate()
if self.do_flush: # from file_logger()
self.file.flush()
def maybe_rotate (self):
if self.freq and time.time() > self.rotate_when:
self.rotate()
self.rotate_when = self.next_backup(self.freq)
elif self.maxsize: # rotate when we get too big
try:
if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
self.rotate()
except os.error: # file not found, probably
self.rotate() # will create a new file
def rotate (self):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
try:
self.file.close()
newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
try:
open(newname, "r").close() # check if file exists
newname = newname + "-%02d%02d%02d" % (hr, min, sec)
except: # YEARMODY is unique
pass
os.rename(self.filename, newname)
self.file = open(self.filename, self.mode)
except:
pass
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
......@@ -155,108 +155,108 @@ import m_syslog
syslog_logger = m_syslog.syslog_client
class syslog_logger (m_syslog.syslog_client):
def __init__ (self, address, facility='user'):
m_syslog.syslog_client.__init__ (self, address)
self.facility = m_syslog.facility_names[facility]
self.address=address
def __repr__ (self):
return '<syslog logger address=%s>' % (repr(self.address))
def log (self, message):
m_syslog.syslog_client.log (
self,
message,
facility=self.facility,
priority=m_syslog.LOG_INFO
)
def __init__ (self, address, facility='user'):
m_syslog.syslog_client.__init__ (self, address)
self.facility = m_syslog.facility_names[facility]
self.address=address
def __repr__ (self):
return '<syslog logger address=%s>' % (repr(self.address))
def log (self, message):
m_syslog.syslog_client.log (
self,
message,
facility=self.facility,
priority=m_syslog.LOG_INFO
)
# log to a stream socket, asynchronously
class socket_logger (asynchat.async_chat):
def __init__ (self, address):
def __init__ (self, address):
if type(address) == type(''):
self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
if type(address) == type(''):
self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.connect (address)
self.address = address
def __repr__ (self):
return '<socket logger: address=%s>' % (self.address)
self.connect (address)
self.address = address
def __repr__ (self):
return '<socket logger: address=%s>' % (self.address)
def log (self, message):
if message[-2:] != '\r\n':
self.socket.push (message + '\r\n')
else:
self.socket.push (message)
def log (self, message):
if message[-2:] != '\r\n':
self.socket.push (message + '\r\n')
else:
self.socket.push (message)
# log to multiple places
class multi_logger:
def __init__ (self, loggers):
self.loggers = loggers
def __init__ (self, loggers):
self.loggers = loggers
def __repr__ (self):
return '<multi logger: %s>' % (repr(self.loggers))
def __repr__ (self):
return '<multi logger: %s>' % (repr(self.loggers))
def log (self, message):
for logger in self.loggers:
logger.log (message)
def log (self, message):
for logger in self.loggers:
logger.log (message)
class resolving_logger:
"""Feed (ip, message) combinations into this logger to get a
resolved hostname in front of the message. The message will not
be logged until the PTR request finishes (or fails)."""
def __init__ (self, resolver, logger):
self.resolver = resolver
self.logger = logger
class logger_thunk:
def __init__ (self, message, logger):
self.message = message
self.logger = logger
def __call__ (self, host, ttl, answer):
if not answer:
answer = host
self.logger.log ('%s%s' % (answer, self.message))
def log (self, ip, message):
self.resolver.resolve_ptr (
ip,
self.logger_thunk (
message,
self.logger
)
)
"""Feed (ip, message) combinations into this logger to get a
resolved hostname in front of the message. The message will not
be logged until the PTR request finishes (or fails)."""
def __init__ (self, resolver, logger):
self.resolver = resolver
self.logger = logger
class logger_thunk:
def __init__ (self, message, logger):
self.message = message
self.logger = logger
def __call__ (self, host, ttl, answer):
if not answer:
answer = host
self.logger.log ('%s%s' % (answer, self.message))
def log (self, ip, message):
self.resolver.resolve_ptr (
ip,
self.logger_thunk (
message,
self.logger
)
)
class unresolving_logger:
"Just in case you don't want to resolve"
def __init__ (self, logger):
self.logger = logger
"Just in case you don't want to resolve"
def __init__ (self, logger):
self.logger = logger
def log (self, ip, message):
self.logger.log ('%s%s' % (ip, message))
def log (self, ip, message):
self.logger.log ('%s%s' % (ip, message))
def strip_eol (line):
while line and line[-1] in '\r\n':
line = line[:-1]
return line
while line and line[-1] in '\r\n':
line = line[:-1]
return line
class tail_logger:
"Keep track of the last <size> log messages"
def __init__ (self, logger, size=500):
self.size = size
self.logger = logger
self.messages = []
def log (self, message):
self.messages.append (strip_eol (message))
if len (self.messages) > self.size:
del self.messages[0]
self.logger.log (message)
"Keep track of the last <size> log messages"
def __init__ (self, logger, size=500):
self.size = size
self.logger = logger
self.messages = []
def log (self, message):
self.messages.append (strip_eol (message))
if len (self.messages) > self.size:
del self.messages[0]
self.logger.log (message)
......@@ -165,12 +165,12 @@ class ZCatalog(Folder, Persistent, Implicit):
'manage_addIndex', 'manage_delIndexes', 'manage_main',],
['Manager']),
('Search ZCatalog',
['searchResults', '__call__', 'uniqueValuesFor',
'getpath', 'schema', 'indexes', 'index_objects',
'all_meta_types', 'valid_roles', 'resolve_url',
'getobject'],
['Anonymous', 'Manager']),
('Search ZCatalog',
['searchResults', '__call__', 'uniqueValuesFor',
'getpath', 'schema', 'indexes', 'index_objects',
'all_meta_types', 'valid_roles', 'resolve_url',
'getobject'],
['Anonymous', 'Manager']),
)
......@@ -213,7 +213,7 @@ class ZCatalog(Folder, Persistent, Implicit):
""" edit the catalog """
self.threshold = threshold
RESPONSE.redirect(URL1 + '/manage_main?manage_tabs_message=Catalog%20Changed')
RESPONSE.redirect(URL1 + '/manage_main?manage_tabs_message=Catalog%20Changed')
def manage_subbingToggle(self, REQUEST, RESPONSE, URL1):
......@@ -223,7 +223,7 @@ class ZCatalog(Folder, Persistent, Implicit):
else:
self.threshold = 10000
RESPONSE.redirect(URL1 + '/manage_catalogStatus?manage_tabs_message=Catalog%20Changed')
RESPONSE.redirect(URL1 + '/manage_catalogStatus?manage_tabs_message=Catalog%20Changed')
def manage_catalogObject(self, REQUEST, RESPONSE, URL1, urls=None):
......@@ -234,7 +234,7 @@ class ZCatalog(Folder, Persistent, Implicit):
if obj is not None:
self.catalog_object(obj, url)
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Object%20Cataloged')
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Object%20Cataloged')
def manage_uncatalogObject(self, REQUEST, RESPONSE, URL1, urls=None):
......@@ -242,41 +242,41 @@ class ZCatalog(Folder, Persistent, Implicit):
if urls:
for url in urls:
self.uncatalog_object(url)
self.uncatalog_object(url)
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Object%20Uncataloged')
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Object%20Uncataloged')
def manage_catalogReindex(self, REQUEST, RESPONSE, URL1):
""" clear the catalog, then re-index everything """
elapse = time.time()
c_elapse = time.clock()
elapse = time.time()
c_elapse = time.clock()
paths = tuple(self._catalog.paths.values())
self._catalog.clear()
self._catalog.clear()
for p in paths:
obj = self.resolve_url(p, REQUEST)
if obj is not None:
self.catalog_object(obj, p)
for p in paths:
obj = self.resolve_url(p, REQUEST)
if obj is not None:
self.catalog_object(obj, p)
elapse = time.time() - elapse
c_elapse = time.clock() - c_elapse
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=' +
urllib.quote('Catalog Updated<br>Total time: %s<br>Total CPU time: %s' % (`elapse`, `c_elapse`)))
elapse = time.time() - elapse
c_elapse = time.clock() - c_elapse
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=' +
urllib.quote('Catalog Updated<br>Total time: %s<br>Total CPU time: %s' % (`elapse`, `c_elapse`)))
def manage_catalogClear(self, REQUEST, RESPONSE, URL1):
""" clears the whole enchelada """
self._catalog.clear()
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Catalog%20Cleared')
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=Catalog%20Cleared')
def manage_catalogFoundItems(self, REQUEST, RESPONSE, URL2, URL1,
obj_metatypes=None,
obj_metatypes=None,
obj_ids=None, obj_searchterm=None,
obj_expr=None, obj_mtime=None,
obj_mspec=None, obj_roles=None,
......@@ -285,9 +285,9 @@ class ZCatalog(Folder, Persistent, Implicit):
""" Find object according to search criteria and Catalog them
"""
elapse = time.time()
c_elapse = time.clock()
elapse = time.time()
c_elapse = time.clock()
words = 0
path=string.split(URL2, REQUEST.script)[1][1:]
......@@ -305,38 +305,38 @@ class ZCatalog(Folder, Persistent, Implicit):
apply_func=self.catalog_object,
apply_path=path)
elapse = time.time() - elapse
c_elapse = time.clock() - c_elapse
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=' +
urllib.quote('Catalog Updated<br>Total time: %s<br>Total CPU time: %s' % (`elapse`, `c_elapse`)))
elapse = time.time() - elapse
c_elapse = time.clock() - c_elapse
RESPONSE.redirect(URL1 + '/manage_catalogView?manage_tabs_message=' +
urllib.quote('Catalog Updated<br>Total time: %s<br>Total CPU time: %s' % (`elapse`, `c_elapse`)))
def manage_addColumn(self, name, REQUEST, RESPONSE, URL1):
""" add a column """
self._catalog.addColumn(name)
RESPONSE.redirect(URL1 + '/manage_catalogSchema?manage_tabs_message=Column%20Added')
RESPONSE.redirect(URL1 + '/manage_catalogSchema?manage_tabs_message=Column%20Added')
def manage_delColumns(self, names, REQUEST, RESPONSE, URL1):
""" del a column """
for name in names:
self._catalog.delColumn(name)
RESPONSE.redirect(URL1 + '/manage_catalogSchema?manage_tabs_message=Column%20Deleted')
RESPONSE.redirect(URL1 + '/manage_catalogSchema?manage_tabs_message=Column%20Deleted')
def manage_addIndex(self, name, type, REQUEST, RESPONSE, URL1):
""" add an index """
self._catalog.addIndex(name, type)
RESPONSE.redirect(URL1 + '/manage_catalogIndexes?manage_tabs_message=Index%20Added')
RESPONSE.redirect(URL1 + '/manage_catalogIndexes?manage_tabs_message=Index%20Added')
def manage_delIndexes(self, names, REQUEST, RESPONSE, URL1):
""" del an index """
for name in names:
self._catalog.delIndex(name)
RESPONSE.redirect(URL1 + '/manage_catalogIndexes?manage_tabs_message=Index%20Deleted')
RESPONSE.redirect(URL1 + '/manage_catalogIndexes?manage_tabs_message=Index%20Deleted')
def catalog_object(self, obj, uid):
......@@ -372,7 +372,7 @@ class ZCatalog(Folder, Persistent, Implicit):
"""
if REQUEST is None:
REQUEST=self.REQUEST
return self.resolve_url(self.getpath(rid), REQUEST)
return self.resolve_url(self.getpath(rid), REQUEST)
def schema(self):
return self._catalog.schema.keys()
......
......@@ -26,55 +26,55 @@ import stat # v
# a file-like object that captures output, and
# makes sure to flush it always... this could
# be connected to:
# o stdio file
# o low-level file
# o socket channel
# o syslog output...
# o stdio file
# o low-level file
# o socket channel
# o syslog output...
class file_logger:
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
self.file = sys.stdout
else:
self.file = open (file, mode)
else:
self.file = file
self.do_flush = flush
def __repr__ (self):
return '<file logger: %s>' % self.file
def write (self, data):
self.file.write (data)
self.maybe_flush()
def writeline (self, line):
self.file.writeline (line)
self.maybe_flush()
def writelines (self, lines):
self.file.writelines (lines)
self.maybe_flush()
def maybe_flush (self):
if self.do_flush:
self.file.flush()
def flush (self):
self.file.flush()
def softspace (self, *args):
pass
def log (self, message):
if message[-1] not in ('\r', '\n'):
self.write (message + '\n')
else:
self.write (message)
# pass this either a path or a file object.
def __init__ (self, file, flush=1, mode='a'):
if type(file) == type(''):
if (file == '-'):
import sys
self.file = sys.stdout
else:
self.file = open (file, mode)
else:
self.file = file
self.do_flush = flush
def __repr__ (self):
return '<file logger: %s>' % self.file
def write (self, data):
self.file.write (data)
self.maybe_flush()
def writeline (self, line):
self.file.writeline (line)
self.maybe_flush()
def writelines (self, lines):
self.file.writelines (lines)
self.maybe_flush()
def maybe_flush (self):
if self.do_flush:
self.file.flush()
def flush (self):
self.file.flush()
def softspace (self, *args):
pass
def log (self, message):
if message[-1] not in ('\r', '\n'):
self.write (message + '\n')
else:
self.write (message)
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
......@@ -83,64 +83,64 @@ class file_logger:
# would take time, during which medusa would do nothing else.
class rotating_file_logger (file_logger):
# If freq is non-None we back up "daily", "weekly", or "monthly".
# Else if maxsize is non-None we back up whenever the log gets
# to big. If both are None we never back up.
def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
self.filename = file
self.mode = mode
self.file = open (file, mode)
self.freq = freq
self.maxsize = maxsize
self.rotate_when = self.next_backup(self.freq)
self.do_flush = flush
def __repr__ (self):
return '<rotating-file logger: %s>' % self.file
# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
def next_backup (self, freq):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
if freq == 'daily':
return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
elif freq == 'weekly':
return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1) # wd(monday)==0
elif freq == 'monthly':
return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
else:
return None # not a date-based backup
def maybe_flush (self): # rotate first if necessary
self.maybe_rotate()
if self.do_flush: # from file_logger()
self.file.flush()
def maybe_rotate (self):
if self.freq and time.time() > self.rotate_when:
self.rotate()
self.rotate_when = self.next_backup(self.freq)
elif self.maxsize: # rotate when we get too big
try:
if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
self.rotate()
except os.error: # file not found, probably
self.rotate() # will create a new file
def rotate (self):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
try:
self.file.close()
newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
try:
open(newname, "r").close() # check if file exists
newname = newname + "-%02d%02d%02d" % (hr, min, sec)
except: # YEARMODY is unique
pass
os.rename(self.filename, newname)
self.file = open(self.filename, self.mode)
except:
pass
# If freq is non-None we back up "daily", "weekly", or "monthly".
# Else if maxsize is non-None we back up whenever the log gets
# to big. If both are None we never back up.
def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
self.filename = file
self.mode = mode
self.file = open (file, mode)
self.freq = freq
self.maxsize = maxsize
self.rotate_when = self.next_backup(self.freq)
self.do_flush = flush
def __repr__ (self):
return '<rotating-file logger: %s>' % self.file
# We back up at midnight every 1) day, 2) monday, or 3) 1st of month
def next_backup (self, freq):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
if freq == 'daily':
return time.mktime(yr,mo,day+1, 0,0,0, 0,0,-1)
elif freq == 'weekly':
return time.mktime(yr,mo,day-wd+7, 0,0,0, 0,0,-1) # wd(monday)==0
elif freq == 'monthly':
return time.mktime(yr,mo+1,1, 0,0,0, 0,0,-1)
else:
return None # not a date-based backup
def maybe_flush (self): # rotate first if necessary
self.maybe_rotate()
if self.do_flush: # from file_logger()
self.file.flush()
def maybe_rotate (self):
if self.freq and time.time() > self.rotate_when:
self.rotate()
self.rotate_when = self.next_backup(self.freq)
elif self.maxsize: # rotate when we get too big
try:
if os.stat(self.filename)[stat.ST_SIZE] > self.maxsize:
self.rotate()
except os.error: # file not found, probably
self.rotate() # will create a new file
def rotate (self):
(yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime(time.time())
try:
self.file.close()
newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
try:
open(newname, "r").close() # check if file exists
newname = newname + "-%02d%02d%02d" % (hr, min, sec)
except: # YEARMODY is unique
pass
os.rename(self.filename, newname)
self.file = open(self.filename, self.mode)
except:
pass
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.
......@@ -155,108 +155,108 @@ import m_syslog
syslog_logger = m_syslog.syslog_client
class syslog_logger (m_syslog.syslog_client):
def __init__ (self, address, facility='user'):
m_syslog.syslog_client.__init__ (self, address)
self.facility = m_syslog.facility_names[facility]
self.address=address
def __repr__ (self):
return '<syslog logger address=%s>' % (repr(self.address))
def log (self, message):
m_syslog.syslog_client.log (
self,
message,
facility=self.facility,
priority=m_syslog.LOG_INFO
)
def __init__ (self, address, facility='user'):
m_syslog.syslog_client.__init__ (self, address)
self.facility = m_syslog.facility_names[facility]
self.address=address
def __repr__ (self):
return '<syslog logger address=%s>' % (repr(self.address))
def log (self, message):
m_syslog.syslog_client.log (
self,
message,
facility=self.facility,
priority=m_syslog.LOG_INFO
)
# log to a stream socket, asynchronously
class socket_logger (asynchat.async_chat):
def __init__ (self, address):
def __init__ (self, address):
if type(address) == type(''):
self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
if type(address) == type(''):
self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
else:
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.connect (address)
self.address = address
def __repr__ (self):
return '<socket logger: address=%s>' % (self.address)
self.connect (address)
self.address = address
def __repr__ (self):
return '<socket logger: address=%s>' % (self.address)
def log (self, message):
if message[-2:] != '\r\n':
self.socket.push (message + '\r\n')
else:
self.socket.push (message)
def log (self, message):
if message[-2:] != '\r\n':
self.socket.push (message + '\r\n')
else:
self.socket.push (message)
# log to multiple places
class multi_logger:
def __init__ (self, loggers):
self.loggers = loggers
def __init__ (self, loggers):
self.loggers = loggers
def __repr__ (self):
return '<multi logger: %s>' % (repr(self.loggers))
def __repr__ (self):
return '<multi logger: %s>' % (repr(self.loggers))
def log (self, message):
for logger in self.loggers:
logger.log (message)
def log (self, message):
for logger in self.loggers:
logger.log (message)
class resolving_logger:
"""Feed (ip, message) combinations into this logger to get a
resolved hostname in front of the message. The message will not
be logged until the PTR request finishes (or fails)."""
def __init__ (self, resolver, logger):
self.resolver = resolver
self.logger = logger
class logger_thunk:
def __init__ (self, message, logger):
self.message = message
self.logger = logger
def __call__ (self, host, ttl, answer):
if not answer:
answer = host
self.logger.log ('%s%s' % (answer, self.message))
def log (self, ip, message):
self.resolver.resolve_ptr (
ip,
self.logger_thunk (
message,
self.logger
)
)
"""Feed (ip, message) combinations into this logger to get a
resolved hostname in front of the message. The message will not
be logged until the PTR request finishes (or fails)."""
def __init__ (self, resolver, logger):
self.resolver = resolver
self.logger = logger
class logger_thunk:
def __init__ (self, message, logger):
self.message = message
self.logger = logger
def __call__ (self, host, ttl, answer):
if not answer:
answer = host
self.logger.log ('%s%s' % (answer, self.message))
def log (self, ip, message):
self.resolver.resolve_ptr (
ip,
self.logger_thunk (
message,
self.logger
)
)
class unresolving_logger:
"Just in case you don't want to resolve"
def __init__ (self, logger):
self.logger = logger
"Just in case you don't want to resolve"
def __init__ (self, logger):
self.logger = logger
def log (self, ip, message):
self.logger.log ('%s%s' % (ip, message))
def log (self, ip, message):
self.logger.log ('%s%s' % (ip, message))
def strip_eol (line):
while line and line[-1] in '\r\n':
line = line[:-1]
return line
while line and line[-1] in '\r\n':
line = line[:-1]
return line
class tail_logger:
"Keep track of the last <size> log messages"
def __init__ (self, logger, size=500):
self.size = size
self.logger = logger
self.messages = []
def log (self, message):
self.messages.append (strip_eol (message))
if len (self.messages) > self.size:
del self.messages[0]
self.logger.log (message)
"Keep track of the last <size> log messages"
def __init__ (self, logger, size=500):
self.size = size
self.logger = logger
self.messages = []
def log (self, message):
self.messages.append (strip_eol (message))
if len (self.messages) > self.size:
del self.messages[0]
self.logger.log (message)
......@@ -86,15 +86,15 @@ class Error:
class ProtocolError(Error):
# indicates an HTTP protocol error
def __init__(self, url, errcode, errmsg, headers):
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
def __repr__(self):
return (
"<ProtocolError for %s: %s %s>" %
(self.url, self.errcode, self.errmsg)
)
return (
"<ProtocolError for %s: %s %s>" %
(self.url, self.errcode, self.errmsg)
)
class ResponseError(Error):
# indicates a broken response package
......@@ -103,13 +103,13 @@ class ResponseError(Error):
class Fault(Error):
# indicates a XML-RPC fault package
def __init__(self, faultCode, faultString, **extra):
self.faultCode = faultCode
self.faultString = faultString
self.faultCode = faultCode
self.faultString = faultString
def __repr__(self):
return (
"<Fault %s: %s>" %
(self.faultCode, repr(self.faultString))
)
return (
"<Fault %s: %s>" %
(self.faultCode, repr(self.faultString))
)
# --------------------------------------------------------------------
......@@ -121,22 +121,22 @@ class Fault(Error):
class Boolean:
def __init__(self, value = 0):
self.value = (value != 0)
self.value = (value != 0)
def encode(self, out):
out.write("<value><boolean>%d</boolean></value>\n" % self.value)
out.write("<value><boolean>%d</boolean></value>\n" % self.value)
def __repr__(self):
if self.value:
return "<Boolean True at %x>" % id(self)
else:
return "<Boolean False at %x>" % id(self)
if self.value:
return "<Boolean True at %x>" % id(self)
else:
return "<Boolean False at %x>" % id(self)
def __int__(self):
return self.value
return self.value
def __nonzero__(self):
return self.value
return self.value
True, False = Boolean(1), Boolean(0)
......@@ -148,23 +148,23 @@ True, False = Boolean(1), Boolean(0)
class DateTime:
def __init__(self, value = 0):
t = type(value)
if t is not StringType:
if t is not TupleType:
value = time.localtime(value)
value = time.strftime("%Y%m%dT%H:%M:%S", value)
self.value = value
t = type(value)
if t is not StringType:
if t is not TupleType:
value = time.localtime(value)
value = time.strftime("%Y%m%dT%H:%M:%S", value)
self.value = value
def __repr__(self):
return "<DateTime %s at %x>" % (self.value, id(self))
return "<DateTime %s at %x>" % (self.value, id(self))
def decode(self, data):
self.value = string.strip(data)
self.value = string.strip(data)
def encode(self, out):
out.write("<value><dateTime.iso8601>")
out.write(self.value)
out.write("</dateTime.iso8601></value>\n")
out.write("<value><dateTime.iso8601>")
out.write(self.value)
out.write("</dateTime.iso8601></value>\n")
#
# binary data wrapper (NOTE: this is an extension to Userland's
......@@ -173,17 +173,17 @@ class DateTime:
class Binary:
def __init__(self, data=None):
self.data = data
self.data = data
def decode(self, data):
import base64
self.data = base64.decodestring(data)
import base64
self.data = base64.decodestring(data)
def encode(self, out):
import base64, StringIO
out.write("<value><base64>\n")
base64.encode(StringIO.StringIO(self.data), out)
out.write("</base64></value>\n")
import base64, StringIO
out.write("<value><base64>\n")
base64.encode(StringIO.StringIO(self.data), out)
out.write("</base64></value>\n")
WRAPPERS = DateTime, Binary, Boolean
......@@ -194,37 +194,37 @@ WRAPPERS = DateTime, Binary, Boolean
if sgmlop:
class FastParser:
# sgmlop based XML parser. this is typically 15x faster
# than SlowParser...
def __init__(self, target):
# setup callbacks
self.finish_starttag = target.start
self.finish_endtag = target.end
self.handle_data = target.data
# activate parser
self.parser = sgmlop.XMLParser()
self.parser.register(self)
self.feed = self.parser.feed
self.entity = {
"amp": "&", "gt": ">", "lt": "<",
"apos": "'", "quot": '"'
}
def close(self):
try:
self.parser.close()
finally:
self.parser = None # nuke circular reference
def handle_entityref(self, entity):
# <string> entity
try:
self.handle_data(self.entity[entity])
except KeyError:
self.handle_data("&%s;" % entity)
# sgmlop based XML parser. this is typically 15x faster
# than SlowParser...
def __init__(self, target):
# setup callbacks
self.finish_starttag = target.start
self.finish_endtag = target.end
self.handle_data = target.data
# activate parser
self.parser = sgmlop.XMLParser()
self.parser.register(self)
self.feed = self.parser.feed
self.entity = {
"amp": "&", "gt": ">", "lt": "<",
"apos": "'", "quot": '"'
}
def close(self):
try:
self.parser.close()
finally:
self.parser = None # nuke circular reference
def handle_entityref(self, entity):
# <string> entity
try:
self.handle_data(self.entity[entity])
except KeyError:
self.handle_data("&%s;" % entity)
else:
......@@ -235,10 +235,10 @@ class SlowParser(xmllib.XMLParser):
# Python's standard library
def __init__(self, target):
self.unknown_starttag = target.start
self.handle_data = target.data
self.unknown_endtag = target.end
xmllib.XMLParser.__init__(self)
self.unknown_starttag = target.start
self.handle_data = target.data
self.unknown_endtag = target.end
xmllib.XMLParser.__init__(self)
# --------------------------------------------------------------------
......@@ -257,89 +257,89 @@ class Marshaller:
# that's perfectly ok.
def __init__(self):
self.memo = {}
self.data = None
self.memo = {}
self.data = None
dispatch = {}
def dumps(self, values):
    # Marshal either a Fault instance (into a <fault> element) or a
    # tuple of parameters (into a <params> element) and return the
    # resulting XML fragment as a string.
    self.__out = []
    self.write = write = self.__out.append
    if isinstance(values, Fault):
        # fault instance
        write("<fault>\n")
        self.__dump(vars(values))
        write("</fault>\n")
    else:
        # parameter block
        write("<params>\n")
        for v in values:
            write("<param>\n")
            self.__dump(v)
            write("</param>\n")
        write("</params>\n")
    result = string.join(self.__out, "")
    del self.__out, self.write # don't need this any more
    return result
def __dump(self, value):
try:
f = self.dispatch[type(value)]
except KeyError:
raise TypeError, "cannot marshal %s objects" % type(value)
else:
f(self, value)
try:
f = self.dispatch[type(value)]
except KeyError:
raise TypeError, "cannot marshal %s objects" % type(value)
else:
f(self, value)
def dump_int(self, value):
    # emit an XML-RPC <int> value element
    self.write("<value><int>%s</int></value>\n" % value)
dispatch[IntType] = dump_int
def dump_double(self, value):
    # emit an XML-RPC <double> value element
    self.write("<value><double>%s</double></value>\n" % value)
dispatch[FloatType] = dump_double
def dump_string(self, value):
    # emit an XML-RPC <string> value element; escape() handles &, <, >
    self.write("<value><string>%s</string></value>\n" % escape(value))
dispatch[StringType] = dump_string
def container(self, value):
if value:
i = id(value)
if self.memo.has_key(i):
raise TypeError, "cannot marshal recursive data structures"
self.memo[i] = None
if value:
i = id(value)
if self.memo.has_key(i):
raise TypeError, "cannot marshal recursive data structures"
self.memo[i] = None
def dump_array(self, value):
    # emit an XML-RPC <array> element; tuples and lists both map here
    self.container(value)  # recursion check
    write = self.write
    write("<value><array><data>\n")
    for v in value:
        self.__dump(v)
    write("</data></array></value>\n")
dispatch[TupleType] = dump_array
dispatch[ListType] = dump_array
def dump_struct(self, value):
self.container(value)
write = self.write
write("<value><struct>\n")
for k, v in value.items():
write("<member>\n")
if type(k) is not StringType:
raise TypeError, "dictionary key must be string"
write("<name>%s</name>\n" % escape(k))
self.__dump(v)
write("</member>\n")
write("</struct></value>\n")
self.container(value)
write = self.write
write("<value><struct>\n")
for k, v in value.items():
write("<member>\n")
if type(k) is not StringType:
raise TypeError, "dictionary key must be string"
write("<name>%s</name>\n" % escape(k))
self.__dump(v)
write("</member>\n")
write("</struct></value>\n")
dispatch[DictType] = dump_struct
def dump_instance(self, value):
    # check for special wrappers (DateTime, Binary, Boolean) which
    # know how to encode themselves
    if value.__class__ in WRAPPERS:
        value.encode(self)
    else:
        # store instance attributes as a struct (really?)
        self.dump_struct(value.__dict__)
dispatch[InstanceType] = dump_instance
......@@ -356,129 +356,129 @@ class Unmarshaller:
# that's perfectly ok.
def __init__(self):
    # _type: "params" or "fault" once the matching end tag is seen
    self._type = None
    # _stack: decoded values; _marks: stack offsets of open containers
    self._stack = []
    self._marks = []
    # _data: accumulated character data for the current element
    self._data = []
    self._methodname = None
    # bound method shortcut used by the end_* handlers
    self.append = self._stack.append
def close(self):
    # return response code and the actual response
    # (raises ResponseError on an incomplete parse, Fault on a
    # <fault> response)
    if self._type is None or self._marks:
        raise ResponseError()
    if self._type == "fault":
        raise apply(Fault, (), self._stack[0])
    return tuple(self._stack)
def getmethodname(self):
    # name from the <methodName> element, or None for a response
    return self._methodname
#
# event handlers
def start(self, tag, attrs):
    # prepare to handle this element: containers push a mark so the
    # matching end_* handler can slice the value stack
    if tag in ("array", "struct"):
        self._marks.append(len(self._stack))
    self._data = []
    # remember whether this is a bare <value> (see end_value)
    self._value = (tag == "value")
def data(self, text):
    # accumulate character data for the current element
    self._data.append(text)
# class-level tag -> end-handler table; populated below by the
# dispatch["tag"] = end_xxx statements after each handler
dispatch = {}
def end(self, tag):
    # call the appropriate end tag handler; unrecognised tags are
    # silently ignored
    try:
        f = self.dispatch[tag]
    except KeyError:
        pass # unknown tag ?
    else:
        return f(self)
#
# element decoders
def end_boolean(self, join=string.join):
value = join(self._data, "")
if value == "0":
self.append(False)
elif value == "1":
self.append(True)
else:
raise TypeError, "bad boolean value"
self._value = 0
value = join(self._data, "")
if value == "0":
self.append(False)
elif value == "1":
self.append(True)
else:
raise TypeError, "bad boolean value"
self._value = 0
dispatch["boolean"] = end_boolean
def end_int(self, join=string.join):
    # decode <int>/<i4> elements to a Python int
    self.append(int(join(self._data, "")))
    self._value = 0
dispatch["i4"] = end_int
dispatch["int"] = end_int
def end_double(self, join=string.join):
    # decode a <double> element to a Python float
    self.append(float(join(self._data, "")))
    self._value = 0
dispatch["double"] = end_double
def end_string(self, join=string.join):
    # decode a <string> (or struct member <name>) element
    self.append(join(self._data, ""))
    self._value = 0
dispatch["string"] = end_string
dispatch["name"] = end_string # struct keys are always strings
def end_array(self):
    # pop the container mark and collapse everything pushed since
    # the matching <array> into a single list on the stack
    mark = self._marks[-1]
    del self._marks[-1]
    # map arrays to Python lists
    self._stack[mark:] = [self._stack[mark:]]
    self._value = 0
dispatch["array"] = end_array
def end_struct(self):
    # pop the container mark; values since the matching <struct> are
    # alternating name/value pairs, folded into one dictionary
    mark = self._marks[-1]
    del self._marks[-1]
    # map structs to Python dictionaries
    dict = {}
    items = self._stack[mark:]
    for i in range(0, len(items), 2):
        dict[items[i]] = items[i+1]
    self._stack[mark:] = [dict]
    self._value = 0
dispatch["struct"] = end_struct
def end_base64(self, join=string.join):
    # decode a <base64> element into a Binary wrapper
    value = Binary()
    value.decode(join(self._data, ""))
    self.append(value)
    self._value = 0
dispatch["base64"] = end_base64
def end_dateTime(self, join=string.join):
    # decode a <dateTime.iso8601> element into a DateTime wrapper
    # NOTE(review): unlike the other handlers this does not reset
    # self._value -- looks like an oversight, but behavior preserved
    value = DateTime()
    value.decode(join(self._data, ""))
    self.append(value)
dispatch["dateTime.iso8601"] = end_dateTime
def end_value(self):
    # if we stumble upon an value element with no internal
    # elements, treat it as a string element
    if self._value:
        self.end_string()
dispatch["value"] = end_value
def end_params(self):
    # mark the document as an ordinary parameter block
    self._type = "params"
dispatch["params"] = end_params
def end_fault(self):
    # mark the document as a fault response (see close())
    self._type = "fault"
dispatch["fault"] = end_fault
def end_methodName(self, join=string.join):
    # record the called method's name (requests only)
    self._methodname = join(self._data, "")
dispatch["methodName"] = end_methodName
......@@ -490,36 +490,36 @@ def getparser():
# unmarshalling object. return both objects.
target = Unmarshaller()
if FastParser:
return FastParser(target), target
return FastParser(target), target
return SlowParser(target), target
def dumps(params, methodname=None, methodresponse=None):
    # convert a tuple or a fault object to an XML-RPC packet:
    # a <methodCall> when methodname is given, a <methodResponse>
    # when methodresponse is true or params is a Fault, otherwise
    # just the bare <params>/<fault> fragment
    assert type(params) == TupleType or isinstance(params, Fault),\
           "argument must be tuple or Fault instance"

    m = Marshaller()
    data = m.dumps(params)

    # standard XML-RPC wrappings
    if methodname:
        # a method call
        data = (
            "<?xml version='1.0'?>\n"
            "<methodCall>\n"
            "<methodName>%s</methodName>\n"
            "%s\n"
            "</methodCall>\n" % (methodname, data)
            )
    elif methodresponse or isinstance(params, Fault):
        # a method response
        data = (
            "<?xml version='1.0'?>\n"
            "<methodResponse>\n"
            "%s\n"
            "</methodResponse>\n" % data
            )
    return data
def loads(data):
......@@ -539,12 +539,12 @@ class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
    # send: callable issuing the request; name: dotted method name
    self.__send = send
    self.__name = name
def __getattr__(self, name):
    # support "nested" methods (e.g. examples.getStateName) by
    # extending the dotted name
    return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
    # issue the accumulated dotted method name with these arguments
    return self.__send(self.__name, args)
class Transport:
......@@ -554,98 +554,98 @@ class Transport:
# sent as the User-Agent header on every outgoing request (see request())
user_agent = "xmlrpclib.py/%s (by www.pythonware.com)" % __version__
def request(self, host, handler, request_body):
    # issue XML-RPC request: POST request_body to http://host+handler
    # and return the unmarshalled response; raises ProtocolError on
    # any non-200 HTTP status

    import httplib
    h = httplib.HTTP(host)
    h.putrequest("POST", handler)

    # required by HTTP/1.1
    h.putheader("Host", host)

    # required by XML-RPC
    h.putheader("User-Agent", self.user_agent)
    h.putheader("Content-Type", "text/xml")
    h.putheader("Content-Length", str(len(request_body)))

    h.endheaders()

    if request_body:
        h.send(request_body)

    errcode, errmsg, headers = h.getreply()

    if errcode != 200:
        raise ProtocolError(
            host + handler,
            errcode, errmsg,
            headers
            )

    return self.parse_response(h.getfile())
def parse_response(self, f):
    # read response from input file, and parse it; feeds the parser
    # in 1024-byte chunks and returns the unmarshalled result

    p, u = getparser()

    while 1:
        response = f.read(1024)
        if not response:
            break
        p.feed(response)

    f.close()
    p.close()

    return u.close()
class Server:
"""Represents a connection to an XML-RPC server"""
def __init__(self, uri, transport=None):
# establish a "logical" server connection
# establish a "logical" server connection
# get the url
type, uri = urllib.splittype(uri)
if type != "http":
raise IOError, "unsupported XML-RPC protocol"
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
# get the url
type, uri = urllib.splittype(uri)
if type != "http":
raise IOError, "unsupported XML-RPC protocol"
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
transport = Transport()
self.__transport = transport
if transport is None:
transport = Transport()
self.__transport = transport
def __request(self, methodname, params):
# call a method on the remote server
# call a method on the remote server
request = dumps(params, methodname)
request = dumps(params, methodname)
response = self.__transport.request(
self.__host,
self.__handler,
request
)
response = self.__transport.request(
self.__host,
self.__handler,
request
)
if len(response) == 1:
return response[0]
if len(response) == 1:
return response[0]
return response
return response
def __repr__(self):
return (
"<Server proxy for %s%s>" %
(self.__host, self.__handler)
)
return (
"<Server proxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self.__request, name)
# magic method dispatcher
return _Method(self.__request, name)
if __name__ == "__main__":
......@@ -658,6 +658,6 @@ if __name__ == "__main__":
print server
try:
print server.examples.getStateName(41)
print server.examples.getStateName(41)
except Error, v:
print "ERROR", v
print "ERROR", v
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment