Commit 24d566b4 authored by Hanno Schlichting

Split out Lifetime, webdav and ZServer packages into a ZServer project.

Moved to zopefoundation/ZServer@23f8ec7b39c9558bbce83e66b700b8c26693255a.
parent d1536a57
......@@ -28,6 +28,8 @@ Features Added
Restructuring
+++++++++++++
- Split out Lifetime, webdav and ZServer packages into a ZServer project.
- Move webdav's EtagSupport, Lockable and LockItem into OFS.
- Split `Products.TemporaryFolder` and `Products.ZODBMountPoint` into
......
......@@ -18,6 +18,7 @@ parts =
requirements
sources-dir = develop
auto-checkout =
ZServer
[test]
......@@ -72,6 +73,7 @@ eggs =
Products.ZCTextIndex
Record
tempstorage
ZServer
zLOG
......
......@@ -33,6 +33,7 @@ Products.ZCTextIndex = git ${remotes:github}/Products.ZCTextIndex pushurl=${remo
Record = git ${remotes:github}/Record pushurl=${remotes:github_push}/Record
tempstorage = git ${remotes:github}/tempstorage pushurl=${remotes:github_push}/tempstorage
zLOG = git ${remotes:github}/zLOG pushurl=${remotes:github_push}/zLOG
ZServer = git ${remotes:github}/ZServer pushurl=${remotes:github_push}/ZServer
# ZTK
zope.annotation = git ${remotes:github}/zope.annotation
......
import asyncore
import logging
import sys
import time
from Signals.threads import dump_threads
logger = logging.getLogger("Z2")
_shutdown_phase = 0
_shutdown_timeout = 30 # seconds per phase
# The shutdown phase counts up from 0 to 4.
#
# 0 Not yet terminating. running in main loop
#
# 1 Loss of service is imminent. Prepare any front-end proxies for this
# happening by stopping any ICP servers, so that they can choose to send
# requests to other Zope servers in the cluster.
#
# 2 Stop accepting any new requests.
#
# 3 Wait for all old requests to have been processed
#
# 4 Already terminated
#
# It is up to individual socket handlers to implement these actions by
# providing the 'clean_shutdown_control' method. This is called intermittently
# during shutdown with two parameters: the current phase number and the amount
# of time spent in that phase so far. The method should return true if it does
# not yet want shutdown to proceed to the next phase.
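# Illustrative sketch (not part of the original module): a dispatcher that
# takes part in this protocol might implement the hook roughly as below. The
# class name and the `active_requests` attribute are hypothetical.
class _ExampleShutdownAwareServer(asyncore.dispatcher):

    active_requests = ()

    def clean_shutdown_control(self, phase, time_in_this_phase):
        if phase == 1:
            # Give front-end proxies a moment to notice we have gone quiet.
            return time_in_this_phase < 2.0
        elif phase == 2:
            # Stop accepting new connections; no veto.
            self.accepting = 0
        elif phase == 3:
            # Veto until in-flight requests have drained; the caller still
            # enforces _shutdown_timeout, so this cannot block forever.
            return len(self.active_requests) > 0
        return 0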
def shutdown(exit_code, fast=0):
global _shutdown_phase
global _shutdown_timeout
if _shutdown_phase == 0:
# Thread safety? Probably no need to care
from Zope2.Startup import config
config.ZSERVER_EXIT_CODE = exit_code
_shutdown_phase = 1
if fast:
# Someone wants us to shutdown fast. This is hooked into SIGTERM - so
# possibly the system is going down and we can expect a SIGKILL within
# a few seconds. Limit each shutdown phase to one second. This is fast
# enough, but still clean.
_shutdown_timeout = 1.0
def loop():
# Run the main loop until someone calls shutdown()
lifetime_loop()
# Gradually close sockets in the right order, while running a select
# loop to allow remaining requests to trickle away.
graceful_shutdown_loop()
def lifetime_loop():
# The main loop. Stay in here until we need to shutdown
map = asyncore.socket_map
timeout = 30.0
while map and _shutdown_phase == 0:
asyncore.poll(timeout, map)
def graceful_shutdown_loop():
# The shutdown loop. Allow various services to shutdown gradually.
global _shutdown_phase
timestamp = time.time()
timeout = 1.0
map = asyncore.socket_map
while map and _shutdown_phase < 4:
time_in_this_phase = time.time() - timestamp
veto = 0
for fd, obj in map.items():
try:
fn = getattr(obj, 'clean_shutdown_control')
except AttributeError:
pass
else:
try:
veto = veto or fn(_shutdown_phase, time_in_this_phase)
except:
obj.handle_error()
if veto and time_in_this_phase < _shutdown_timeout:
# Any open socket handler can veto moving on to the next shutdown
# phase. (but not forever)
asyncore.poll(timeout, map)
else:
# No vetoes? That is one step closer to shutting down
_shutdown_phase += 1
timestamp = time.time()
def shutdownFastHandler():
"""Shutdown cleanly on SIGTERM. This is registered first,
so it should be called after all other handlers."""
logger.info("Shutting down fast")
shutdown(0, fast=1)
def shutdownHandler():
"""Shutdown cleanly on SIGINT. This is registered first,
so it should be called after all other handlers."""
logger.info("Shutting down")
sys.exit(0)
def restartHandler():
"""Restart cleanly on SIGHUP. This is registered first, so it
should be called after all other SIGHUP handlers."""
logger.info("Restarting")
shutdown(1)
def showStacks():
"""Dump a stracktrace of all threads on the console."""
print(dump_threads())
sys.stdout.flush()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
A logging module which handles ZServer access log messages.
This depends on Vinay Sajip's PEP 282 logging module.
"""
from ZServer.BaseLogger import BaseLogger
class AccessLogger(BaseLogger):
def __init__(self):
BaseLogger.__init__(self, 'access')
def log(self, message):
if not self.logger.handlers: # don't log if we have no handlers
return
if message.endswith('\n'):
message = message[:-1]
self.logger.warn(message)
access_logger = AccessLogger()
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
An abstract logger meant to provide features to the access logger and
the debug logger.
"""
import logging
class BaseLogger:
def __init__(self, name):
self.logger = logging.getLogger(name)
self.logger.propagate = False
def reopen(self):
for handler in self.logger.handlers:
if hasattr(handler, 'reopen') and callable(handler.reopen):
handler.reopen()
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
""" Zope clock server. Generate a faux HTTP request on a regular basis
by coopting the asyncore API. """
import posixpath
import os
import socket
import time
import StringIO
import asyncore
from ZServer.medusa.http_server import http_request
from ZServer.medusa.default_handler import unquote
from ZServer.PubCore import handle
from ZServer.HTTPResponse import make_response
from ZPublisher.HTTPRequest import HTTPRequest
def timeslice(period, when=None, t=time.time):
if when is None:
when = t()
return when - (when % period)
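# For example (illustrative values): timeslice(60, when=125) returns 120,
# i.e. the start of the 60-second slice containing `when`.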
class LogHelper:
def __init__(self, logger):
self.logger = logger
def log(self, ip, msg, **kw):
self.logger.log(ip + ' ' + msg)
class DummyChannel:
# we need this minimal do-almost-nothing channel class to appease medusa
addr = ['127.0.0.1']
closed = 1
def __init__(self, server):
self.server = server
def push_with_producer(self):
pass
def close_when_done(self):
pass
class ClockServer(asyncore.dispatcher):
# prototype request environment
_ENV = dict(REQUEST_METHOD = 'GET',
SERVER_PORT = 'Clock',
SERVER_NAME = 'Zope Clock Server',
SERVER_SOFTWARE = 'Zope',
SERVER_PROTOCOL = 'HTTP/1.0',
SCRIPT_NAME = '',
GATEWAY_INTERFACE='CGI/1.1',
REMOTE_ADDR = '0')
# required by ZServer
SERVER_IDENT = 'Zope Clock'
def __init__ (self, method, period=60, user=None, password=None,
host=None, logger=None, handler=None):
self.period = period
self.method = method
self.last_slice = timeslice(period)
h = self.headers = []
h.append('User-Agent: Zope Clock Server Client')
h.append('Accept: text/html,text/plain')
if not host:
host = socket.gethostname()
h.append('Host: %s' % host)
auth = False
if user and password:
encoded = ('%s:%s' % (user, password)).encode('base64')
h.append('Authorization: Basic %s' % encoded)
auth = True
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.logger = LogHelper(logger)
self.log_info('Clock server for "%s" started (user: %s, period: %s)'
% (method, auth and user or 'Anonymous', self.period))
if handler is None:
# for unit testing
handler = handle
self.zhandler = handler
def get_requests_and_response(self):
out = StringIO.StringIO()
s_req = '%s %s HTTP/%s' % ('GET', self.method, '1.0')
req = http_request(DummyChannel(self), s_req, 'GET', self.method,
'1.0', self.headers)
env = self.get_env(req)
resp = make_response(req, env)
zreq = HTTPRequest(out, env, resp)
return req, zreq, resp
def get_env(self, req):
env = self._ENV.copy()
(path, params, query, fragment) = req.split_uri()
if params:
path = path + params # undo medusa bug
while path and path[0] == '/':
path = path[1:]
if '%' in path:
path = unquote(path)
if query:
# ZPublisher doesn't want the leading '?'
query = query[1:]
env['PATH_INFO']= '/' + path
env['PATH_TRANSLATED']= posixpath.normpath(
posixpath.join(os.getcwd(), env['PATH_INFO']))
if query:
env['QUERY_STRING'] = query
env['channel.creation_time']=time.time()
for header in req.header:
key,value = header.split(":",1)
key = key.upper()
value = value.strip()
key = 'HTTP_%s' % ("_".join(key.split( "-")))
if value:
env[key]=value
return env
def readable(self):
# generate a request at most once every self.period seconds
slice = timeslice(self.period)
if slice != self.last_slice:
# no need for thread safety here, as we're only ever in one thread
self.last_slice = slice
req, zreq, resp = self.get_requests_and_response()
self.zhandler('Zope2', zreq, resp)
return False
def handle_read(self):
return True
def handle_write (self):
self.log_info('unexpected write event', 'warning')
return True
def writable(self):
return False
def handle_error (self): # don't close the socket on error
(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info('Problem in Clock (%s:%s %s)' % (t, v, tbinfo),
'error')
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Logs debugging information about how ZServer is handling requests
and responses. This log can be used to help locate troublesome requests.
The format of a log message is:
<code> <request id> <time> <data>
where:
<code> is B for begin, I for received input, A for received output,
E for sent output.
<request id> is a unique request id.
<time> is the local time in ISO 8601 format.
<data> is the HTTP method and the PATH INFO for B, the size of the
input for I, the HTTP status code and the size of the output for
A, or nothing for E.
"""
import time
import logging
from ZServer.BaseLogger import BaseLogger
class DebugLogger(BaseLogger):
def __init__(self):
BaseLogger.__init__(self, 'trace')
def log(self, code, request_id, data=''):
if not self.logger.handlers:
return
# Omitting the second parameter requires Python 2.2 or newer.
t = time.strftime('%Y-%m-%dT%H:%M:%S')
message = '%s %s %s %s' % (code, request_id, t, data)
self.logger.warn(message)
debug_logger = DebugLogger()
log = debug_logger.log
reopen = debug_logger.reopen
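# Illustrative usage (not part of the original module): for one request the
# publisher machinery emits one line per event, roughly in this order. The
# request id and sizes below are made up.
#
#   log('B', '12345', 'GET /index_html')   # B: request begins
#   log('I', '12345', '0')                 # I: size of the received input
#   log('A', '12345', '200 4522')          # A: status code and output size
#   log('E', '12345')                      # E: output sent, no extra data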
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
FTP Request class for FTP server.
The FTP Request does the dirty work of turning an FTP request into something
that ZPublisher can understand.
"""
from ZPublisher.HTTPRequest import HTTPRequest
from cStringIO import StringIO
import os
from base64 import encodestring
import re
class FTPRequest(HTTPRequest):
def __init__(self, path, command, channel, response, stdin=None,
environ=None, globbing=None, recursive=0, size=None):
# we need to store the globbing information to pass it
# to the ZPublisher and the manage_FTPlist function
# (ajung)
self.globbing = globbing
self.recursive= recursive
if stdin is None:
size = 0
stdin = StringIO()
if environ is None:
environ = self._get_env(path, command, channel, stdin, size)
self._orig_env=environ
HTTPRequest.__init__(self, stdin, environ, response, clean=1)
# support for cookies and cookie authentication
self.cookies=channel.cookies
if not self.cookies.has_key('__ac') and channel.userid != 'anonymous':
self.other['__ac_name']=channel.userid
self.other['__ac_password']=channel.password
for k,v in self.cookies.items():
if not self.other.has_key(k):
self.other[k]=v
def retry(self):
self.retry_count=self.retry_count+1
r=self.__class__(stdin=self.stdin,
environ=self._orig_env,
response=self.response.retry(),
channel=self, # For my cookies
)
return r
def _get_env(self, path, command, channel, stdin, size):
"Returns a CGI style environment"
env={}
env['SCRIPT_NAME']='/%s' % channel.module
env['REQUEST_METHOD']='GET' # XXX what should this be?
env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT
if channel.userid != 'anonymous':
env['HTTP_AUTHORIZATION']='Basic %s' % re.sub('\012','',
encodestring('%s:%s' % (channel.userid, channel.password)))
env['SERVER_NAME']=channel.server.hostname
env['SERVER_PORT']=str(channel.server.port)
env['REMOTE_ADDR']=channel.client_addr[0]
env['GATEWAY_INTERFACE']='CGI/1.1' # that's stretching it ;-)
# FTP commands
#
if type(command)==type(()):
args=command[1:]
command=command[0]
if command in ('LST','CWD','PASS'):
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_FTPlist')
elif command in ('MDTM','SIZE'):
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_FTPstat')
elif command=='RETR':
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_FTPget')
elif command in ('RMD','DELE'):
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_delObjects')
env['QUERY_STRING']='ids=%s' % args[0]
elif command=='MKD':
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_addFolder')
env['QUERY_STRING']='id=%s' % args[0]
elif command=='RNFR':
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_hasId')
env['QUERY_STRING']='id=%s' % (args[0])
elif command=='RNTO':
env['PATH_INFO']=self._join_paths(channel.path,
path, 'manage_renameObject')
env['QUERY_STRING']='id=%s&new_id=%s' % (args[0],args[1])
elif command=='STOR':
env['PATH_INFO'] = self._join_paths(channel.path, path)
env['REQUEST_METHOD'] = 'PUT'
env['CONTENT_LENGTH'] = long(size)
else:
env['PATH_INFO']=self._join_paths(channel.path, path, command)
# Fake in globbing information
env['GLOBBING'] = self.globbing
env['FTP_RECURSIVE'] = self.recursive
return env
def _join_paths(self,*args):
path=apply(os.path.join,args)
path=os.path.normpath(path)
if os.sep != '/':
path=path.replace(os.sep,'/')
return path
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Response class for the FTP Server.
"""
from ZServer.HTTPResponse import ZServerHTTPResponse
from PubCore.ZEvent import Wakeup
from cStringIO import StringIO
import marshal
class FTPResponse(ZServerHTTPResponse):
"""
Response to an FTP command
"""
def __str__(self):
return ''
def outputBody(self):
pass
def setCookie(self, name, value, **kw):
self.cookies[name]=value
def appendCookie(self, name, value):
self.cookies[name]=self.cookies[name] + value
def expireCookie(self, name, **kw):
if self.cookies.has_key(name):
del self.cookies[name]
def _cookie_list(self):
return []
def _marshalledBody(self):
return marshal.loads(self.body)
def setMessage(self, message):
self._message = message
def getMessage(self):
return getattr(self, '_message', '')
class CallbackPipe:
"""
Sends response object to a callback. Doesn't write anything.
The callback takes place in Medusa's thread, not the request thread.
"""
def __init__(self, callback, args):
self._callback=callback
self._args=args
self._producers=[]
def close(self):
pass
def write(self, text, l=None):
if text:
self._producers.append(text)
def finish(self, response):
self._response=response
Wakeup(self.apply) # move callback to medusa's thread
def apply(self):
result=apply(self._callback, self._args+(self._response,))
# break cycles
self._callback=None
self._response=None
self._args=None
return result
def make_response(channel, callback, *args):
# XXX should this be the FTPResponse constructor instead?
r=FTPResponse(stdout=CallbackPipe(callback, args), stderr=StringIO())
r.setHeader('content-type','text/plain')
r.cookies=channel.cookies
return r
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
# Medusa ICP server
#
# Why would you want to use this?
# see http://www.zope.org/Members/htrd/icp/intro
import sys, string, os, socket, errno, struct
import asyncore
from medusa import counter
ICP_OP_QUERY = 1
ICP_OP_HIT = 2
ICP_OP_MISS = 3
ICP_OP_ERR = 4
ICP_OP_MISS_NOFETCH = 21
ICP_OP_DENIED = 22
class BaseICPServer(asyncore.dispatcher):
REQUESTS_PER_LOOP = 4
_shutdown = 0
def __init__ (self,ip,port):
asyncore.dispatcher.__init__(self)
self.ip = ip
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
self.set_reuse_addr()
self.bind((ip,port))
if ip=='':
addr = 'any'
else:
addr = ip
self.log_info('ICP server started\n\tAddress: %s\n\tPort: %s' % (addr,port) )
def clean_shutdown_control(self,phase,time_in_this_phase):
if phase==1:
# Stop responding to requests.
if not self._shutdown:
self._shutdown = 1
self.log_info('shutting down ICP')
if time_in_this_phase<2.0:
# We have not yet been deaf long enough for our front end proxies to notice.
# Do not allow shutdown to proceed yet
return 1
else:
# Shutdown can proceed. We don't need a socket any more
self.close()
return 0
def handle_read(self):
for i in range(self.REQUESTS_PER_LOOP):
try:
request, whence = self.socket.recvfrom(16384)
except socket.error,e:
if e[0]==errno.EWOULDBLOCK:
break
else:
raise
else:
if self.check_whence(whence):
reply = self.calc_reply(request)
if reply:
self.socket.sendto(reply,whence)
def readable(self):
return not self._shutdown
def writable(self):
return 0
def handle_write (self):
self.log_info ('unexpected write event', 'warning')
def handle_error (self): # don't close the socket on error
(file,fun,line), t, v, tbinfo = asyncore.compact_traceback()
self.log_info('Problem in ICP (%s:%s %s)' % (t, v, tbinfo),
'error')
def check_whence(self,whence):
return 1
def calc_reply(self,request):
if len(request)>20:
opcode,version,length,number,options,opdata,junk = struct.unpack('!BBHIIII',request[:20])
if version==2:
if opcode==ICP_OP_QUERY:
if len(request)!=length:
out_opcode = ICP_OP_ERR
else:
url = request[24:]
if url[-1:]=='\x00':
url = url[:-1]
out_opcode = self.check_url(url)
return struct.pack('!BBHIIII',out_opcode,2,20,number,0,0,0)
def check_url(self,url):
# derived classes replace this with a more
# useful policy
return ICP_OP_MISS
class ICPServer(BaseICPServer):
# Products that want to do special ICP handling should .append their hooks to
# this list. Each hook is called in turn with the URL as a parameter and must
# return one of the ICP_OP codes above or None. The first non-None return
# value is used as the ICP response (see the illustrative hook sketch after
# this class).
hooks = []
def check_url(self,url):
for hook in self.hooks:
r = hook(url)
if r is not None:
return r
return ICP_OP_MISS
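# Illustrative sketch (not part of the original module): a product could
# register an ICP hook like the one below. The URL prefix used here is a
# made-up example.
def _example_icp_hook(url):
    # Claim a cache hit for a hypothetical static area; defer otherwise so
    # other hooks (or the ICP_OP_MISS default) can answer.
    if url.startswith('/static/'):
        return ICP_OP_HIT
    return None

# Registration would look like: ICPServer.hooks.append(_example_icp_hook)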
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
ZServer pipe utils. These producers basically function as callbacks.
"""
import asyncore
import sys
class ShutdownProducer:
"shuts down medusa"
def more(self):
asyncore.close_all()
class LoggingProducer:
"logs request"
def __init__(self, logger, bytes, method='log'):
self.logger=logger
self.bytes=bytes
self.method=method
def more(self):
getattr(self.logger, self.method)(self.bytes)
self.logger=None
return ''
class CallbackProducer:
"Performs a callback in the channel's thread"
def __init__(self, callback):
self.callback=callback
def more(self):
self.callback()
self.callback=None
return ''
class file_part_producer:
"producer wrapper for part of a file[-like] objects"
# match http_channel's outgoing buffer size
out_buffer_size = 1<<16
def __init__(self, file, lock, start, end):
self.file=file
self.lock=lock
self.start=start
self.end=end
def more(self):
end=self.end
if not end: return ''
start=self.start
if start >= end: return ''
file=self.file
size=end-start
bsize=self.out_buffer_size
if size > bsize: size=bsize
self.lock.acquire()
try:
file.seek(start)
data = file.read(size)
finally:
self.lock.release()
if data:
start=start+len(data)
if start < end:
self.start=start
return data
self.end=0
del self.file
return data
class file_close_producer:
def __init__(self, file):
self.file=file
def more(self):
file=self.file
if file is not None:
file.close()
self.file=None
return ''
class iterator_producer:
def __init__(self, iterator):
self.iterator = iterator
def more(self):
try:
return self.iterator.next()
except StopIteration:
return ''
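# Illustrative note (not part of the original module): medusa drains any of
# these producers by calling more() until it returns an empty string, e.g.:
#
#   producer = LoggingProducer(access_logger, 'GET /index_html 200')
#   while 1:
#       data = producer.more()
#       if not data:
#           break
#       # data would normally be pushed out over the channel's socket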
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Simple Event Manager Based on Pipes
"""
from ZServer.medusa.thread.select_trigger import trigger
from asyncore import socket_map
class simple_trigger(trigger):
def handle_close(self):
pass
the_trigger=simple_trigger()
def Wakeup(thunk=None):
global the_trigger
try:
the_trigger.pull_trigger(thunk)
except OSError, why:
# This is a broken pipe, perhaps as the result of a signal.
# We want to handle this gracefully, so we get rid of the old
# trigger and install a new one.
if why[0] == 32:
del socket_map[the_trigger._fileno]
the_trigger = simple_trigger() # adds itself back into socket_map
the_trigger.pull_trigger(thunk)
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import thread
from ZServerPublisher import ZServerPublisher
class ZRendevous:
"""Worker thread pool
For better or worse, we hide locking semantics from the worker
threads. The worker threads do no locking.
"""
def __init__(self, n=1):
sync = thread.allocate_lock()
self._acquire = sync.acquire
self._release = sync.release
pool = []
self._lists = (
pool, # Collection of locks representing threads that are not
# waiting for work to do
[], # Request queue
[], # Pool of locks representing threads that are
# waiting (ready) for work to do.
)
self._acquire() # callers will block
try:
while n > 0:
l = thread.allocate_lock()
l.acquire()
pool.append(l)
thread.start_new_thread(ZServerPublisher,
(self.accept,))
n = n-1
finally:
self._release() # let callers through now
def accept(self):
"""Return a request from the request queue
If no requests are in the request queue, then block until
there is one.
"""
self._acquire() # prevent other calls to protect data structures
try:
pool, requests, ready = self._lists
while not requests:
# There are no requests in the queue. Wait until there are.
# This thread is now waiting, so remove a lock from the collection
# of locks corresponding to threads not waiting for work
l = pool.pop()
# And add it to the collection of locks representing threads
# ready and waiting for work.
ready.append(l)
self._release() # allow other calls
# Now try to acquire the lock. We will block until
# someone calls handle to queue a request and releases the lock
# which handle finds in the ready queue
l.acquire()
self._acquire() # prevent calls so we can update
# not waiting pool
pool.append(l)
# return the *first* request
return requests.pop(0)
finally:
self._release() # allow calls
def handle(self, name, request, response):
"""Queue a request for processing
"""
self._acquire() # prevent other calls to protect data structs
try:
pool, requests, ready = self._lists
# queue request
requests.append((name, request, response))
if ready:
# If any threads are ready and waiting for work
# then remove one of the locks from the ready pool
# and release it, letting the waiting thread go forward
# and consume the request
ready.pop().release()
finally:
self._release()
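# Illustrative usage (not part of the original module): ZServer.PubCore
# creates one pool per process and routes requests through handle(), roughly:
#
#   pool = ZRendevous(n=4)   # starts four ZServerPublisher worker threads
#   pool.handle('Zope2', request, response)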
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import logging
LOG = logging.getLogger('ZServerPublisher')
class ZServerPublisher:
def __init__(self, accept):
from sys import exc_info
from ZPublisher import publish_module
from ZPublisher.WSGIPublisher import publish_module as publish_wsgi
while 1:
try:
name, a, b=accept()
if name == "Zope2":
try:
publish_module(
name,
request=a,
response=b)
finally:
b._finish()
a=b=None
elif name == "Zope2WSGI":
try:
res = publish_wsgi(a, b)
for r in res:
a['wsgi.output'].write(r)
finally:
# TODO: Support keeping connections open.
a['wsgi.output']._close = 1
a['wsgi.output'].close()
except:
LOG.error('exception caught', exc_info=True)
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from Zope2.Startup.config import ( # NOQA
setNumberOfThreads,
ZSERVER_THREADS as _n,
)
from ZServer.PubCore import ZRendezvous
_handle = None
def handle(*args, **kw):
global _handle
if _handle is None:
_handle = ZRendezvous.ZRendevous(_n).handle
return _handle(*args, **kw)
ZServer README
--------------
What is ZServer?
ZServer is an integration of the Zope application server and the
Medusa information server. See the ZServer architecture document for
more information::
http://www.zope.org/Documentation/Reference/ZServer
ZServer gives you HTTP, FTP, WebDAV, PCGI, and remote interactive
Python access. In later releases it will probably offer more
protocols such as FastCGI, etc.
What is Medusa?
Medusa is a Python server framework which uses a single-threaded
asynchronous sockets approach. For more information see::
http://www.nightmare.com/medusa
There's also an interesting Medusa tutorial at::
http://www.nightmare.com:8080/nm/apps/medusa/docs/programming.html
ZServer HTTP support
ZServer offers HTTP 1.1 publishing for Zope. It does not support
publishing files from the file system. You can specify the HTTP port
using the -w command line argument for the z2.py start script. You
can also specify CGI environment variables on the command line using
z2.py
ZServer FTP support
What you can do with FTP
FTP access to Zope allows you to FTP to the Zope object hierarchy
in order to perform managerial tasks. You can:
* Navigate the object hierarchy with 'cd'
* Replace the content of Documents, Images, and Files
* Create Documents, Images, Files, Folders
* Delete objects and Folders.
So basically you can do more than is possible with HTTP PUT. Also,
unlike PUT, FTP gives you access to Document content. So when you
download a Document you are getting its content, not what it looks
like when it is rendered.
Using FTP
To FTP into Zope, ZServer must be configured to serve FTP. By
default ZServer serves FTP on port 9221. So to connect to Zope you
would issue a command like so::
$ ftp localhost 9221
When logging in to FTP, you have some choices. You can connect
anonymously by using a username of 'anonymous' and any password.
Or you can login as a Zope user. Since Zope users are defined at
different locations in the object hierarchy, authentication can be
problematic. There are two solutions:
* login and then cd to the directory where you are defined.
* login with a special name that indicates where you are
defined.
The format of the special name is '<username>@<path>'. For
example::
joe@Marketing/Projects
FTP permissions
FTP support is provided for Folders, Documents, Images, and Files.
You can control access to FTP via the new 'FTP access' permission.
This permission controls the ability to 'cd' to a Folder and to
download objects. Uploading, deleting, and creating objects are
controlled by existing permissions.
FTP limits
You can set limits for the number of simultaneous FTP connections.
You can separately configure the number of anonymous and
authenticated connections. Right now this setting is set in
'ZServerFTP.py'. In the future, it may become easier to configure.
Properties and FTP: The next step
The next phase of FTP support will allow you to edit properties of
all Zope objects. Probably properties will be exposed via special
files which will contain an XML representation of the object's
properties. You could then download the file, edit the XML and
upload it to change the object's properties.
We do not currently have a target date for FTP property support.
How does FTP work?
The ZServer's FTP channel object translates FTP requests into
ZPublisher requests. The FTP channel then analyses the response
and formulates an appropriate FTP response. The FTP channel
stores some state such as the current working directory and the
username and password.
On the Zope side of things, the 'lib/python/OFS/FTPInterface.py'
module defines the Zope FTP interface, for listing sub-items,
stating, and getting content. The interface is implemented in
'SimpleItem', and in other Zope classes. Programmers will not
need to implement the entire interface if they inherit from
'SimpleItem'. All the other FTP functions are handled by
existing methods like 'manage_delObjects', and 'PUT', etc.
ZServer PCGI support
ZServer will service PCGI requests with both inet and unix domain
sockets. This means you can use ZServer instead of
'pcgi_publisher.py' as your long running PCGI server process. In the
future, PCGI may be able to activate ZServer.
Using PCGI instead of HTTP allows you to forward requests from
another web server to ZServer. The CGI environment and HTTP headers
are controlled by the web server, so you don't need to worry about
managing the ZServer environment. However, this configuration will
impose a larger overhead than simply using the web server as an HTTP
proxy for ZServer.
To use PCGI, configure your PCGI info files to communicate with
ZServer by setting the PCGI_PORT, PCGI_SOCKET_FILE, and PCGI_NAME.
The other PCGI settings are currently ignored by ZServer.
ZServer's PCGI support will work with mod_pcgi.
ZServer monitor server
ZServer now includes the Medusa monitor server. This basically gives
you a remote, secure Python prompt. You can interactively access Zope.
This is a very powerful, but dangerous tool. Be careful.
To use the monitor server specify a monitor port number using the -m
option with the z2.py start script. The default port is 9999.
To connect to the monitor server use the 'ZServer/medusa/monitor_client.py'
or 'ZServer/medusa/monitor_client_win32.py' script. For example::
$ python2.1 ZServer/medusa/monitor_client.py localhost 9999
You will then be asked to enter a password. This is the Zope super manager
password which is stored in the 'access' file.
Then you will be greeted with a Python prompt. To access Zope import
the Zope module::
>>> import Zope
The Zope top level Zope object is available via the 'Zope.app' function::
>>> a=Zope.app()
From this object you can reach all other Zope objects as subobjects.
Remember if you make changes to Zope objects and want those changes to be
saved you need to commit the transaction::
>>> import transaction
>>> transaction.commit()
ZServer WebDAV support
WebDAV is a new protocol for managing web resources. WebDAV operates
over HTTP. Since WebDAV uses HTTP, ZServer doesn't really have to do
anything special, except stay out of Zope's way when handling WebDAV
requests.
The only major WebDAV client at this time is Internet Explorer 5. It
works with Zope.
Differences between ZopeHTTPServer and ZServer
ZopeHTTPServer is old and no longer being actively maintained.
Both ZopeHTTPServer and ZServer are Python HTTP servers.
ZopeHTTPServer is built on the standard Python SimpleHTTPServer
framework. ZServer is built on Medusa.
ZopeHTTPServer is very limited. It can only publish one module at a
time. It can only publish via HTTP. It has no support for thread
pools.
ZServer, on the other hand, is more complex: it supports publishing
multiple modules, supports thread pools, and uses a new threaded
architecture for accessing ZPublisher.
Running ZServer as nobody
Normally ZServer will run with the userid of the user who starts
it. However, if ZServer is started by root, it will attempt to
become nobody or any userid you specify with the -u argument to the
z2.py start script.
ZServer is similar to ZopeHTTPServer in these respects.
If you run Zope with different userids you must be aware of
permission issues. Zope must be able to read and write to the 'var'
directory. If you change the userid Zope is running under you will
probably need to change the permissions on the 'var' directory
and the files in it in order for Zope to run under a different
userid.
Support
Questions and comments should go to 'support@digicool.com'.
You can report bugs and check on the status of bugs using the Zope
bug collector::
http://www.zope.org/Resources/Collector/
License
ZServer is covered by the ZPL despite the fact that it comes with
much of the Medusa source code. The portions of Medusa that come
with ZServer are licensed under the ZPL.
Outstanding issues
The FTP interface for Zope objects may be changed.
HTTP 1.1 support in ZServer is incomplete, though it should work for
most HTTP 1.1 clients.
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP handler which forces GET requests to return the document source.
Works around current WebDAV clients' failure to implement the
'source-link' feature of the specification. Uses manage_DAVget().
"""
import os
import posixpath
from ZServer.HTTPServer import zhttp_handler
class WebDAVSrcHandler(zhttp_handler):
def get_environment(self, request):
"""Munge the request to ensure that we call manage_FTPGet."""
env = zhttp_handler.get_environment(self, request)
# Set a flag to indicate this request came through the WebDAV source
# port server.
env['WEBDAV_SOURCE_PORT'] = 1
if env['REQUEST_METHOD'] == 'GET':
path_info = env['PATH_INFO']
if os.sep != '/':
path_info = path_info.replace(os.sep, '/')
path_info = posixpath.join(path_info, 'manage_DAVget')
path_info = posixpath.normpath(path_info)
env['PATH_INFO'] = path_info
# Workaround for lousy WebDAV implementation of M$ Office 2K.
# Requests for "index_html" are *sometimes* send as "index_html."
# We check the user-agent and remove a trailing dot for PATH_INFO
# and PATH_TRANSLATED
if env.get("HTTP_USER_AGENT", "").find(
"Microsoft Data Access Internet Publishing Provider") > -1:
if env["PATH_INFO"][-1] == '.':
env["PATH_INFO"] = env["PATH_INFO"][:-1]
if env["PATH_TRANSLATED"][-1] == '.':
env["PATH_TRANSLATED"] = env["PATH_TRANSLATED"][:-1]
return env
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import utils
from Zope2.Startup.config import ( # NOQA
ZSERVER_CONNECTION_LIMIT as CONNECTION_LIMIT,
ZSERVER_EXIT_CODE as exit_code,
ZSERVER_LARGE_FILE_THRESHOLD as LARGE_FILE_THRESHOLD,
setNumberOfThreads,
)
# the ZServer version number
ZSERVER_VERSION = '1.1'
# the Zope version string
ZOPE_VERSION = utils.getZopeVersion()
# backwards compatibility aliases
from utils import requestCloseOnExec
import asyncore
from medusa import resolver, logger
from HTTPServer import zhttp_server, zhttp_handler
from PCGIServer import PCGIServer
from FCGIServer import FCGIServer
from FTPServer import FTPServer
from medusa.monitor import secure_monitor_server
# we need to patch asyncore's dispatcher class with a new
# log_info method so we see medusa messages in the zLOG log
utils.patchAsyncoreLogger()
# we need to patch the 'service name' of the medusa syslog logger
utils.patchSyslogServiceName()
<component prefix="ZServer.datatypes">
<abstracttype name="ZServer.server">
<description>
The "server" type is used to describe a single type of server
instance. The value for a server section is an object with the
ServerFactory interface.
</description>
</abstracttype>
<sectiontype name="http-server"
datatype=".HTTPServerFactory"
implements="ZServer.server">
<key name="address" datatype="inet-binding-address"/>
<key name="force-connection-close" datatype="boolean" default="off"/>
<key name="webdav-source-clients">
<description>
Regular expression used to identify clients who should
receive WebDAV source responses to GET requests.
</description>
</key>
<key name="fast-listen" datatype="boolean" default="on">
<description>
Defines whether the HTTP server should listen for requests
immediately or only after Zope is ready to run.
</description>
</key>
<key name="use-wsgi" datatype="boolean" default="off" />
</sectiontype>
<sectiontype name="webdav-source-server"
datatype=".WebDAVSourceServerFactory"
implements="ZServer.server">
<key name="address" datatype="inet-binding-address"/>
<key name="force-connection-close" datatype="boolean" default="off"/>
<key name="use-wsgi" datatype="boolean" default="off" />
</sectiontype>
<sectiontype name="persistent-cgi"
datatype=".PCGIServerFactory"
implements="ZServer.server">
<key name="path" datatype="existing-file"/>
</sectiontype>
<sectiontype name="fast-cgi"
datatype=".FCGIServerFactory"
implements="ZServer.server">
<key name="address" datatype="socket-binding-address"/>
</sectiontype>
<sectiontype name="ftp-server"
datatype=".FTPServerFactory"
implements="ZServer.server">
<key name="address" datatype="inet-binding-address"/>
</sectiontype>
<sectiontype name="monitor-server"
datatype=".MonitorServerFactory"
implements="ZServer.server">
<key name="address" datatype="inet-binding-address"/>
</sectiontype>
<sectiontype name="icp-server"
datatype=".ICPServerFactory"
implements="ZServer.server">
<key name="address" datatype="inet-binding-address"/>
</sectiontype>
<sectiontype name="clock-server"
datatype=".ClockServerFactory"
implements="ZServer.server">
<key name="method" datatype="string">
<description>
The traversal path (from the Zope root) to an
executable Zope method (Python Script, external method, product
method, etc). The method must take no arguments. Ex: "/site/methodname"
</description>
</key>
<key name="period" datatype="integer" default="60">
<description>
The number of seconds between each clock "tick" (and
thus each call to the above "method"). The lowest number
providable here is typically 30 (this is the asyncore mainloop
"timeout" value). The default is 60. Ex: "30"
</description>
</key>
<key name="user" datatype="string">
<description>
A zope username. Ex: "admin"
</description>
</key>
<key name="password" datatype="string">
<description>
The password for the zope username provided above. Careful: this
is obviously not encrypted in the config file. Ex: "123"
</description>
</key>
<key name="host" datatype="string">
<description>
The hostname passed in via the "Host:" header in the
faux request. Could be useful if you have virtual host rules
set up inside Zope itself. Ex: "www.example.com"
</description>
</key>
</sectiontype>
</component>
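<!-- Illustrative example (not part of the original schema): a zope.conf
     clock-server section using the keys defined above; the method path,
     credentials and host are placeholder values.

  <clock-server>
    method /site/methodname
    period 60
    user admin
    password 123
    host www.example.com
  </clock-server>
-->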
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""ZConfig datatype support for ZServer.
Each server type is represented by a ServerFactory instance.
"""
import socket
import ZConfig
class ServerFactory:
def __init__(self, address=None):
self.ip = None
if address is None:
self.host = None
self.port = None
else:
self.host, self.port = address
def prepare(self, defaulthost='', dnsresolver=None,
module=None, env=None, portbase=None):
if not self.host:
ip = socket.gethostbyname(defaulthost)
self._set_default_host(defaulthost, ip)
else:
address_info = socket.getaddrinfo(self.host, self.port)
ips = [info[4][0] for info in address_info]
self.ip = ips[0]
self.dnsresolver = dnsresolver
self.module = module
self.cgienv = env
if portbase and self.port is not None:
self.port += portbase
def _set_default_host(self, host, ip):
self.host = host
self.ip = ip
def servertype(self):
s = self.__class__.__name__
if s.endswith("Factory"):
s = s[:-7]
return s
def create(self):
raise NotImplementedError(
"Concrete ServerFactory classes must implement create().")
class HTTPServerFactory(ServerFactory):
def __init__(self, section):
from ZServer import HTTPServer
if not section.address:
raise ZConfig.ConfigurationError(
"No 'address' settings found "
"within the 'http-server' or 'webdav-source-server' section")
ServerFactory.__init__(self, section.address)
self.server_class = HTTPServer.zhttp_server
self.force_connection_close = section.force_connection_close
# webdav-source-server sections won't have webdav_source_clients:
webdav_clients = getattr(section, "webdav_source_clients", None)
self.fast_listen = getattr(section, 'fast_listen', True)
self.webdav_source_clients = webdav_clients
self.use_wsgi = section.use_wsgi
def create(self):
from ZServer.AccessLogger import access_logger
handler = self.createHandler()
handler._force_connection_close = self.force_connection_close
if self.webdav_source_clients:
handler.set_webdav_source_clients(self.webdav_source_clients)
server = self.server_class(ip=self.ip, port=self.port,
resolver=self.dnsresolver,
fast_listen=self.fast_listen,
logger_object=access_logger)
server.install_handler(handler)
return server
def createHandler(self):
from ZServer import HTTPServer
if self.use_wsgi:
return HTTPServer.zwsgi_handler(self.module, '', self.cgienv)
else:
return HTTPServer.zhttp_handler(self.module, '', self.cgienv)
class WebDAVSourceServerFactory(HTTPServerFactory):
def __init__(self, section):
from ZServer import HTTPServer
HTTPServerFactory.__init__(self, section)
self.server_class = HTTPServer.zwebdav_server
def createHandler(self):
from ZServer.WebDAVSrcHandler import WebDAVSrcHandler
return WebDAVSrcHandler(self.module, '', self.cgienv)
class FTPServerFactory(ServerFactory):
def __init__(self, section):
if not section.address:
raise ZConfig.ConfigurationError(
"No 'address' settings found within the 'ftp-server' section")
ServerFactory.__init__(self, section.address)
def create(self):
from ZServer.AccessLogger import access_logger
from ZServer.FTPServer import FTPServer
return FTPServer(ip=self.ip, hostname=self.host, port=self.port,
module=self.module, resolver=self.dnsresolver,
logger_object=access_logger)
class PCGIServerFactory(ServerFactory):
def __init__(self, section):
ServerFactory.__init__(self)
self.path = section.path
def create(self):
from ZServer.AccessLogger import access_logger
from ZServer.PCGIServer import PCGIServer
return PCGIServer(ip=self.ip, port=self.port,
module=self.module, resolver=self.dnsresolver,
pcgi_file=self.path,
logger_object=access_logger)
class FCGIServerFactory(ServerFactory):
def __init__(self, section):
import warnings
warnings.warn("Using FastCGI is deprecated. You should use mod_proxy "
"to run Zope with Apache", DeprecationWarning,
stacklevel=2)
import socket
if section.address.family == socket.AF_INET:
address = section.address.address
path = None
else:
address = None
path = section.address.address
ServerFactory.__init__(self, address)
self.path = path
def _set_default_host(self, host, ip):
if self.path is None:
ServerFactory._set_default_host(self, host, ip)
def create(self):
from ZServer.AccessLogger import access_logger
from ZServer.FCGIServer import FCGIServer
return FCGIServer(ip=self.ip, port=self.port,
socket_file=self.path,
module=self.module, resolver=self.dnsresolver,
logger_object=access_logger)
class MonitorServerFactory(ServerFactory):
def __init__(self, section):
ServerFactory.__init__(self, section.address)
def create(self):
password = self.getPassword()
if password is None:
msg = ('Monitor server not started because no emergency user '
'exists.')
import logging
LOG = logging.getLogger('Zope')
LOG.error(msg)
return
from ZServer.medusa.monitor import secure_monitor_server
return secure_monitor_server(hostname=self.host, port=self.port,
password=password)
def getPassword(self):
# XXX This is really out of place; there should be a better
# way. For now, at least we can make it a separate method.
import ZODB # :-( required to import user
from AccessControl.User import emergency_user
if hasattr(emergency_user, '__null_user__'):
pw = None
else:
pw = emergency_user._getPassword()
return pw
class ICPServerFactory(ServerFactory):
def __init__(self, section):
ServerFactory.__init__(self, section.address)
def create(self):
from ZServer.ICPServer import ICPServer
return ICPServer(self.ip, self.port)
class ClockServerFactory(ServerFactory):
def __init__(self, section):
ServerFactory.__init__(self)
self.method = section.method
self.period = section.period
self.user = section.user
self.password = section.password
self.hostheader = section.host
self.host = None # appease configuration machinery
def create(self):
from ZServer.ClockServer import ClockServer
from ZServer.AccessLogger import access_logger
return ClockServer(self.method, self.period, self.user,
self.password, self.hostheader, access_logger)
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id$'
import string
VERSION = string.split(RCS_ID)[2]
import socket
import asyncore
import asynchat
import status_handler
class chat_channel (asynchat.async_chat):
def __init__ (self, server, sock, addr):
asynchat.async_chat.__init__ (self, sock)
self.server = server
self.addr = addr
self.set_terminator ('\r\n')
self.data = ''
self.nick = None
self.push ('nickname?: ')
def collect_incoming_data (self, data):
self.data = self.data + data
def found_terminator (self):
line = self.data
self.data = ''
if self.nick is None:
self.nick = string.split (line)[0]
if not self.nick:
self.nick = None
self.push ('huh? gimmee a nickname: ')
else:
self.greet()
else:
if not line:
pass
elif line[0] != '/':
self.server.push_line (self, line)
else:
self.handle_command (line)
def greet (self):
self.push ('Hello, %s\r\n' % self.nick)
num_channels = len(self.server.channels)-1
if num_channels == 0:
self.push ('[Kinda lonely in here... you\'re the only caller!]\r\n')
else:
self.push ('[There are %d other callers]\r\n' % (len(self.server.channels)-1))
nicks = map (lambda x: x.get_nick(), self.server.channels.keys())
self.push (string.join (nicks, '\r\n ') + '\r\n')
self.server.push_line (self, '[joined]')
def handle_command (self, command):
import types
command_line = string.split(command)
name = 'cmd_%s' % command_line[0][1:]
if hasattr (self, name):
# make sure it's a method...
method = getattr (self, name)
if type(method) == type(self.handle_command):
method (command_line[1:])
else:
self.push ('unknown command: %s' % command_line[0])
def cmd_quit (self, args):
self.server.push_line (self, '[left]')
self.push ('Goodbye!\r\n')
self.close_when_done()
# alias for '/quit' - '/q'
cmd_q = cmd_quit
def push_line (self, nick, line):
self.push ('%s: %s\r\n' % (nick, line))
def handle_close (self):
self.close()
def close (self):
del self.server.channels[self]
asynchat.async_chat.close (self)
def get_nick (self):
if self.nick is not None:
return self.nick
else:
return 'Unknown'
class chat_server (asyncore.dispatcher):
SERVER_IDENT = 'Chat Server (V%s)' % VERSION
channel_class = chat_channel
spy = 1
def __init__ (self, ip='', port=8518):
self.port = port
self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
self.bind ((ip, port))
print '%s started on port %d' % (self.SERVER_IDENT, port)
self.listen (5)
self.channels = {}
self.count = 0
def handle_accept (self):
conn, addr = self.accept()
self.count = self.count + 1
print 'client #%d - %s:%d' % (self.count, addr[0], addr[1])
self.channels[self.channel_class (self, conn, addr)] = 1
def push_line (self, from_channel, line):
nick = from_channel.get_nick()
if self.spy:
print '%s: %s' % (nick, line)
for c in self.channels.keys():
if c is not from_channel:
c.push ('%s: %s\r\n' % (nick, line))
def status (self):
lines = [
'<h2>%s</h2>' % self.SERVER_IDENT,
'<br>Listening on Port: %d' % self.port,
'<br><b>Total Sessions:</b> %d' % self.count,
'<br><b>Current Sessions:</b> %d' % (len(self.channels))
]
return status_handler.lines_producer (lines)
def writable (self):
return 0
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
port = string.atoi (sys.argv[1])
else:
port = 8518
s = chat_server ('', port)
asyncore.loop()
# -*- Mode: Python; tab-width: 4 -*-
# It is tempting to add an __int__ method to this class, but it's not
# a good idea. This class tries to gracefully handle integer
# overflow, and to hide this detail from both the programmer and the
# user. Note that the __str__ method can be relied on for printing out
# the value of a counter:
#
# >>> print 'Total Client: %s' % self.total_clients
#
# If you need to do arithmetic with the value, then use the 'as_long'
# method; the use of long arithmetic is a reminder that the counter
# will overflow.
class counter:
"general-purpose counter"
def __init__ (self, initial_value=0):
self.value = initial_value
def increment (self, delta=1):
result = self.value
try:
self.value = self.value + delta
except OverflowError:
self.value = long(self.value) + delta
return result
def decrement (self, delta=1):
result = self.value
try:
self.value = self.value - delta
except OverflowError:
self.value = long(self.value) - delta
return result
def as_long (self):
return long(self.value)
def __nonzero__ (self):
return self.value != 0
def __repr__ (self):
return '<counter value=%s at %x>' % (self.value, id(self))
def __str__ (self):
return str(long(self.value))[:-1]
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id$'
# standard python modules
import os
import re
import posixpath
import stat
import string
import time
# medusa modules
import http_date
import http_server
import mime_type_table
import status_handler
import producers
unquote = http_server.unquote
# This is the 'default' handler. It implements the base set of
# features expected of a simple file-delivering HTTP server. File
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server. You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#
from counter import counter
class default_handler:
valid_commands = ['get', 'head']
IDENT = 'Default HTTP Request Handler'
# Pathnames that are tried when a URI resolves to a directory name
directory_defaults = [
'index.html',
'default.html'
]
default_file_producer = producers.file_producer
def __init__ (self, filesystem):
self.filesystem = filesystem
# count total hits
self.hit_counter = counter()
# count file deliveries
self.file_counter = counter()
# count cache hits
self.cache_counter = counter()
hit_counter = 0
def __repr__ (self):
return '<%s (%s hits) at %x>' % (
self.IDENT,
self.hit_counter,
id (self)
)
# always match, since this is a default
def match (self, request):
return 1
# handle a file request, with caching.
def handle_request (self, request):
if request.command not in self.valid_commands:
request.error (400) # bad request
return
self.hit_counter.increment()
path, params, query, fragment = request.split_uri()
if '%' in path:
path = unquote (path)
# strip off all leading slashes
while path and path[0] == '/':
path = path[1:]
if self.filesystem.isdir (path):
if path and path[-1] != '/':
request['Location'] = 'http://%s/%s/' % (
request.channel.server.server_name,
path
)
request.error (301)
return
# We could also generate a directory listing here; we
# may want to move this into another method for that
# purpose.
found = 0
if path and path[-1] != '/':
path = path + '/'
for default in self.directory_defaults:
p = path + default
if self.filesystem.isfile (p):
path = p
found = 1
break
if not found:
request.error (404) # Not Found
return
elif not self.filesystem.isfile (path):
request.error (404) # Not Found
return
file_length = self.filesystem.stat (path)[stat.ST_SIZE]
ims = get_header_match (IF_MODIFIED_SINCE, request.header)
length_match = 1
if ims:
length = ims.group (4)
if length:
try:
length = string.atoi (length)
if length != file_length:
length_match = 0
except:
pass
ims_date = 0
if ims:
ims_date = http_date.parse_http_date (ims.group (1))
try:
mtime = self.filesystem.stat (path)[stat.ST_MTIME]
except:
request.error (404)
return
if length_match and ims_date:
if mtime <= ims_date:
request.reply_code = 304
request.done()
self.cache_counter.increment()
return
try:
file = self.filesystem.open (path, 'rb')
except IOError:
request.error (404)
return
request['Last-Modified'] = http_date.build_http_date (mtime)
request['Content-Length'] = file_length
self.set_content_type (path, request)
if request.command == 'get':
request.push (self.default_file_producer (file))
self.file_counter.increment()
request.done()
def set_content_type (self, path, request):
ext = string.lower (get_extension (path))
if mime_type_table.content_type_map.has_key (ext):
request['Content-Type'] = mime_type_table.content_type_map[ext]
else:
# TODO: test a chunk off the front of the file for 8-bit
# characters, and use application/octet-stream instead.
request['Content-Type'] = 'text/plain'
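# A possible sketch of the TODO above (not part of the original code):
# peek at the first few hundred bytes and fall back to
# application/octet-stream if they look binary, e.g.
#
#     head = self.filesystem.open (path, 'rb').read (512)
#     if '\0' in head:
#         request['Content-Type'] = 'application/octet-stream'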
def status (self):
return producers.simple_producer (
'<li>%s' % status_handler.html_repr (self)
+ '<ul>'
+ ' <li><b>Total Hits:</b> %s' % self.hit_counter
+ ' <li><b>Files Delivered:</b> %s' % self.file_counter
+ ' <li><b>Cache Hits:</b> %s' % self.cache_counter
+ '</ul>'
)
# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header.  I suppose its purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = re.compile (
'If-Modified-Since: ([^;]+)((; length=([0-9]+)$)|$)',
re.IGNORECASE
)
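# For example, the pattern above matches a header like (hypothetical
# values):
#
#   If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT; length=3766
#
# with group(1) holding the date string and group(4) the optional
# length.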
USER_AGENT = re.compile ('User-Agent: (.*)', re.IGNORECASE)
CONTENT_TYPE = re.compile (
r'Content-Type: ([^;]+)((; boundary=([A-Za-z0-9\'\(\)+_,./:=?-]+)$)|$)',
re.IGNORECASE
)
get_header = http_server.get_header
get_header_match = http_server.get_header_match
def get_extension (path):
dirsep = string.rfind (path, '/')
dotsep = string.rfind (path, '.')
if dotsep > dirsep:
return path[dotsep+1:]
else:
return ''
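# A minimal sketch (not part of the original module) of deriving a
# handler from default_handler, as mentioned in the comment at the top
# of this file.  The class name and the '/static' prefix are
# hypothetical; overriding match() restricts the handler to part of
# the URI space so it can be installed alongside other handlers.
class static_prefix_handler (default_handler):
    "serve files only for URIs beneath a fixed prefix"

    def __init__ (self, filesystem, prefix='/static'):
        default_handler.__init__ (self, filesystem)
        self.prefix = prefix

    def match (self, request):
        path, params, query, fragment = request.split_uri()
        return path[:len (self.prefix)] == self.prefix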
<html>
<head>
<title>Licensing terms for Medusa</title>
</head>
<body>
<h1>Medusa is now Free!</h1>
<p>
Medusa was previously distributed under a 'free for
non-commercial use' license.  In May of 2000 I changed the
license to be identical to the standard Python license.  The
standard Python license has always applied to the core
components of Medusa; this change just frees up the rest of the
system, including the HTTP server, FTP server, utilities, etc.
</p>
<p>
I would like to take this opportunity to thank all of the folks
who supported Medusa over the years by purchasing commercial
licenses.
</p>
</body>
</html>
# We can build 'promises' to produce external data.  Each producer
# contains a 'promise' to fetch external data (or an error
# message).  writable() for that channel will only return true if the
# top-most producer is ready.  This state can be flagged by the DNS
# client making a callback.
# So, say five proxy requests come in; we can send out DNS queries for
# them immediately.  If the replies to these come back before the
# promises get to the front of the queue, so much the better: no
# resolve delay.  8^)
#
# OK, there's still another complication:
# how do we maintain replies in order?
# Say three requests come in (to different hosts? can this happen?),
# yet the connections complete third, second, and first.  We can't buffer
# the entire request!  We need to be able to specify how much to buffer.
#
# ===========================================================================
#
# The current setup is a 'pull' model: whenever the channel fires FD_WRITE,
# we 'pull' data from the producer fifo.  What we need is a 'push' option/mode,
# where
# 1) we only check for FD_WRITE when data is in the buffer, and
# 2) whoever is 'pushing' is responsible for calling 'refill_buffer()'.
#
# What is necessary to support this 'mode'?
# 1) writable() only fires when data is in the buffer
# 2) refill_buffer() is only called by the 'pusher'
#
# How would such a mode affect things?  With this mode, could we support
# a true HTTP/1.1 proxy? [i.e., support <n> pipelined proxy requests, possibly
# to different hosts, possibly even mixed in with non-proxy requests?]  For
# example, it would be nice if the proxy could automatically apply
# 1.1 chunking to 1.0 close-on-eof replies when feeding them to the client.  This
# would let us keep our persistent connection.
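# A rough sketch (hypothetical, not medusa's actual implementation) of
# the 'promise' idea described above: a producer that yields nothing
# until some external callback (a DNS reply, say) fulfils it, so a
# channel can gate its writable() on ready().
class promise_producer:
    "producer that produces no data until fulfil() is called"

    def __init__ (self):
        self.data = None
        self.done = 0

    def fulfil (self, data):
        # called from the external client's callback, e.g. the DNS
        # client described above
        self.data = data

    def ready (self):
        # a channel's writable() would consult this for the top-most
        # producer before pulling
        return self.data is not None

    def more (self):
        # producer interface: return a chunk of data, or '' when done
        if self.data is None or self.done:
            return ''
        else:
            self.done = 1
            return self.data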
# make test appear as a package
# make thread appear as a package