Mass check-in after untabifying all files that need it.

This commit is contained in:
Guido van Rossum 1998-03-26 21:13:24 +00:00
parent 9ea7024754
commit 45e2fbc2e7
50 changed files with 5249 additions and 5249 deletions
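The change itself is mechanical: every tab in the affected files is expanded to spaces. A minimal sketch of the kind of helper script that performs such a conversion (this particular script is illustrative, not necessarily the tool used for this commit):

import os
import string
import sys

def untabify(filename, tabsize=8):
    # Rewrite FILENAME with all tabs expanded to spaces, keeping a backup.
    f = open(filename, 'r')
    text = f.read()
    f.close()
    newtext = string.expandtabs(text, tabsize)
    if newtext != text:
        os.rename(filename, filename + '~')
        f = open(filename, 'w')
        f.write(newtext)
        f.close()

for filename in sys.argv[1:]:
    untabify(filename)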

Lib/BaseHTTPServer.py

@@ -89,19 +89,19 @@ DEFAULT_ERROR_MESSAGE = """\

class HTTPServer(SocketServer.TCPServer):

    def server_bind(self):
        """Override server_bind to store the server name."""
        SocketServer.TCPServer.server_bind(self)
        host, port = self.socket.getsockname()
        if not host or host == '0.0.0.0':
            host = socket.gethostname()
        hostname, hostnames, hostaddrs = socket.gethostbyaddr(host)
        if '.' not in hostname:
            for host in hostnames:
                if '.' in host:
                    hostname = host
                    break
        self.server_name = hostname
        self.server_port = port

class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
@@ -217,196 +217,196 @@ class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):

    server_version = "BaseHTTP/" + __version__

    def handle(self):
        """Handle a single HTTP request.

        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.

        """
        self.raw_requestline = self.rfile.readline()
        self.request_version = version = "HTTP/0.9" # Default
        requestline = self.raw_requestline
        if requestline[-2:] == '\r\n':
            requestline = requestline[:-2]
        elif requestline[-1:] == '\n':
            requestline = requestline[:-1]
        self.requestline = requestline
        words = string.split(requestline)
        if len(words) == 3:
            [command, path, version] = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%s)" % `version`)
                return
        elif len(words) == 2:
            [command, path] = words
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%s)" % `command`)
                return
        else:
            self.send_error(400, "Bad request syntax (%s)" % `requestline`)
            return
        self.command, self.path, self.request_version = command, path, version
        self.headers = self.MessageClass(self.rfile, 0)
        mname = 'do_' + command
        if not hasattr(self, mname):
            self.send_error(501, "Unsupported method (%s)" % `mname`)
            return
        method = getattr(self, mname)
        method()

    def send_error(self, code, message=None):
        """Send and log an error reply.

        Arguments are the error code, and a detailed message.
        The detailed message defaults to the short entry matching the
        response code.

        This sends an error response (so it must be called before any
        output has been generated), logs the error, and finally sends
        a piece of HTML explaining the error to the user.

        """
        try:
            short, long = self.responses[code]
        except KeyError:
            short, long = '???', '???'
        if not message:
            message = short
        explain = long
        self.log_error("code %d, message %s", code, message)
        self.send_response(code, message)
        self.end_headers()
        self.wfile.write(self.error_message_format %
                         {'code': code,
                          'message': message,
                          'explain': explain})

    error_message_format = DEFAULT_ERROR_MESSAGE

    def send_response(self, code, message=None):
        """Send the response header and log the response code.

        Also send two standard headers with the server software
        version and the current date.

        """
        self.log_request(code)
        if message is None:
            if self.responses.has_key(code):
                message = self.responses[code][0]
            else:
                message = ''
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s %s %s\r\n" %
                             (self.protocol_version, str(code), message))
        self.send_header('Server', self.version_string())
        self.send_header('Date', self.date_time_string())

    def send_header(self, keyword, value):
        """Send a MIME header."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s: %s\r\n" % (keyword, value))

    def end_headers(self):
        """Send the blank line ending the MIME headers."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("\r\n")

    def log_request(self, code='-', size='-'):
        """Log an accepted request.

        This is called by send_response().

        """
        self.log_message('"%s" %s %s',
                         self.requestline, str(code), str(size))

    def log_error(self, *args):
        """Log an error.

        This is called when a request cannot be fulfilled.  By
        default it passes the message on to log_message().

        Arguments are the same as for log_message().

        XXX This should go to the separate error log.

        """
        apply(self.log_message, args)

    def log_message(self, format, *args):
        """Log an arbitrary message.

        This is used by all other logging functions.  Override
        it if you have specific logging wishes.

        The first argument, FORMAT, is a format string for the
        message to be logged.  If the format string contains
        any % escapes requiring parameters, they should be
        specified as subsequent arguments (it's just like
        printf!).

        The client host and current date/time are prefixed to
        every message.

        """
        sys.stderr.write("%s - - [%s] %s\n" %
                         (self.address_string(),
                          self.log_date_time_string(),
                          format%args))

    def version_string(self):
        """Return the server software version string."""
        return self.server_version + ' ' + self.sys_version

    def date_time_string(self):
        """Return the current date and time formatted for a message header."""
        now = time.time()
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(now)
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)
        return s

    def log_date_time_string(self):
        """Return the current time formatted for logging."""
        now = time.time()
        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
                day, self.monthname[month], year, hh, mm, ss)
        return s

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def address_string(self):
        """Return the client address formatted for logging.

        This version looks up the full hostname using gethostbyaddr(),
        and tries to find a name that contains at least one dot.

        """
        (host, port) = self.client_address
        try:
            name, names, addresses = socket.gethostbyaddr(host)
        except socket.error, msg:
            return host
        names.insert(0, name)
        for name in names:
            if '.' in name: return name
        return names[0]

    # Essentially static class variables
@@ -423,42 +423,42 @@ class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):

    # form {code: (shortmessage, longmessage)}.
    # See http://www.w3.org/hypertext/WWW/Protocols/HTTP/HTRESP.html
    responses = {
        200: ('OK', 'Request fulfilled, document follows'),
        201: ('Created', 'Document created, URL follows'),
        202: ('Accepted',
              'Request accepted, processing continues off-line'),
        203: ('Partial information', 'Request fulfilled from cache'),
        204: ('No response', 'Request fulfilled, nothing follows'),

        301: ('Moved', 'Object moved permanently -- see URI list'),
        302: ('Found', 'Object moved temporarily -- see URI list'),
        303: ('Method', 'Object moved -- see Method and URL list'),
        304: ('Not modified',
              'Document has not changed since given time'),

        400: ('Bad request',
              'Bad request syntax or unsupported method'),
        401: ('Unauthorized',
              'No permission -- see authorization schemes'),
        402: ('Payment required',
              'No payment -- see charging schemes'),
        403: ('Forbidden',
              'Request forbidden -- authorization will not help'),
        404: ('Not found', 'Nothing matches the given URI'),

        500: ('Internal error', 'Server got itself in trouble'),
        501: ('Not implemented',
              'Server does not support this operation'),
        502: ('Service temporarily overloaded',
              'The server cannot process the request due to a high load'),
        503: ('Gateway timeout',
              'The gateway server did not receive a timely response'),
        }


def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer):
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
@@ -467,9 +467,9 @@ def test(HandlerClass = BaseHTTPRequestHandler,

    """
    if sys.argv[1:]:
        port = string.atoi(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)
    httpd = ServerClass(server_address, HandlerClass)
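The excerpt stops partway through test(); for orientation, a minimal sketch of driving the classes in this file directly (the handler subclass, port, and response body are illustrative, not part of the module):

import BaseHTTPServer

class HelloHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        # send_response()/send_header()/end_headers() are defined above
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        self.end_headers()
        self.wfile.write("hello\n")

httpd = BaseHTTPServer.HTTPServer(('', 8000), HelloHandler)
httpd.handle_request()      # serve a single request, then return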

Lib/Bastion.py

@@ -41,47 +41,47 @@ class BastionClass:

    """

    def __init__(self, get, name):
        """Constructor.

        Arguments:

        get - a function that gets the attribute value (by name)
        name - a human-readable name for the original object
               (suggestion: use repr(object))

        """
        self._get_ = get
        self._name_ = name

    def __repr__(self):
        """Return a representation string.

        This includes the name passed in to the constructor, so that
        if you print the bastion during debugging, at least you have
        some idea of what it is.

        """
        return "<Bastion for %s>" % self._name_

    def __getattr__(self, name):
        """Get an as-yet undefined attribute value.

        This calls the get() function that was passed to the
        constructor.  The result is stored as an instance variable so
        that the next time the same attribute is requested,
        __getattr__() won't be invoked.

        If the get() function raises an exception, this is simply
        passed on -- exceptions are not cached.

        """
        attribute = self._get_(name)
        self.__dict__[name] = attribute
        return attribute


def Bastion(object, filter = lambda name: name[:1] != '_',
            name=None, bastionclass=BastionClass):
    """Create a bastion for an object, using an optional filter.

    See the Bastion module's documentation for background.
@@ -109,33 +109,33 @@ def Bastion(object, filter = lambda name: name[:1] != '_',

    # the user has full access to all instance variables!

    def get1(name, object=object, filter=filter):
        """Internal function for Bastion().  See source comments."""
        if filter(name):
            attribute = getattr(object, name)
            if type(attribute) == MethodType:
                return attribute
        raise AttributeError, name

    def get2(name, get1=get1):
        """Internal function for Bastion().  See source comments."""
        return get1(name)

    if name is None:
        name = `object`

    return bastionclass(get2, name)


def _test():
    """Test the Bastion() function."""
    class Original:
        def __init__(self):
            self.sum = 0
        def add(self, n):
            self._add(n)
        def _add(self, n):
            self.sum = self.sum + n
        def total(self):
            return self.sum
    o = Original()
    b = Bastion(o)
    testcode = """if 1:
@@ -143,23 +143,23 @@ def _test():

    b.add(18)

    print "b.total() =", b.total()
    try:
        print "b.sum =", b.sum,
    except:
        print "inaccessible"
    else:
        print "accessible"
    try:
        print "b._add =", b._add,
    except:
        print "inaccessible"
    else:
        print "accessible"
    try:
        print "b._get_.func_defaults =", b._get_.func_defaults,
    except:
        print "inaccessible"
    else:
        print "accessible"
    \n"""
    exec testcode
    print '='*20, "Using rexec:", '='*20
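A minimal usage sketch of the Bastion() function shown above (the Account class is illustrative):

from Bastion import Bastion

class Account:
    def __init__(self):
        self.balance = 0
    def deposit(self, n):
        self.balance = self.balance + n

a = Account()
b = Bastion(a)
b.deposit(10)                   # methods pass the default filter
try:
    print b.balance             # data attributes are not handed out
except AttributeError:
    print "balance is hidden by the bastion"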

Lib/CGIHTTPServer.py

@@ -30,138 +30,138 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    """

    def do_POST(self):
        """Serve a POST request.

        This is only implemented for CGI scripts.

        """

        if self.is_cgi():
            self.run_cgi()
        else:
            self.send_error(501, "Can only POST to CGI scripts")

    def send_head(self):
        """Version of send_head that supports CGI scripts"""
        if self.is_cgi():
            return self.run_cgi()
        else:
            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)

    def is_cgi(self):
        """Test whether PATH corresponds to a CGI script.

        Return a tuple (dir, rest) if PATH requires running a
        CGI script, None if not.  Note that rest begins with a
        slash if it is not empty.

        The default implementation tests whether the path
        begins with one of the strings in the list
        self.cgi_directories (and the next character is a '/'
        or the end of the string).

        """

        path = self.path

        for x in self.cgi_directories:
            i = len(x)
            if path[:i] == x and (not path[i:] or path[i] == '/'):
                self.cgi_info = path[:i], path[i+1:]
                return 1
        return 0

    cgi_directories = ['/cgi-bin', '/htbin']

    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info
        i = string.rfind(rest, '?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''
        i = string.find(rest, '/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''
        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%s)" % `scriptname`)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%s)" %
                            `scriptname`)
            return
        if not executable(scriptfile):
            self.send_error(403, "CGI script is not executable (%s)" %
                            `scriptname`)
            return
        nobody = nobody_uid()
        self.send_response(200, "Script output follows")
        self.wfile.flush() # Always flush before forking
        pid = os.fork()
        if pid != 0:
            # Parent
            pid, sts = os.waitpid(pid, 0)
            if sts:
                self.log_error("CGI script exit status x%x" % sts)
            return
        # Child
        try:
            # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
            # XXX Much of the following could be prepared ahead of time!
            env = {}
            env['SERVER_SOFTWARE'] = self.version_string()
            env['SERVER_NAME'] = self.server.server_name
            env['GATEWAY_INTERFACE'] = 'CGI/1.1'
            env['SERVER_PROTOCOL'] = self.protocol_version
            env['SERVER_PORT'] = str(self.server.server_port)
            env['REQUEST_METHOD'] = self.command
            uqrest = urllib.unquote(rest)
            env['PATH_INFO'] = uqrest
            env['PATH_TRANSLATED'] = self.translate_path(uqrest)
            env['SCRIPT_NAME'] = scriptname
            if query:
                env['QUERY_STRING'] = query
            host = self.address_string()
            if host != self.client_address[0]:
                env['REMOTE_HOST'] = host
            env['REMOTE_ADDR'] = self.client_address[0]
            # AUTH_TYPE
            # REMOTE_USER
            # REMOTE_IDENT
            env['CONTENT_TYPE'] = self.headers.type
            length = self.headers.getheader('content-length')
            if length:
                env['CONTENT_LENGTH'] = length
            accept = []
            for line in self.headers.getallmatchingheaders('accept'):
                if line[:1] in string.whitespace:
                    accept.append(string.strip(line))
                else:
                    accept = accept + string.split(line[7:])
            env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
            ua = self.headers.getheader('user-agent')
            if ua:
                env['HTTP_USER_AGENT'] = ua
            # XXX Other HTTP_* headers
            decoded_query = string.replace(query, '+', ' ')
            try:
                os.setuid(nobody)
            except os.error:
                pass
            os.dup2(self.rfile.fileno(), 0)
            os.dup2(self.wfile.fileno(), 1)
            print scriptfile, script, decoded_query
            os.execve(scriptfile,
                      [script, decoded_query],
                      env)
        except:
            self.server.handle_error(self.request, self.client_address)
            os._exit(127)


nobody = None
@@ -170,26 +170,26 @@ def nobody_uid():

    """Internal routine to get nobody's uid"""
    global nobody
    if nobody:
        return nobody
    import pwd
    try:
        nobody = pwd.getpwnam('nobody')[2]
    except pwd.error:
        nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
    return nobody


def executable(path):
    """Test for executable file."""
    try:
        st = os.stat(path)
    except os.error:
        return 0
    return st[0] & 0111 != 0


def test(HandlerClass = CGIHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    SimpleHTTPServer.test(HandlerClass, ServerClass)
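A minimal way to exercise this module (it serves the current directory and treats URLs under the cgi_directories listed above as scripts; the port comes from BaseHTTPServer.test()):

import CGIHTTPServer
CGIHTTPServer.test()        # serve files, executing /cgi-bin and /htbin scripts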

Lib/ConfigParser.py

@@ -34,23 +34,23 @@ ConfigParser -- responsible for for parsing a list of

    sections() -- return all the configuration section names, sans DEFAULT

    options(section) -- return list of configuration options for the named
                        section

    read(*filenames) -- read and parse the list of named configuration files

    get(section, option, raw=0) -- return a string value for the named
                                   option.  All % interpolations are
                                   expanded in the return values, based on
                                   the defaults passed into the constructor
                                   and the DEFAULT section.

    getint(section, options) -- like get(), but convert value to an integer

    getfloat(section, options) -- like get(), but convert value to a float

    getboolean(section, options) -- like get(), but convert value to
                                    a boolean (currently defined as 0
                                    or 1, only)
"""
import sys
@@ -71,186 +71,186 @@ DEFAULTSECT = "DEFAULT"


# exception classes
class Error:
    def __init__(self, msg=''):
        self.__msg = msg
    def __repr__(self):
        return self.__msg

class NoSectionError(Error):
    def __init__(self, section):
        Error.__init__(self, 'No section: %s' % section)
        self.section = section

class DuplicateSectionError(Error):
    def __init__(self, section):
        Error.__init__(self, "Section %s already exists" % section)
        self.section = section

class NoOptionError(Error):
    def __init__(self, option, section):
        Error.__init__(self, "No option `%s' in section: %s" %
                       (option, section))
        self.option = option
        self.section = section

class InterpolationError(Error):
    def __init__(self, reference, option, section):
        Error.__init__(self,
                       "Bad value substitution: sect `%s', opt `%s', ref `%s'"
                       % (section, option, reference))
        self.reference = reference
        self.option = option
        self.section = section


class ConfigParser:
    def __init__(self, defaults=None):
        self.__sections = {}
        if defaults is None:
            self.__defaults = {}
        else:
            self.__defaults = defaults

    def defaults(self):
        return self.__defaults

    def sections(self):
        """Return a list of section names, excluding [DEFAULT]"""
        # self.__sections will never have [DEFAULT] in it
        return self.__sections.keys()

    def add_section(self, section):
        """Create a new section in the configuration.

        Raise DuplicateSectionError if a section by the specified name
        already exists.
        """
        if self.__sections.has_key(section):
            raise DuplicateSectionError(section)
        self.__sections[section] = {}

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.

        The DEFAULT section is not acknowledged.
        """
        return self.__sections.has_key(section)

    def options(self, section):
        try:
            opts = self.__sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        opts.update(self.__defaults)
        return opts.keys()

    def read(self, filenames):
        """Read and parse a list of filenames."""
        if type(filenames) is type(''):
            filenames = [filenames]
        for file in filenames:
            try:
                fp = open(file, 'r')
                self.__read(fp)
            except IOError:
                pass

    def get(self, section, option, raw=0):
        """Get an option value for a given section.

        All % interpolations are expanded in the return values, based
        on the defaults passed into the constructor.

        The section DEFAULT is special.
        """
        try:
            sectdict = self.__sections[section].copy()
        except KeyError:
            if section == DEFAULTSECT:
                sectdict = {}
            else:
                raise NoSectionError(section)
        d = self.__defaults.copy()
        d.update(sectdict)
        option = string.lower(option)
        try:
            rawval = d[option]
        except KeyError:
            raise NoOptionError(option, section)
        # do the string interpolation
        if raw:
            return rawval
        try:
            return rawval % d
        except KeyError, key:
            raise InterpolationError(key, option, section)

    def __get(self, section, conv, option):
        return conv(self.get(section, option))

    def getint(self, section, option):
        return self.__get(section, string.atoi, option)

    def getfloat(self, section, option):
        return self.__get(section, string.atof, option)

    def getboolean(self, section, option):
        v = self.get(section, option)
        val = string.atoi(v)
        if val not in (0, 1):
            raise ValueError, 'Not a boolean: %s' % v
        return val

    def __read(self, fp):
        """Parse a sectioned setup file.

        The sections in the setup file contain a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else is ignored.
        """
        cursect = None                            # None, or a dictionary
        optname = None
        lineno = 0
        while 1:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if string.strip(line) == '' or line[0] in '#;':
                continue
            if string.lower(string.split(line)[0]) == 'rem' \
               and line[0] == "r":      # no leading whitespace
                continue
            # continuation line?
            if line[0] in ' \t' and cursect <> None and optname:
                value = string.strip(line)
                if value:
                    cursect = cursect[optname] + '\n ' + value
            # a section header?
            elif secthead_cre.match(line) >= 0:
                sectname = secthead_cre.group(1)
                if self.__sections.has_key(sectname):
                    cursect = self.__sections[sectname]
                elif sectname == DEFAULTSECT:
                    cursect = self.__defaults
                else:
                    cursect = {'name': sectname}
                    self.__sections[sectname] = cursect
                # So sections can't start with a continuation line.
                optname = None
            # an option line?
            elif option_cre.match(line) >= 0:
                optname, optval = option_cre.group(1, 3)
                optname = string.lower(optname)
                optval = string.strip(optval)
                # allow empty values
                if optval == '""':
                    optval = ''
                cursect[optname] = optval
            # an error
            else:
                print 'Error in %s at %d: %s', (fp.name, lineno, `line`)
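A minimal usage sketch of the parser API shown above (the file name, section names, and option names are illustrative):

import ConfigParser

cp = ConfigParser.ConfigParser({'home': '/usr/local'})
cp.read(['app.ini'])
print cp.sections()
print cp.get('paths', 'docroot')        # a value like '%(home)s/htdocs' expands here
print cp.getint('server', 'port')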

Lib/MimeWriter.py

@@ -47,7 +47,7 @@ class MimeWriter:

    w.startmultipartbody(subtype)
    for each part:
        subwriter = w.nextpart()
        ...use the subwriter's methods to create the subpart...
    w.lastpart()

    The subwriter is another MimeWriter instance, and should be
@@ -82,46 +82,46 @@ class MimeWriter:

    """

    def __init__(self, fp):
        self._fp = fp
        self._headers = []

    def addheader(self, key, value, prefix=0):
        lines = string.splitfields(value, "\n")
        while lines and not lines[-1]: del lines[-1]
        while lines and not lines[0]: del lines[0]
        for i in range(1, len(lines)):
            lines[i] = " " + string.strip(lines[i])
        value = string.joinfields(lines, "\n") + "\n"
        line = key + ": " + value
        if prefix:
            self._headers.insert(0, line)
        else:
            self._headers.append(line)

    def flushheaders(self):
        self._fp.writelines(self._headers)
        self._headers = []

    def startbody(self, ctype, plist=[], prefix=1):
        for name, value in plist:
            ctype = ctype + ';\n %s=\"%s\"' % (name, value)
        self.addheader("Content-Type", ctype, prefix=prefix)
        self.flushheaders()
        self._fp.write("\n")
        return self._fp

    def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
        self._boundary = boundary or mimetools.choose_boundary()
        return self.startbody("multipart/" + subtype,
                              [("boundary", self._boundary)] + plist,
                              prefix=prefix)

    def nextpart(self):
        self._fp.write("\n--" + self._boundary + "\n")
        return self.__class__(self._fp)

    def lastpart(self):
        self._fp.write("\n--" + self._boundary + "--\n")

if __name__ == '__main__':
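The excerpt cuts off at the module's self-test; a minimal usage sketch of the writer API shown above (the output stream and header values are illustrative):

import sys
import MimeWriter

w = MimeWriter.MimeWriter(sys.stdout)   # any object with a write() method
w.addheader("MIME-Version", "1.0")
w.startmultipartbody("mixed")
sub = w.nextpart()
fp = sub.startbody("text/plain")
fp.write("This is the first part.\n")
w.lastpart()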

Lib/Queue.py

@@ -4,102 +4,102 @@

# exceptions, but also when -X option is used.
try:
    class Empty(Exception):
        pass
except TypeError:
    # string based exceptions
    Empty = 'Queue.Empty'       # Exception raised by get_nowait()

class Queue:
    def __init__(self, maxsize):
        """Initialize a queue object with a given maximum size.

        If maxsize is <= 0, the queue size is infinite.
        """
        import thread
        self._init(maxsize)
        self.mutex = thread.allocate_lock()
        self.esema = thread.allocate_lock()
        self.esema.acquire_lock()
        self.fsema = thread.allocate_lock()

    def qsize(self):
        """Returns the approximate size of the queue (not reliable!)."""
        self.mutex.acquire_lock()
        n = self._qsize()
        self.mutex.release_lock()
        return n

    def empty(self):
        """Returns 1 if the queue is empty, 0 otherwise (not reliable!)."""
        self.mutex.acquire_lock()
        n = self._empty()
        self.mutex.release_lock()
        return n

    def full(self):
        """Returns 1 if the queue is full, 0 otherwise (not reliable!)."""
        self.mutex.acquire_lock()
        n = self._full()
        self.mutex.release_lock()
        return n

    def put(self, item):
        """Put an item into the queue."""
        self.fsema.acquire_lock()
        self.mutex.acquire_lock()
        was_empty = self._empty()
        self._put(item)
        if was_empty:
            self.esema.release_lock()
        if not self._full():
            self.fsema.release_lock()
        self.mutex.release_lock()

    def get(self):
        """Gets and returns an item from the queue.
        This method blocks if necessary until an item is available.
        """
        self.esema.acquire_lock()
        self.mutex.acquire_lock()
        was_full = self._full()
        item = self._get()
        if was_full:
            self.fsema.release_lock()
        if not self._empty():
            self.esema.release_lock()
        self.mutex.release_lock()
        return item

    # Get an item from the queue if one is immediately available,
    # raise Empty if the queue is empty or temporarily unavailable
    def get_nowait(self):
        """Gets and returns an item from the queue.
        Only gets an item if one is immediately available.  Otherwise
        this raises the Empty exception if the queue is empty or
        temporarily unavailable.
        """
        locked = self.esema.acquire_lock(0)
        self.mutex.acquire_lock()
        if self._empty():
            # The queue is empty -- we can't have esema
            self.mutex.release_lock()
            raise Empty
        if not locked:
            locked = self.esema.acquire_lock(0)
            if not locked:
                # Somebody else has esema
                # but we have mutex --
                # go out of their way
                self.mutex.release_lock()
                raise Empty
        was_full = self._full()
        item = self._get()
        if was_full:
            self.fsema.release_lock()
        if not self._empty():
            self.esema.release_lock()
        self.mutex.release_lock()
        return item

    # XXX Need to define put_nowait() as well.
@@ -110,26 +110,26 @@ class Queue:

    # Initialize the queue representation
    def _init(self, maxsize):
        self.maxsize = maxsize
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    # Check whether the queue is empty
    def _empty(self):
        return not self.queue

    # Check whether the queue is full
    def _full(self):
        return self.maxsize > 0 and len(self.queue) == self.maxsize

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        item = self.queue[0]
        del self.queue[0]
        return item
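A minimal usage sketch of the queue shown above, with one producer and one consumer thread (the job strings and the final sleep are illustrative):

import thread
import time
import Queue

q = Queue.Queue(0)                  # maxsize <= 0 means an unbounded queue

def worker(q):
    while 1:
        item = q.get()              # blocks until an item is available
        print "processed", item

thread.start_new_thread(worker, (q,))
q.put("job-1")
q.put("job-2")
time.sleep(1)                       # give the worker thread time to drain the queue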

Lib/SimpleHTTPServer.py

@@ -36,119 +36,119 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    server_version = "SimpleHTTP/" + __version__

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        if os.path.isdir(path):
            self.send_error(403, "Directory listing not supported")
            return None
        try:
            f = open(path)
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", self.guess_type(path))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        path = posixpath.normpath(path)
        words = string.splitfields(path, '/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
        to copy binary data as well.

        """

        BLOCKSIZE = 8192
        while 1:
            data = source.read(BLOCKSIZE)
            if not data: break
            outputfile.write(data)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using text/plain
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.

        """

        base, ext = posixpath.splitext(path)
        if self.extensions_map.has_key(ext):
            return self.extensions_map[ext]
        ext = string.lower(ext)
        if self.extensions_map.has_key(ext):
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    extensions_map = {
        '': 'text/plain',   # Default, *must* be present
        '.html': 'text/html',
        '.htm': 'text/html',
        '.gif': 'image/gif',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        }


def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = SocketServer.TCPServer):
    BaseHTTPServer.test(HandlerClass, ServerClass)
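A minimal way to exercise this module (test() is defined directly above; it serves the current directory):

import SimpleHTTPServer
SimpleHTTPServer.test()     # serve files from the current directory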

Lib/SocketServer.py

@@ -3,19 +3,19 @@

This module tries to capture the various aspects of defining a server:

- address family:
        - AF_INET: IP (Internet Protocol) sockets (default)
        - AF_UNIX: Unix domain sockets
        - others, e.g. AF_DECNET are conceivable (see <socket.h>
- socket type:
        - SOCK_STREAM (reliable stream, e.g. TCP)
        - SOCK_DGRAM (datagrams, e.g. UDP)
- client address verification before further looking at the request
        (This is actually a hook for any processing that needs to look
        at the request before anything else, e.g. logging)
- how to handle multiple requests:
        - synchronous (one request is handled at a time)
        - forking (each request is handled by a new process)
        - threading (each request is handled by a new thread)

The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server.  This is bad class design, but
@@ -25,14 +25,14 @@ slows down method lookups.)

There are four classes in an inheritance diagram that represent
synchronous servers of four types:

        +-----------+        +------------------+
        | TCPServer |------->| UnixStreamServer |
        +-----------+        +------------------+
              |
              v
        +-----------+        +--------------------+
        | UDPServer |------->| UnixDatagramServer |
        +-----------+        +--------------------+

Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
@@ -43,7 +43,7 @@ Forking and threading versions of each type of server can be created
using the ForkingServer and ThreadingServer mix-in classes.  For
instance, a threading UDP server class is created as follows:

        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass

The Mix-in class must come first, since it overrides a method defined
in UDPServer!
@@ -119,8 +119,8 @@ class TCPServer:

    - __init__(server_address, RequestHandlerClass)
    - serve_forever()
    - handle_request()  # if you don't use serve_forever()
    - fileno() -> int   # for select()

    Methods that may be overridden:
@@ -157,42 +157,42 @@ class TCPServer:

    request_queue_size = 5

    def __init__(self, server_address, RequestHandlerClass):
        """Constructor.  May be extended, do not override."""
        self.server_address = server_address
        self.RequestHandlerClass = RequestHandlerClass
        self.socket = socket.socket(self.address_family,
                                    self.socket_type)
        self.server_bind()
        self.server_activate()

    def server_bind(self):
        """Called by constructor to bind the socket.

        May be overridden.

        """
        self.socket.bind(self.server_address)

    def server_activate(self):
        """Called by constructor to activate the server.

        May be overridden.

        """
        self.socket.listen(self.request_queue_size)

    def fileno(self):
        """Return socket file number.

        Interface required by select().

        """
        return self.socket.fileno()

    def serve_forever(self):
        """Handle one request at a time until doomsday."""
        while 1:
            self.handle_request()

    # The distinction between handling, getting, processing and
    # finishing a request is fairly arbitrary.  Remember:
@@ -206,54 +206,54 @@ class TCPServer:

    # this constructor will handle the request all by itself

    def handle_request(self):
        """Handle one request, possibly blocking."""
        request, client_address = self.get_request()
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                self.handle_error(request, client_address)

    def get_request(self):
        """Get the request and client address from the socket.

        May be overridden.

        """
        return self.socket.accept()

    def verify_request(self, request, client_address):
        """Verify the request.  May be overridden.

        Return true if we should proceed with this request.

        """
        return 1

    def process_request(self, request, client_address):
        """Call finish_request.

        Overridden by ForkingMixIn and ThreadingMixIn.

        """
        self.finish_request(request, client_address)

    def finish_request(self, request, client_address):
        """Finish one request by instantiating RequestHandlerClass."""
        self.RequestHandlerClass(request, client_address, self)

    def handle_error(self, request, client_address):
        """Handle an error gracefully.  May be overridden.

        The default is to print a traceback and continue.

        """
        print '-'*40
        print 'Exception happened during processing of request from',
        print client_address
        import traceback
        traceback.print_exc()
        print '-'*40

class UDPServer(TCPServer):
@@ -265,19 +265,19 @@ class UDPServer(TCPServer):

    max_packet_size = 8192

    def get_request(self):
        return self.socket.recvfrom(self.max_packet_size)


if hasattr(socket, 'AF_UNIX'):

    class UnixStreamServer(TCPServer):
        address_family = socket.AF_UNIX

    class UnixDatagramServer(UDPServer):
        address_family = socket.AF_UNIX


class ForkingMixIn:
@@ -287,34 +287,34 @@ class ForkingMixIn:

    active_children = None

    def collect_children(self):
        """Internal routine to wait for dead children."""
        while self.active_children:
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid: break
            self.active_children.remove(pid)

    def process_request(self, request, client_address):
        """Fork a new subprocess to process the request."""
        self.collect_children()
        pid = os.fork()
        if pid:
            # Parent process
            if self.active_children is None:
                self.active_children = []
            self.active_children.append(pid)
            return
        else:
            # Child process.
            # This must never return, hence os._exit()!
            try:
                self.finish_request(request, client_address)
                os._exit(0)
            except:
                try:
                    self.handle_error(request,
                                      client_address)
                finally:
                    os._exit(1)

class ThreadingMixIn:
@ -322,10 +322,10 @@ class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
import thread
thread.start_new_thread(self.finish_request,
(request, client_address))
"""Start a new thread to process the request."""
import thread
thread.start_new_thread(self.finish_request,
(request, client_address))
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
@ -354,27 +354,27 @@ class BaseRequestHandler:
"""
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
self.request = request
self.client_address = client_address
self.server = server
try:
self.setup()
self.handle()
self.finish()
finally:
sys.exc_traceback = None # Help garbage collection
def setup(self):
pass
pass
def __del__(self):
pass
pass
def handle(self):
pass
pass
def finish(self):
pass
pass
# The following two classes make it possible to use the same service
@ -390,12 +390,12 @@ class StreamRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for stream sockets."""
def setup(self):
self.connection = self.request
self.rfile = self.connection.makefile('rb', 0)
self.wfile = self.connection.makefile('wb', 0)
self.connection = self.request
self.rfile = self.connection.makefile('rb', 0)
self.wfile = self.connection.makefile('wb', 0)
def finish(self):
self.wfile.flush()
self.wfile.flush()
class DatagramRequestHandler(BaseRequestHandler):
@ -403,10 +403,10 @@ class DatagramRequestHandler(BaseRequestHandler):
"""Define self.rfile and self.wfile for datagram sockets."""
def setup(self):
import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO.StringIO(self.packet)
self.wfile = StringIO.StringIO(self.packet)
import StringIO
self.packet, self.socket = self.request
self.rfile = StringIO.StringIO(self.packet)
self.wfile = StringIO.StringIO(self.packet)
def finish(self):
self.socket.send(self.wfile.getvalue())
self.socket.send(self.wfile.getvalue())
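As a rough illustration of how the classes above fit together, here is a minimal sketch of a threaded echo server built from TCPServer, ThreadingMixIn and StreamRequestHandler. The handler class, the server class name and the address 127.0.0.1:8007 are assumptions made up for the example, not part of the patch; it also assumes a Python built with thread support.

import SocketServer

class EchoHandler(SocketServer.StreamRequestHandler):
    def handle(self):
        # rfile/wfile were set up by StreamRequestHandler.setup()
        line = self.rfile.readline()
        self.wfile.write(line)

class ThreadedEchoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    pass

if __name__ == '__main__':
    # 127.0.0.1:8007 is an arbitrary example address
    server = ThreadedEchoServer(('127.0.0.1', 8007), EchoHandler)
    while 1:
        server.handle_request()   # each request is handled in its own thread

Each handle_request() call accepts one connection; ThreadingMixIn.process_request then hands it to finish_request in a new thread, so the loop comes back immediately to accept the next client.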


@ -4,30 +4,30 @@ class UserDict:
def __init__(self): self.data = {}
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if type(dict) == type(self.data):
return cmp(self.data, dict)
else:
return cmp(self.data, dict.data)
if type(dict) == type(self.data):
return cmp(self.data, dict)
else:
return cmp(self.data, dict.data)
def __len__(self): return len(self.data)
def __getitem__(self, key): return self.data[key]
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): return self.data.clear()
def copy(self):
import copy
return copy.copy(self)
import copy
return copy.copy(self)
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def values(self): return self.data.values()
def has_key(self, key): return self.data.has_key(key)
def update(self, other):
if type(other) is type(self.data):
self.data.update(other)
else:
for k, v in other.items():
self.data[k] = v
if type(other) is type(self.data):
self.data.update(other)
else:
for k, v in other.items():
self.data[k] = v
def get(self, key, failobj=None):
if self.data.has_key(key):
return self.data[key]
else:
return failobj
if self.data.has_key(key):
return self.data[key]
else:
return failobj
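Because UserDict keeps its state in self.data and routes every operation through the methods above, subclassing it is the usual way to customize mapping behaviour. The LowerCaseDict class below is a hypothetical example, not something defined in this patch.

import string
from UserDict import UserDict

class LowerCaseDict(UserDict):
    # Hypothetical subclass: normalize keys to lower case on the way in and out
    def __setitem__(self, key, item):
        self.data[string.lower(key)] = item
    def __getitem__(self, key):
        return self.data[string.lower(key)]

d = LowerCaseDict()
d['Spam'] = 1
print d['SPAM']     # 1
print d.keys()      # ['spam']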

Lib/cgi.py (1058): file diff suppressed because it is too large.

@ -32,32 +32,32 @@ def compile_command(source, filename="<input>", symbol="single"):
code = code1 = code2 = None
try:
code = compile(source, filename, symbol)
code = compile(source, filename, symbol)
except SyntaxError, err:
pass
pass
try:
code1 = compile(source + "\n", filename, symbol)
code1 = compile(source + "\n", filename, symbol)
except SyntaxError, err1:
pass
pass
try:
code2 = compile(source + "\n\n", filename, symbol)
code2 = compile(source + "\n\n", filename, symbol)
except SyntaxError, err2:
pass
pass
if code:
return code
return code
try:
e1 = err1.__dict__
e1 = err1.__dict__
except AttributeError:
e1 = err1
e1 = err1
try:
e2 = err2.__dict__
e2 = err2.__dict__
except AttributeError:
e2 = err2
e2 = err2
if not code1 and e1 == e2:
raise SyntaxError, err1
raise SyntaxError, err1
def interact(banner=None, readfunc=raw_input, local=None):
@ -70,41 +70,41 @@ def interact(banner=None, readfunc=raw_input, local=None):
sys.ps1 = '>>> '
sys.ps2 = '... '
if banner:
print banner
print banner
else:
print "Python Interactive Console", sys.version
print sys.copyright
print "Python Interactive Console", sys.version
print sys.copyright
buf = []
while 1:
if buf: prompt = sys.ps2
else: prompt = sys.ps1
try: line = readfunc(prompt)
except KeyboardInterrupt:
print "\nKeyboardInterrupt"
buf = []
continue
except EOFError: break
buf.append(line)
try: x = compile_command(string.join(buf, "\n"))
except SyntaxError:
traceback.print_exc(0)
buf = []
continue
if x == None: continue
else:
try: exec x in local
except:
exc_type, exc_value, exc_traceback = \
sys.exc_type, sys.exc_value, \
sys.exc_traceback
l = len(traceback.extract_tb(sys.exc_traceback))
try: 1/0
except:
m = len(traceback.extract_tb(
sys.exc_traceback))
traceback.print_exception(exc_type,
exc_value, exc_traceback, l-m)
buf = []
if buf: prompt = sys.ps2
else: prompt = sys.ps1
try: line = readfunc(prompt)
except KeyboardInterrupt:
print "\nKeyboardInterrupt"
buf = []
continue
except EOFError: break
buf.append(line)
try: x = compile_command(string.join(buf, "\n"))
except SyntaxError:
traceback.print_exc(0)
buf = []
continue
if x == None: continue
else:
try: exec x in local
except:
exc_type, exc_value, exc_traceback = \
sys.exc_type, sys.exc_value, \
sys.exc_traceback
l = len(traceback.extract_tb(sys.exc_traceback))
try: 1/0
except:
m = len(traceback.extract_tb(
sys.exc_traceback))
traceback.print_exception(exc_type,
exc_value, exc_traceback, l-m)
buf = []
if __name__ == '__main__':
interact()
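Callers that want the incomplete-input detection without the full interact() loop can use compile_command() directly: it returns a code object for a complete statement, None when more lines are still needed, and raises SyntaxError when the input can never become valid. A minimal sketch, assuming this file is importable as the code module; the sample strings are arbitrary.

import code

print code.compile_command("print 'hello'")   # complete statement -> code object
print code.compile_command("def f():")        # incomplete -> None, keep reading lines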


@ -72,11 +72,11 @@ def mk2arg(head, x):
#
def mkarg(x):
if '\'' not in x:
return ' \'' + x + '\''
return ' \'' + x + '\''
s = ' "'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
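mkarg() returns its argument quoted for /bin/sh with a leading space, preferring single quotes and falling back to double quotes with backslash escapes when the argument itself contains a single quote. A minimal usage sketch, assuming this is the commands module; the strings and the ls command are arbitrary examples.

import commands

print commands.mkarg('plain')            # single-quoted, with a leading space
print commands.mkarg("it's quoted")      # falls back to double quotes
cmd = 'ls -ld' + commands.mkarg('/tmp')
print commands.getoutput(cmd)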


@ -29,36 +29,36 @@ def compile_dir(dir, maxlevels=10, ddir=None):
"""
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
print "Can't list", dir
names = []
names.sort()
for name in names:
fullname = os.path.join(dir, name)
if ddir:
dfile = os.path.join(ddir, name)
else:
dfile = None
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
print 'Compiling', fullname, '...'
try:
py_compile.compile(fullname, None, dfile)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
if type(sys.exc_type) == type(''):
exc_type_name = sys.exc_type
else: exc_type_name = sys.exc_type.__name__
print 'Sorry:', exc_type_name + ':',
print sys.exc_value
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
compile_dir(fullname, maxlevels - 1, dfile)
fullname = os.path.join(dir, name)
if ddir:
dfile = os.path.join(ddir, name)
else:
dfile = None
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
print 'Compiling', fullname, '...'
try:
py_compile.compile(fullname, None, dfile)
except KeyboardInterrupt:
raise KeyboardInterrupt
except:
if type(sys.exc_type) == type(''):
exc_type_name = sys.exc_type
else: exc_type_name = sys.exc_type.__name__
print 'Sorry:', exc_type_name + ':',
print sys.exc_value
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
compile_dir(fullname, maxlevels - 1, dfile)
def compile_path(skip_curdir=1, maxlevels=0):
"""Byte-compile all module on sys.path.
@ -70,40 +70,40 @@ def compile_path(skip_curdir=1, maxlevels=0):
"""
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
compile_dir(dir, maxlevels)
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
compile_dir(dir, maxlevels)
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'ld:')
opts, args = getopt.getopt(sys.argv[1:], 'ld:')
except getopt.error, msg:
print msg
print "usage: compileall [-l] [-d destdir] [directory ...]"
print "-l: don't recurse down"
print "-d destdir: purported directory name for error messages"
print "if no arguments, -l sys.path is assumed"
sys.exit(2)
print msg
print "usage: compileall [-l] [-d destdir] [directory ...]"
print "-l: don't recurse down"
print "-d destdir: purported directory name for error messages"
print "if no arguments, -l sys.path is assumed"
sys.exit(2)
maxlevels = 10
ddir = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if ddir:
if len(args) != 1:
print "-d destdir require exactly one directory argument"
sys.exit(2)
if len(args) != 1:
print "-d destdir require exactly one directory argument"
sys.exit(2)
try:
if args:
for dir in args:
compile_dir(dir, maxlevels, ddir)
else:
compile_path()
if args:
for dir in args:
compile_dir(dir, maxlevels, ddir)
else:
compile_path()
except KeyboardInterrupt:
print "\n[interrupt]"
print "\n[interrupt]"
if __name__ == '__main__':
main()
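Besides being run as a script (python compileall.py [-l] [-d destdir] [directory ...]), the module can be driven programmatically. A minimal sketch; /usr/local/lib/mycode is an arbitrary example directory.

import compileall

# Recursively byte-compile one tree (up to 10 levels, the default)
compileall.compile_dir('/usr/local/lib/mycode')

# Byte-compile everything on sys.path except the current directory
compileall.compile_path()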


@ -48,18 +48,18 @@ Exception(*)
class Exception:
def __init__(self, *args):
self.args = args
self.args = args
def __str__(self):
if not self.args:
return ''
elif len(self.args) == 1:
return str(self.args[0])
else:
return str(self.args)
elif len(self.args) == 1:
return str(self.args[0])
else:
return str(self.args)
def __getitem__(self, i):
return self.args[i]
return self.args[i]
class StandardError(Exception):
pass
@ -68,21 +68,21 @@ class SyntaxError(StandardError):
filename = lineno = offset = text = None
msg = ""
def __init__(self, *args):
self.args = args
if len(self.args) >= 1:
self.msg = self.args[0]
if len(self.args) == 2:
info = self.args[1]
try:
self.filename, self.lineno, self.offset, self.text = info
except:
pass
self.args = args
if len(self.args) >= 1:
self.msg = self.args[0]
if len(self.args) == 2:
info = self.args[1]
try:
self.filename, self.lineno, self.offset, self.text = info
except:
pass
def __str__(self):
return str(self.msg)
class IOError(StandardError):
def __init__(self, *args):
self.args = args
self.args = args
self.errno = None
self.strerror = None
if len(args) == 2:
@ -146,7 +146,7 @@ class MemoryError(StandardError):
class SystemExit(Exception):
def __init__(self, *args):
self.args = args
self.args = args
if len(args) == 0:
self.code = None
elif len(args) == 1:
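Because these exceptions are ordinary classes, instances carry their constructor arguments in self.args, can be indexed through __getitem__, and stringify via __str__. A short sketch of what that looks like from user code; the argument values are arbitrary.

try:
    raise Exception('spam', 42)
except Exception, e:
    print str(e)        # "('spam', 42)"  -- __str__ with multiple args
    print e[0], e[1]    # spam 42         -- __getitem__ indexes self.args
    print e.args        # ('spam', 42)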


@ -80,7 +80,7 @@ _state = None
def input(files=(), inplace=0, backup=""):
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup)
return _state
@ -89,151 +89,151 @@ def close():
state = _state
_state = None
if state:
state.close()
state.close()
def nextfile():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.filelineno()
def isfirstline():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
if not _state:
raise RuntimeError, "no active input()"
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
def __init__(self, files=(), inplace=0, backup=""):
if type(files) == type(''):
files = (files,)
else:
files = tuple(files)
if not files:
files = tuple(sys.argv[1:])
if not files:
files = ('-',)
self._files = files
self._inplace = inplace
self._backup = backup
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = 0
if type(files) == type(''):
files = (files,)
else:
files = tuple(files)
if not files:
files = tuple(sys.argv[1:])
if not files:
files = ('-',)
self._files = files
self._inplace = inplace
self._backup = backup
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = 0
def __del__(self):
self.close()
self.close()
def close(self):
self.nextfile()
self._files = ()
self.nextfile()
self._files = ()
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
line = self.readline()
if not line:
raise IndexError, "end of input reached"
return line
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
line = self.readline()
if not line:
raise IndexError, "end of input reached"
return line
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except: pass
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except: pass
self._isstdin = 0
self._isstdin = 0
def readline(self):
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = 0
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = 1
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or ".bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next three lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, "r")
self._output = open(self._filename, "w")
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
self._file = open(self._filename, "r")
line = self._file.readline()
if line:
self._lineno = self._lineno + 1
self._filelineno = self._filelineno + 1
return line
self.nextfile()
# Recursive call
return self.readline()
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = 0
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = 1
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or ".bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next three lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, "r")
self._output = open(self._filename, "w")
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
self._file = open(self._filename, "r")
line = self._file.readline()
if line:
self._lineno = self._lineno + 1
self._filelineno = self._filelineno + 1
return line
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
return self._filename
def lineno(self):
return self._lineno
return self._lineno
def filelineno(self):
return self._filelineno
return self._filelineno
def isfirstline(self):
return self._filelineno == 1
return self._filelineno == 1
def isstdin(self):
return self._isstdin
return self._isstdin
def _test():
import getopt
@ -241,13 +241,13 @@ def _test():
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
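Typical use goes through the module-level convenience functions, which delegate to a single FileInput instance; since FileInput implements __getitem__, the object can be looped over directly, much like the module's own _test() above. A minimal sketch; the file names are arbitrary examples.

import fileinput

for line in fileinput.input(['a.txt', 'b.txt']):
    if line[-1:] == '\n': line = line[:-1]
    print "%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), line)

Passing inplace=1 redirects sys.stdout into the file currently being read, so the same loop rewrites each file in place, keeping a backup copy if a backup suffix is given.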


@ -9,9 +9,9 @@ AS_IS = None
class NullFormatter:
def __init__(self, writer=None):
if not writer:
writer = NullWriter()
self.writer = writer
if not writer:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
@ -39,88 +39,88 @@ class AbstractFormatter:
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
apply(self.writer.send_hor_rule, args, kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
if not self.hard_break:
self.writer.send_line_break()
apply(self.writer.send_hor_rule, args, kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if type(format) is StringType:
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if type(format) is StringType:
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
try:
if c == '1':
label = label + ('%d' % counter)
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
except:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
s = chr(ord(case) + x)
label = s + label
return label
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
@ -134,132 +134,132 @@ class AbstractFormatter:
else:
s = ''
s = s + ones[index]*x
label = s + label
label = s + label
index = index + 1
if case == 'I':
return string.upper(label)
return string.upper(label)
return label
def add_flowing_data(self, data,
# These are only here to load them into locals:
whitespace = string.whitespace,
join = string.join, split = string.split):
if not data: return
# The following looks a bit convoluted but is a great improvement over
# data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
prespace = data[:1] in whitespace
postspace = data[-1:] in whitespace
data = join(split(data))
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
# These are only here to load them into locals:
whitespace = string.whitespace,
join = string.join, split = string.split):
if not data: return
# The following looks a bit convoluted but is a great improvement over
# data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
prespace = data[:1] in whitespace
postspace = data[-1:] in whitespace
data = join(split(data))
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, (size, i, b, tt)):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = filter(None, self.margin_stack)
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
self.margin_stack.append(margin)
fstack = filter(None, self.margin_stack)
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = filter(None, self.margin_stack)
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
if self.margin_stack:
del self.margin_stack[-1]
fstack = filter(None, self.margin_stack)
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
@ -282,119 +282,119 @@ class NullWriter:
class AbstractWriter(NullWriter):
def __init__(self):
pass
pass
def new_alignment(self, align):
print "new_alignment(%s)" % `align`
print "new_alignment(%s)" % `align`
def new_font(self, font):
print "new_font(%s)" % `font`
print "new_font(%s)" % `font`
def new_margin(self, margin, level):
print "new_margin(%s, %d)" % (`margin`, level)
print "new_margin(%s, %d)" % (`margin`, level)
def new_spacing(self, spacing):
print "new_spacing(%s)" % `spacing`
print "new_spacing(%s)" % `spacing`
def new_styles(self, styles):
print "new_styles(%s)" % `styles`
print "new_styles(%s)" % `styles`
def send_paragraph(self, blankline):
print "send_paragraph(%s)" % `blankline`
print "send_paragraph(%s)" % `blankline`
def send_line_break(self):
print "send_line_break()"
print "send_line_break()"
def send_hor_rule(self, *args, **kw):
print "send_hor_rule()"
print "send_hor_rule()"
def send_label_data(self, data):
print "send_label_data(%s)" % `data`
print "send_label_data(%s)" % `data`
def send_flowing_data(self, data):
print "send_flowing_data(%s)" % `data`
print "send_flowing_data(%s)" % `data`
def send_literal_data(self, data):
print "send_literal_data(%s)" % `data`
print "send_literal_data(%s)" % `data`
class DumbWriter(NullWriter):
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n' + '\n'*blankline)
self.col = 0
self.atbreak = 0
self.file.write('\n' + '\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = string.rfind(data, '\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = string.expandtabs(data)
self.col = self.col + len(data)
self.atbreak = 0
self.file.write(data)
i = string.rfind(data, '\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = string.expandtabs(data)
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0] in string.whitespace
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in string.split(data):
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1] in string.whitespace
if not data: return
atbreak = self.atbreak or data[0] in string.whitespace
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in string.split(data):
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1] in string.whitespace
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file:
fp = open(file)
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
fp = open(sys.argv[1])
else:
fp = sys.stdin
fp = sys.stdin
while 1:
line = fp.readline()
if not line:
break
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
line = fp.readline()
if not line:
break
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
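The writer/formatter split above means the same formatter calls can drive any output device; DumbWriter is the plain-text one. A minimal sketch that reflows two chunks of text to a 40-column width; the strings are arbitrary examples.

import sys
import formatter

w = formatter.DumbWriter(sys.stdout, maxcol=40)
f = formatter.AbstractFormatter(w)
f.add_flowing_data("Runs of whitespace in flowing data are ")
f.add_flowing_data("collapsed and the text is wrapped at 40 columns.")
f.end_paragraph(1)
f.push_font((formatter.AS_IS, 1, formatter.AS_IS, formatter.AS_IS))
f.add_flowing_data("Font pushes reach the writer too, though DumbWriter ignores them.")
f.pop_font()
f.end_paragraph(0)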


@ -21,11 +21,11 @@
# detects an error.
# It returns two values:
# (1) a list of pairs (option, option_argument) giving the options in
# the order in which they were specified. (I'd use a dictionary
# but applications may depend on option order or multiple
# occurrences.) Boolean options have '' as option_argument.
# (2) the list of remaining arguments (may be empty).
# (1) a list of pairs (option, option_argument) giving the options in
# the order in which they were specified. (I'd use a dictionary
# but applications may depend on option order or multiple
# occurrences.) Boolean options have '' as option_argument.
# (2) the list of remaining arguments (may be empty).
import string
@ -36,31 +36,31 @@ def getopt(args, shortopts, longopts = []):
longopts = longopts[:]
longopts.sort()
while args and args[0][:1] == '-' and args[0] != '-':
if args[0] == '--':
args = args[1:]
break
if args[0][:2] == '--':
list, args = do_longs(list, args[0][2:], longopts, args[1:])
else:
list, args = do_shorts(list, args[0][1:], shortopts, args[1:])
if args[0] == '--':
args = args[1:]
break
if args[0][:2] == '--':
list, args = do_longs(list, args[0][2:], longopts, args[1:])
else:
list, args = do_shorts(list, args[0][1:], shortopts, args[1:])
return list, args
def do_longs(list, opt, longopts, args):
try:
i = string.index(opt, '=')
opt, optarg = opt[:i], opt[i+1:]
i = string.index(opt, '=')
opt, optarg = opt[:i], opt[i+1:]
except ValueError:
optarg = None
optarg = None
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise error, 'option --%s requires argument' % opt
optarg, args = args[0], args[1:]
if optarg is None:
if not args:
raise error, 'option --%s requires argument' % opt
optarg, args = args[0], args[1:]
elif optarg:
raise error, 'option --%s must not have an argument' % opt
raise error, 'option --%s must not have an argument' % opt
list.append(('--' + opt, optarg or ''))
return list, args
@ -70,35 +70,35 @@ def do_longs(list, opt, longopts, args):
def long_has_args(opt, longopts):
optlen = len(opt)
for i in range(len(longopts)):
x, y = longopts[i][:optlen], longopts[i][optlen:]
if opt != x:
continue
if y != '' and y != '=' and i+1 < len(longopts):
if opt == longopts[i+1][:optlen]:
raise error, 'option --%s not a unique prefix' % opt
if longopts[i][-1:] in ('=', ):
return 1, longopts[i][:-1]
return 0, longopts[i]
x, y = longopts[i][:optlen], longopts[i][optlen:]
if opt != x:
continue
if y != '' and y != '=' and i+1 < len(longopts):
if opt == longopts[i+1][:optlen]:
raise error, 'option --%s not a unique prefix' % opt
if longopts[i][-1:] in ('=', ):
return 1, longopts[i][:-1]
return 0, longopts[i]
raise error, 'option --' + opt + ' not recognized'
def do_shorts(list, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise error, 'option -%s requires argument' % opt
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
list.append(('-' + opt, optarg))
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise error, 'option -%s requires argument' % opt
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
list.append(('-' + opt, optarg))
return list, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts[i+1:i+2] == ':'
if opt == shortopts[i] != ':':
return shortopts[i+1:i+2] == ':'
raise error, 'option -%s not recognized' % opt
if __name__ == '__main__':
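A minimal sketch of the caller's side of this API; the option letters, long option names and messages are arbitrary examples. Long options that take an argument are listed with a trailing '='.

import sys
import getopt

try:
    opts, args = getopt.getopt(sys.argv[1:], 'vo:', ['verbose', 'output='])
except getopt.error, msg:
    print msg
    sys.exit(2)
for o, a in opts:
    if o in ('-v', '--verbose'): print 'verbose mode'
    if o in ('-o', '--output'):  print 'output file:', a
print 'remaining arguments:', args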


@ -47,222 +47,222 @@ class GzipFile:
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'r')
compresslevel=9, fileobj=None):
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'r')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'r'
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'r'
if mode[0:1] == 'r':
self.mode = READ
self._init_read()
self.filename = filename
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
if mode[0:1] == 'r':
self.mode = READ
self._init_read()
self.filename = filename
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
elif mode[0:1] == 'w':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise ValueError, "Mode " + mode + " not supported"
elif mode[0:1] == 'w':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise ValueError, "Mode " + mode + " not supported"
self.fileobj = fileobj
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header()
elif self.mode == READ:
self._read_gzip_header()
if self.mode == WRITE:
self._write_gzip_header()
elif self.mode == READ:
self._read_gzip_header()
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
if filename[-3:] != '.gz':
filename = filename + '.gz'
self.filename = filename
self.crc = zlib.crc32("")
self.size = 0
self.writebuf = []
self.bufsize = 0
if filename[-3:] != '.gz':
filename = filename + '.gz'
self.filename = filename
self.crc = zlib.crc32("")
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = self.filename[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
write32(self.fileobj, int(time.time()))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = self.filename[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
write32(self.fileobj, int(time.time()))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def _init_read(self):
self.crc = zlib.crc32("")
self.size = 0
self.extrabuf = ""
self.extrasize = 0
self.crc = zlib.crc32("")
self.size = 0
self.extrabuf = ""
self.extrasize = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise RuntimeError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise RuntimeError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
# modtime = self.fileobj.read(4)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(6)
magic = self.fileobj.read(2)
if magic != '\037\213':
raise RuntimeError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise RuntimeError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
# modtime = self.fileobj.read(4)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(6)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen=ord(self.fileobj.read(1))
xlen=xlen+256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen=ord(self.fileobj.read(1))
xlen=xlen+256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
def write(self,data):
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc)
self.fileobj.write( self.compress.compress(data) )
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc)
self.fileobj.write( self.compress.compress(data) )
def writelines(self,lines):
self.write(string.join(lines))
self.write(string.join(lines))
def read(self,size=None):
if self.extrasize <= 0 and self.fileobj is None:
return ''
if self.extrasize <= 0 and self.fileobj is None:
return ''
readsize = 1024
if not size: # get the whole thing
try:
while 1:
self._read(readsize)
readsize = readsize * 2
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = readsize * 2
except EOFError:
pass
chunk = self.extrabuf[:size]
self.extrabuf = self.extrabuf[size:]
self.extrasize = self.extrasize - size
readsize = 1024
if not size: # get the whole thing
try:
while 1:
self._read(readsize)
readsize = readsize * 2
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = readsize * 2
except EOFError:
pass
chunk = self.extrabuf[:size]
self.extrabuf = self.extrabuf[size:]
self.extrasize = self.extrasize - size
return chunk
return chunk
def _unread(self, buf):
self.extrabuf = buf + self.extrabuf
self.extrasize = len(buf) + self.extrasize
self.extrabuf = buf + self.extrabuf
self.extrasize = len(buf) + self.extrasize
def _read(self, size=1024):
try:
buf = self.fileobj.read(size)
except AttributeError:
raise EOFError, "Reached EOF"
if buf == "":
uncompress = self.decompress.flush()
if uncompress == "":
self._read_eof()
self.fileobj = None
raise EOFError, 'Reached EOF'
else:
uncompress = self.decompress.decompress(buf)
self.crc = zlib.crc32(uncompress, self.crc)
self.extrabuf = self.extrabuf + uncompress
self.extrasize = self.extrasize + len(uncompress)
self.size = self.size + len(uncompress)
try:
buf = self.fileobj.read(size)
except AttributeError:
raise EOFError, "Reached EOF"
if buf == "":
uncompress = self.decompress.flush()
if uncompress == "":
self._read_eof()
self.fileobj = None
raise EOFError, 'Reached EOF'
else:
uncompress = self.decompress.decompress(buf)
self.crc = zlib.crc32(uncompress, self.crc)
self.extrabuf = self.extrabuf + uncompress
self.extrasize = self.extrasize + len(uncompress)
self.size = self.size + len(uncompress)
def _read_eof(self):
# Andrew writes:
## We've read to the end of the file, so we have to rewind in order
## to reread the 8 bytes containing the CRC and the file size. The
## decompressor is smart and knows when to stop, so feeding it
## extra data is harmless.
self.fileobj.seek(-8, 2)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj)
if crc32 != self.crc:
self.error = "CRC check failed"
elif isize != self.size:
self.error = "Incorrect length of data produced"
# Andrew writes:
## We've read to the end of the file, so we have to rewind in order
## to reread the 8 bytes containing the CRC and the file size. The
## decompressor is smart and knows when to stop, so feeding it
## extra data is harmless.
self.fileobj.seek(-8, 2)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj)
if crc32 != self.crc:
self.error = "CRC check failed"
elif isize != self.size:
self.error = "Incorrect length of data produced"
def close(self):
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32(self.fileobj, self.crc)
write32(self.fileobj, self.size)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32(self.fileobj, self.crc)
write32(self.fileobj, self.size)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def flush(self):
self.fileobj.flush()
self.fileobj.flush()
def seek(self):
raise IOError, 'Random access not allowed in gzip files'
raise IOError, 'Random access not allowed in gzip files'
def tell(self):
raise IOError, 'I won\'t tell() you for gzip files'
raise IOError, 'I won\'t tell() you for gzip files'
def isatty(self):
return 0
return 0
def readline(self):
bufs = []
readsize = 100
while 1:
c = self.read(readsize)
i = string.find(c, '\n')
if i >= 0 or c == '':
bufs.append(c[:i])
self._unread(c[i+1:])
return string.join(bufs, '')
bufs.append(c)
readsize = readsize * 2
bufs = []
readsize = 100
while 1:
c = self.read(readsize)
i = string.find(c, '\n')
if i >= 0 or c == '':
bufs.append(c[:i])
self._unread(c[i+1:])
return string.join(bufs, '')
bufs.append(c)
readsize = readsize * 2
def readlines(self):
buf = self.read()
return string.split(buf, '\n')
buf = self.read()
return string.split(buf, '\n')
def writelines(self, L):
for line in L:
self.write(line)
for line in L:
self.write(line)
def _test():
@ -273,36 +273,36 @@ def _test():
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
args = args[1:]
if not args:
args = ["-"]
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", `arg`
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while 1:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", `arg`
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while 1:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
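A minimal sketch of round-tripping data through GzipFile; /tmp/example.gz is an arbitrary path. As the seek() and tell() stubs above indicate, the file object is strictly sequential.

from gzip import GzipFile

out = GzipFile('/tmp/example.gz', 'wb')
out.write('hello, gzip\n')
out.close()                      # writes the CRC and size trailer

inp = GzipFile('/tmp/example.gz', 'rb')
print inp.readline(),            # -> hello, gzip
inp.close()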


@ -34,12 +34,12 @@ class HTMLParser(SGMLParser):
def handle_data(self, data):
if self.savedata is not None:
self.savedata = self.savedata + data
self.savedata = self.savedata + data
else:
if self.nofill:
self.formatter.add_literal_data(data)
else:
self.formatter.add_flowing_data(data)
if self.nofill:
self.formatter.add_literal_data(data)
else:
self.formatter.add_flowing_data(data)
# --- Hooks to save data; shouldn't need to be overridden
@ -49,21 +49,21 @@ class HTMLParser(SGMLParser):
def save_end(self):
data = self.savedata
self.savedata = None
if not self.nofill:
data = string.join(string.split(data))
return data
if not self.nofill:
data = string.join(string.split(data))
return data
# --- Hooks for anchors; should probably be overridden
def anchor_bgn(self, href, name, type):
self.anchor = href
if self.anchor:
self.anchorlist.append(href)
self.anchorlist.append(href)
def anchor_end(self):
if self.anchor:
self.handle_data("[%d]" % len(self.anchorlist))
self.anchor = None
self.handle_data("[%d]" % len(self.anchorlist))
self.anchor = None
# --- Hook for images; should probably be overridden
@ -218,10 +218,10 @@ class HTMLParser(SGMLParser):
def do_li(self, attrs):
self.formatter.end_paragraph(0)
if self.list_stack:
[dummy, label, counter] = top = self.list_stack[-1]
top[2] = counter = counter+1
[dummy, label, counter] = top = self.list_stack[-1]
top[2] = counter = counter+1
else:
label, counter = '*', 0
label, counter = '*', 0
self.formatter.add_label_data(label, counter)
def start_ol(self, attrs):
@ -230,8 +230,8 @@ class HTMLParser(SGMLParser):
label = '1.'
for a, v in attrs:
if a == 'type':
if len(v) == 1: v = v + '.'
label = v
if len(v) == 1: v = v + '.'
label = v
self.list_stack.append(['ol', label, 0])
def end_ol(self):
@ -271,8 +271,8 @@ class HTMLParser(SGMLParser):
self.formatter.end_paragraph(bl)
if self.list_stack:
if self.list_stack[-1][0] == 'dd':
del self.list_stack[-1]
self.formatter.pop_margin()
del self.list_stack[-1]
self.formatter.pop_margin()
# --- Phrase Markup
@ -302,26 +302,26 @@ class HTMLParser(SGMLParser):
# Typographic Elements
def start_i(self, attrs):
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
def end_i(self):
self.formatter.pop_font()
self.formatter.pop_font()
def start_b(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
def end_b(self):
self.formatter.pop_font()
self.formatter.pop_font()
def start_tt(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
def end_tt(self):
self.formatter.pop_font()
self.formatter.pop_font()
def start_a(self, attrs):
href = ''
name = ''
type = ''
for attrname, value in attrs:
value = string.strip(value)
value = string.strip(value)
if attrname == 'href':
href = value
if attrname == 'name':
@ -350,8 +350,8 @@ class HTMLParser(SGMLParser):
alt = '(image)'
ismap = ''
src = ''
width = 0
height = 0
width = 0
height = 0
for attrname, value in attrs:
if attrname == 'align':
align = value
@ -361,12 +361,12 @@ class HTMLParser(SGMLParser):
ismap = value
if attrname == 'src':
src = value
if attrname == 'width':
try: width = string.atoi(value)
except: pass
if attrname == 'height':
try: height = string.atoi(value)
except: pass
if attrname == 'width':
try: width = string.atoi(value)
except: pass
if attrname == 'height':
try: height = string.atoi(value)
except: pass
self.handle_image(src, alt, ismap, align, width, height)
# --- Really Old Unofficial Deprecated Stuff
@ -388,35 +388,35 @@ def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
del args[0]
if args:
file = args[0]
file = args[0]
else:
file = 'test.html'
file = 'test.html'
if file == '-':
f = sys.stdin
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
f.close()
if silent:
f = formatter.NullFormatter()
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
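A minimal sketch of feeding markup through HTMLParser together with the formatter classes, along the same lines as test() above; the HTML string is an arbitrary example.

import htmllib, formatter

f = formatter.AbstractFormatter(formatter.DumbWriter())
p = htmllib.HTMLParser(f)
p.feed('<h1>Heading</h1><p>Some <i>flowing</i> text with an '
       '<a href="http://www.python.org/">anchor</a>.</p>')
p.close()
print
print 'anchors collected:', p.anchorlist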


@ -69,22 +69,22 @@ FROZEN_MODULE = 33
class _Verbose:
def __init__(self, verbose = 0):
self.verbose = verbose
self.verbose = verbose
def get_verbose(self):
return self.verbose
return self.verbose
def set_verbose(self, verbose):
self.verbose = verbose
self.verbose = verbose
# XXX The following is an experimental interface
def note(self, *args):
if self.verbose:
apply(self.message, args)
if self.verbose:
apply(self.message, args)
def message(self, format, *args):
print format%args
print format%args
class BasicModuleLoader(_Verbose):
@ -105,49 +105,49 @@ class BasicModuleLoader(_Verbose):
"""
def find_module(self, name, path = None):
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff: return stuff
return None
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff: return stuff
return None
def default_path(self):
return sys.path
return sys.path
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return None
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return None
def find_builtin_module(self, name):
if imp.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if imp.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
if imp.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if imp.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
try:
if type == BUILTIN_MODULE:
return imp.init_builtin(name)
if type == FROZEN_MODULE:
return imp.init_frozen(name)
if type == C_EXTENSION:
return imp.load_dynamic(name, filename, file)
if type == PY_SOURCE:
return imp.load_source(name, filename, file)
if type == PY_COMPILED:
return imp.load_compiled(name, filename, file)
finally:
if file: file.close()
raise ImportError, "Unrecognized module type (%s) for %s" % \
(`type`, name)
file, filename, (suff, mode, type) = stuff
try:
if type == BUILTIN_MODULE:
return imp.init_builtin(name)
if type == FROZEN_MODULE:
return imp.init_frozen(name)
if type == C_EXTENSION:
return imp.load_dynamic(name, filename, file)
if type == PY_SOURCE:
return imp.load_source(name, filename, file)
if type == PY_COMPILED:
return imp.load_compiled(name, filename, file)
finally:
if file: file.close()
raise ImportError, "Unrecognized module type (%s) for %s" % \
(`type`, name)
class Hooks(_Verbose):
@ -170,17 +170,17 @@ class Hooks(_Verbose):
def init_frozen(self, name): return imp.init_frozen(name)
def get_frozen_object(self, name): return imp.get_frozen_object(name)
def load_source(self, name, filename, file=None):
return imp.load_source(name, filename, file)
return imp.load_source(name, filename, file)
def load_compiled(self, name, filename, file=None):
return imp.load_compiled(name, filename, file)
return imp.load_compiled(name, filename, file)
def load_dynamic(self, name, filename, file=None):
return imp.load_dynamic(name, filename, file)
return imp.load_dynamic(name, filename, file)
def add_module(self, name):
d = self.modules_dict()
if d.has_key(name): return d[name]
d[name] = m = self.new_module(name)
return m
d = self.modules_dict()
if d.has_key(name): return d[name]
d[name] = m = self.new_module(name)
return m
# sys interface
def modules_dict(self): return sys.modules
@ -215,61 +215,61 @@ class ModuleLoader(BasicModuleLoader):
"""
def __init__(self, hooks = None, verbose = 0):
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
def default_path(self):
return self.hooks.default_path()
return self.hooks.default_path()
def modules_dict(self):
return self.hooks.modules_dict()
return self.hooks.modules_dict()
def get_hooks(self):
return self.hooks
return self.hooks
def set_hooks(self, hooks):
self.hooks = hooks
self.hooks = hooks
def find_builtin_module(self, name):
if self.hooks.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if self.hooks.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
if self.hooks.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if self.hooks.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name+suff)
try:
fp = self.hooks.openfile(fullname, mode)
return fp, fullname, info
except self.hooks.openfile_error:
pass
return None
if dir is None:
return self.find_builtin_module(name)
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name+suff)
try:
fp = self.hooks.openfile(fullname, mode)
return fp, fullname, info
except self.hooks.openfile_error:
pass
return None
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
else:
raise ImportError, "Unrecognized module type (%s) for %s" % \
(`type`, name)
finally:
if file: file.close()
m.__file__ = filename
return m
file, filename, (suff, mode, type) = stuff
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
else:
raise ImportError, "Unrecognized module type (%s) for %s" % \
(`type`, name)
finally:
if file: file.close()
m.__file__ = filename
return m
class FancyModuleLoader(ModuleLoader):
@ -277,22 +277,22 @@ class FancyModuleLoader(ModuleLoader):
"""Fancy module loader -- parses and execs the code itself."""
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, filename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
m.__file__ = filename
exec code in m.__dict__
return m
file, filename, (suff, mode, type) = stuff
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, filename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
m.__file__ = filename
exec code in m.__dict__
return m
class ModuleImporter(_Verbose):
@ -305,57 +305,57 @@ class ModuleImporter(_Verbose):
"""
def __init__(self, loader = None, verbose = 0):
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
def get_loader(self):
return self.loader
return self.loader
def set_loader(self, loader):
self.loader = loader
self.loader = loader
def get_hooks(self):
return self.loader.get_hooks()
return self.loader.get_hooks()
def set_hooks(self, hooks):
return self.loader.set_hooks(hooks)
return self.loader.set_hooks(hooks)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
if self.modules.has_key(name):
return self.modules[name] # Fast path
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
if self.modules.has_key(name):
return self.modules[name] # Fast path
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
def reload(self, module, path = None):
name = module.__name__
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, "Module %s not found for reload" % name
return self.loader.load_module(name, stuff)
name = module.__name__
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, "Module %s not found for reload" % name
return self.loader.load_module(name, stuff)
def unload(self, module):
del self.modules[module.__name__]
# XXX Should this try to clear the module's namespace?
del self.modules[module.__name__]
# XXX Should this try to clear the module's namespace?
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
def uninstall(self):
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
default_importer = None
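Editorial note: the ModuleImporter above swaps its own import_module/reload/unload into __builtin__ and restores the saved hooks in uninstall(). A minimal sketch of the same install/uninstall pattern written against Python 3's builtins module rather than the ihooks API shown here; the class name and the logging side effect are illustrative assumptions, not part of this commit.

    import builtins

    class LoggingImporter:
        """Wrap builtins.__import__ so every import is logged; restore on uninstall."""
        def __init__(self):
            self._saved_import = None

        def _import(self, name, globals=None, locals=None, fromlist=(), level=0):
            print("importing %s" % name)            # illustrative side effect only
            return self._saved_import(name, globals, locals, fromlist, level)

        def install(self):
            self._saved_import = builtins.__import__
            builtins.__import__ = self._import

        def uninstall(self):
            builtins.__import__ = self._saved_import

    # Usage sketch:
    # hook = LoggingImporter(); hook.install()
    # import json                                   # prints "importing json"
    # hook.uninstall()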


@ -63,10 +63,10 @@ def main():
while 1:
line = fp.readline()
if not line: break
if string.find(line, '{1, "') > -1:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
if string.find(line, '{1, "') > -1:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()


@ -16,93 +16,93 @@ def import_hook(name, globals=None, locals=None, fromlist=None):
q, tail = find_head_package(parent, name)
m = load_tail(q, tail)
if not fromlist:
return q
return q
if hasattr(m, "__path__"):
ensure_fromlist(m, fromlist)
ensure_fromlist(m, fromlist)
return m
def determine_parent(globals):
if not globals or not globals.has_key("__name__"):
return None
return None
pname = globals['__name__']
if globals.has_key("__path__"):
parent = sys.modules[pname]
assert globals is parent.__dict__
return parent
parent = sys.modules[pname]
assert globals is parent.__dict__
return parent
if '.' in pname:
i = string.rfind(pname, '.')
pname = pname[:i]
parent = sys.modules[pname]
assert parent.__name__ == pname
return parent
i = string.rfind(pname, '.')
pname = pname[:i]
parent = sys.modules[pname]
assert parent.__name__ == pname
return parent
return None
def find_head_package(parent, name):
if '.' in name:
i = string.find(name, '.')
head = name[:i]
tail = name[i+1:]
i = string.find(name, '.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
qname = head
q = import_module(head, qname, parent)
if q: return q, tail
if parent:
qname = head
parent = None
q = import_module(head, qname, parent)
if q: return q, tail
qname = head
parent = None
q = import_module(head, qname, parent)
if q: return q, tail
raise ImportError, "No module named " + qname
def load_tail(q, tail):
m = q
while tail:
i = string.find(tail, '.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = import_module(head, mname, m)
if not m:
raise ImportError, "No module named " + mname
i = string.find(tail, '.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = import_module(head, mname, m)
if not m:
raise ImportError, "No module named " + mname
return m
def ensure_fromlist(m, fromlist, recursive=0):
for sub in fromlist:
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = import_module(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def import_module(partname, fqname, parent):
try:
return sys.modules[fqname]
return sys.modules[fqname]
except KeyError:
pass
pass
try:
fp, pathname, stuff = imp.find_module(partname,
parent and parent.__path__)
fp, pathname, stuff = imp.find_module(partname,
parent and parent.__path__)
except ImportError:
return None
return None
try:
m = imp.load_module(fqname, fp, pathname, stuff)
m = imp.load_module(fqname, fp, pathname, stuff)
finally:
if fp: fp.close()
if fp: fp.close()
if parent:
setattr(parent, partname, m)
setattr(parent, partname, m)
return m
@ -110,7 +110,7 @@ def import_module(partname, fqname, parent):
def reload_hook(module):
name = module.__name__
if '.' not in name:
return import_module(name, name, None)
return import_module(name, name, None)
i = string.rfind(name, '.')
pname = name[:i]
parent = sys.modules[pname]
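Editorial note: find_head_package/load_tail above resolve a dotted module name one component at a time and return the head package when no fromlist is given. A rough illustration of that head/tail walk using importlib instead of the imp module; the example module name in the comment is only an example.

    import importlib

    def load_dotted(name):
        """Import 'a.b.c' by loading 'a', then 'a.b', then 'a.b.c'; return the head package."""
        parts = name.split('.')
        head = importlib.import_module(parts[0])
        fqname = parts[0]
        for part in parts[1:]:
            fqname = fqname + '.' + part
            importlib.import_module(fqname)
        return head

    # load_dotted('xml.sax.handler') behaves like "import xml.sax.handler":
    # it returns the 'xml' package with the submodules bound as attributes.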


@ -11,21 +11,21 @@ def _group(s):
if not grouping:return s
result=""
while s and grouping:
# if grouping is -1, we are done
if grouping[0]==CHAR_MAX:
break
# 0: re-use last group ad infinitum
elif grouping[0]!=0:
#process last group
group=grouping[0]
grouping=grouping[1:]
if result:
result=s[-group:]+conv['thousands_sep']+result
else:
result=s[-group:]
s=s[:-group]
# if grouping is -1, we are done
if grouping[0]==CHAR_MAX:
break
# 0: re-use last group ad infinitum
elif grouping[0]!=0:
#process last group
group=grouping[0]
grouping=grouping[1:]
if result:
result=s[-group:]+conv['thousands_sep']+result
else:
result=s[-group:]
s=s[:-group]
if s and result:
result=s+conv['thousands_sep']+result
result=s+conv['thousands_sep']+result
return result
def format(f,val,grouping=0):
@ -35,13 +35,13 @@ def format(f,val,grouping=0):
result = f % val
fields = string.splitfields(result,".")
if grouping:
fields[0]=_group(fields[0])
fields[0]=_group(fields[0])
if len(fields)==2:
return fields[0]+localeconv()['decimal_point']+fields[1]
return fields[0]+localeconv()['decimal_point']+fields[1]
elif len(fields)==1:
return fields[0]
return fields[0]
else:
raise Error,"Too many decimal points in result string"
raise Error,"Too many decimal points in result string"
def str(val):
"""Convert float to integer, taking the locale into account."""


@ -12,71 +12,71 @@ def url2pathname(pathname):
#
tp = urllib.splittype(pathname)[0]
if tp and tp <> 'file':
raise RuntimeError, 'Cannot convert non-local URL to pathname'
raise RuntimeError, 'Cannot convert non-local URL to pathname'
components = string.split(pathname, '/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] <> '':
del components[i]
else:
i = i+1
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] <> '':
del components[i]
else:
i = i+1
if not components[0]:
# Absolute unix path, don't start with colon
return string.join(components[1:], ':')
# Absolute unix path, don't start with colon
return string.join(components[1:], ':')
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
return ':' + string.join(components, ':')
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
return ':' + string.join(components, ':')
def pathname2url(pathname):
"convert mac pathname to /-delimited pathname"
if '/' in pathname:
raise RuntimeError, "Cannot convert pathname containing slashes"
raise RuntimeError, "Cannot convert pathname containing slashes"
components = string.split(pathname, ':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
del components[0]
if components[-1] == '':
del components[-1]
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(lambda x: x[:31], components)
if os.path.isabs(pathname):
return '/' + string.join(components, '/')
return '/' + string.join(components, '/')
else:
return string.join(components, '/')
return string.join(components, '/')
def test():
for url in ["index.html",
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print `url`, '->', `url2pathname(url)`
"bar/index.html",
"/foo/bar/index.html",
"/foo/bar/",
"/"]:
print `url`, '->', `url2pathname(url)`
for path in ["drive:",
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print `path`, '->', `pathname2url(path)`
"drive:dir:",
"drive:dir:file",
"drive:file",
"file",
":file",
":dir:",
":dir:file"]:
print `path`, '->', `pathname2url(path)`
if __name__ == '__main__':
test()
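Editorial note: the test() routine above exercises the mapping between classic Mac colon paths and URL-style slash paths. A simplified sketch of the pathname2url direction, using only the rule that a Mac path is absolute when it contains a colon but does not start with one; the 31-character truncation and the '::' to '..' handling from the original are deliberately omitted.

    def mac_to_url(pathname):
        """Rough sketch: convert a classic MacOS ':'-separated path to a '/'-separated one."""
        if '/' in pathname:
            raise ValueError("cannot convert a pathname containing slashes")
        parts = pathname.split(':')
        if parts and parts[0] == '':     # leading colon: relative path
            del parts[0]
        if parts and parts[-1] == '':    # trailing colon: directory reference
            del parts[-1]
        is_abs = ':' in pathname and not pathname.startswith(':')
        joined = '/'.join(parts)
        return '/' + joined if is_abs else joined

    # mac_to_url('drive:dir:file') -> '/drive/dir/file'
    # mac_to_url(':dir:file')      -> 'dir/file'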


@ -15,33 +15,33 @@ def getcaps():
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except:
continue
morecaps = readmailcapfile(fp)
fp.close()
for key in morecaps.keys():
if not caps.has_key(key):
caps[key] = morecaps[key]
else:
caps[key] = caps[key] + morecaps[key]
try:
fp = open(mailcap, 'r')
except:
continue
morecaps = readmailcapfile(fp)
fp.close()
for key in morecaps.keys():
if not caps.has_key(key):
caps[key] = morecaps[key]
else:
caps[key] = caps[key] + morecaps[key]
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# XXX Actually, this is Unix-specific
if os.environ.has_key('MAILCAPS'):
str = os.environ['MAILCAPS']
mailcaps = string.splitfields(str, ':')
str = os.environ['MAILCAPS']
mailcaps = string.splitfields(str, ':')
else:
if os.environ.has_key('HOME'):
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
if os.environ.has_key('HOME'):
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
@ -50,69 +50,69 @@ def listmailcapfiles():
def readmailcapfile(fp):
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or string.strip(line) == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = string.splitfields(key, '/')
for j in range(len(types)):
types[j] = string.strip(types[j])
key = string.lower(string.joinfields(types, '/'))
# Update the database
if caps.has_key(key):
caps[key].append(fields)
else:
caps[key] = [fields]
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or string.strip(line) == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = string.splitfields(key, '/')
for j in range(len(types)):
types[j] = string.strip(types[j])
key = string.lower(string.joinfields(types, '/'))
# Update the database
if caps.has_key(key):
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = string.find(field, '=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = string.strip(field[:i])
fvalue = string.strip(field[i+1:])
if fields.has_key(fkey):
# Ignore it
pass
else:
fields[fkey] = fvalue
i = string.find(field, '=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = string.strip(field[:i])
fvalue = string.strip(field[i+1:])
if fields.has_key(fkey):
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return string.strip(line[start:i]), i
@ -130,24 +130,24 @@ def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if e.has_key('test'):
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
if e.has_key('test'):
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if caps.has_key(MIMEtype):
entries = entries + caps[MIMEtype]
entries = entries + caps[MIMEtype]
MIMEtypes = string.splitfields(MIMEtype, '/')
MIMEtype = MIMEtypes[0] + '/*'
if caps.has_key(MIMEtype):
entries = entries + caps[MIMEtype]
entries = entries + caps[MIMEtype]
if key is not None:
entries = filter(lambda e, key=key: e.has_key(key), entries)
entries = filter(lambda e, key=key: e.has_key(key), entries)
return entries
def subst(field, MIMEtype, filename, plist=[]):
@ -155,39 +155,39 @@ def subst(field, MIMEtype, filename, plist=[]):
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c <> '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] <> '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
c = field[i]; i = i+1
if c <> '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] <> '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = string.lower(name) + '='
n = len(name)
for p in plist:
if string.lower(p[:n]) == name:
return p[n:]
if string.lower(p[:n]) == name:
return p[n:]
return ''
@ -197,23 +197,23 @@ def test():
import sys
caps = getcaps()
if not sys.argv[1:]:
show(caps)
return
show(caps)
return
for i in range(1, len(sys.argv), 2):
args = sys.argv[i:i+2]
if len(args) < 2:
print "usage: mailcap [MIMEtype file] ..."
return
MIMEtype = args[0]
file = args[1]
command, e = findmatch(caps, MIMEtype, 'view', file)
if not command:
print "No viewer found for", type
else:
print "Executing:", command
sts = os.system(command)
if sts:
print "Exit status:", sts
args = sys.argv[i:i+2]
if len(args) < 2:
print "usage: mailcap [MIMEtype file] ..."
return
MIMEtype = args[0]
file = args[1]
command, e = findmatch(caps, MIMEtype, 'view', file)
if not command:
print "No viewer found for", type
else:
print "Executing:", command
sts = os.system(command)
if sts:
print "Exit status:", sts
def show(caps):
print "Mailcap files:"
@ -225,14 +225,14 @@ def show(caps):
ckeys = caps.keys()
ckeys.sort()
for type in ckeys:
print type
entries = caps[type]
for e in entries:
keys = e.keys()
keys.sort()
for k in keys:
print " %-15s" % k, e[k]
print
print type
entries = caps[type]
for e in entries:
keys = e.keys()
keys.sort()
for k in keys:
print " %-15s" % k, e[k]
print
if __name__ == '__main__':
test()
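Editorial note: the getcaps/lookup/findmatch machinery above survives as the standard-library mailcap module (deprecated in 3.11, removed in 3.13). A usage sketch, assuming an interpreter that still ships the module and a system whose mailcap files define a viewer for the requested type; the filename is made up.

    import mailcap

    caps = mailcap.getcaps()                      # merge $MAILCAPS / ~/.mailcap / /etc/mailcap ...
    command, entry = mailcap.findmatch(
        caps, 'video/mpeg', key='view', filename='/tmp/movie.mpg')
    if command:
        print("would run:", command)              # e.g. "xmpeg /tmp/movie.mpg"
    else:
        print("no viewer configured for video/mpeg")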

File diff suppressed because it is too large


@ -48,49 +48,49 @@ def guess_type(url):
"""
if not inited:
init()
init()
base, ext = posixpath.splitext(url)
while suffix_map.has_key(ext):
base, ext = posixpath.splitext(base + suffix_map[ext])
base, ext = posixpath.splitext(base + suffix_map[ext])
if encodings_map.has_key(ext):
encoding = encodings_map[ext]
base, ext = posixpath.splitext(base)
encoding = encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
encoding = None
if types_map.has_key(ext):
return types_map[ext], encoding
return types_map[ext], encoding
elif types_map.has_key(string.lower(ext)):
return types_map[string.lower(ext)], encoding
return types_map[string.lower(ext)], encoding
else:
return None, encoding
return None, encoding
def init(files=None):
global inited
for file in files or knownfiles:
s = read_mime_types(file)
if s:
for key, value in s.items():
types_map[key] = value
s = read_mime_types(file)
if s:
for key, value in s.items():
types_map[key] = value
inited = 1
def read_mime_types(file):
try:
f = open(file)
f = open(file)
except IOError:
return None
return None
map = {}
while 1:
line = f.readline()
if not line: break
words = string.split(line)
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words: continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
map['.'+suff] = type
line = f.readline()
if not line: break
words = string.split(line)
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words: continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
map['.'+suff] = type
f.close()
return map
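Editorial note: guess_type above first peels off suffix_map/encodings_map suffixes and then looks the bare extension up in types_map, case-insensitively. The modern mimetypes module keeps the same two-step contract, so a short usage sketch:

    import mimetypes

    # An encoded suffix is peeled off first, then the remaining extension is typed.
    print(mimetypes.guess_type('report.tar.gz'))   # ('application/x-tar', 'gzip')
    print(mimetypes.guess_type('photo.JPEG'))      # ('image/jpeg', None) -- case-insensitive
    print(mimetypes.guess_type('notes.unknown'))   # (None, None)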


@ -89,7 +89,7 @@ class Pickler:
self.write(STOP)
def dump_special(self, callable, args, state = None):
if type(args) is not TupleType and args is not None:
if type(args) is not TupleType and args is not None:
raise PicklingError, "Second argument to dump_special " \
"must be a tuple"
@ -121,8 +121,8 @@ class Pickler:
memo = self.memo
if (not pers_save):
pid = self.persistent_id(object)
if (pid is not None):
pid = self.persistent_id(object)
if (pid is not None):
self.save_pers(pid)
return
@ -130,8 +130,8 @@ class Pickler:
t = type(object)
if ((t is TupleType) and (len(object) == 0)):
if (self.bin):
if ((t is TupleType) and (len(object) == 0)):
if (self.bin):
self.save_empty_tuple(object)
else:
self.save_tuple(object)
@ -162,9 +162,9 @@ class Pickler:
else:
tup = reduce(object)
if type(tup) is StringType:
self.save_global(object, tup)
return
if type(tup) is StringType:
self.save_global(object, tup)
return
if (type(tup) is not TupleType):
raise PicklingError, "Value returned by %s must be a " \
@ -172,7 +172,7 @@ class Pickler:
l = len(tup)
if ((l != 2) and (l != 3)):
if ((l != 2) and (l != 3)):
raise PicklingError, "tuple returned by %s must contain " \
"only two or three elements" % reduce
@ -189,9 +189,9 @@ class Pickler:
"by %s must be a tuple" % reduce
self.save_reduce(callable, arg_tup, state)
memo_len = len(memo)
self.write(self.put(memo_len))
memo[d] = (memo_len, object)
memo_len = len(memo)
self.write(self.put(memo_len))
memo[d] = (memo_len, object)
return
f(self, object)
@ -217,7 +217,7 @@ class Pickler:
save(arg_tup)
write(REDUCE)
if (state is not None):
if (state is not None):
save(state)
write(BUILD)
@ -284,7 +284,7 @@ class Pickler:
save(element)
if (len(object) and memo.has_key(d)):
if (self.bin):
if (self.bin):
write(POP_MARK + self.get(memo[d][0]))
return
@ -306,7 +306,7 @@ class Pickler:
save = self.save
memo = self.memo
if (self.bin):
if (self.bin):
write(EMPTY_LIST)
else:
write(MARK + LIST)
@ -337,7 +337,7 @@ class Pickler:
save = self.save
memo = self.memo
if (self.bin):
if (self.bin):
write(EMPTY_DICT)
else:
write(MARK + DICT)
@ -375,7 +375,7 @@ class Pickler:
if hasattr(object, '__getinitargs__'):
args = object.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
_keep_alive(args, memo)
else:
args = ()
@ -402,7 +402,7 @@ class Pickler:
stuff = object.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
@ -414,10 +414,10 @@ class Pickler:
if (name is None):
name = object.__name__
try:
module = object.__module__
except AttributeError:
module = whichmodule(object, name)
try:
module = object.__module__
except AttributeError:
module = whichmodule(object, name)
memo_len = len(memo)
write(GLOBAL + module + '\n' + name + '\n' +
@ -439,10 +439,10 @@ def _keep_alive(x, memo):
the memo itself...
"""
try:
memo[id(memo)].append(x)
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# aha, this is the first one :-)
memo[id(memo)]=[x]
classmap = {}
@ -461,8 +461,8 @@ def whichmodule(cls, clsname):
import sys
for name, module in sys.modules.items():
if name != '__main__' and \
hasattr(module, clsname) and \
if name != '__main__' and \
hasattr(module, clsname) and \
getattr(module, clsname) is cls:
break
else:
@ -601,18 +601,18 @@ class Unpickler:
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
instantiated = 0
if (not args and type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
value = apply(klass, args)
instantiated = 0
if (not args and type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
value = apply(klass, args)
self.append(value)
dispatch[INST] = load_inst
@ -623,19 +623,19 @@ class Unpickler:
del stack[k + 1]
args = tuple(stack[k + 1:])
del stack[k:]
instantiated = 0
if (not args and type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
value = apply(klass, args)
instantiated = 0
if (not args and type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
value = apply(klass, args)
self.append(value)
dispatch[OBJ] = load_obj
@ -663,11 +663,11 @@ class Unpickler:
callable = stack[-2]
arg_tup = stack[-1]
del stack[-2:]
del stack[-2:]
if type(callable) is not ClassType:
if not safe_constructors.has_key(callable):
try:
if type(callable) is not ClassType:
if not safe_constructors.has_key(callable):
try:
safe = callable.__safe_for_unpickling__
except AttributeError:
safe = None
@ -676,10 +676,10 @@ class Unpickler:
raise UnpicklingError, "%s is not safe for " \
"unpickling" % callable
if arg_tup is None:
value = callable.__basicnew__()
else:
value = apply(callable, arg_tup)
if arg_tup is None:
value = callable.__basicnew__()
else:
value = apply(callable, arg_tup)
self.append(value)
dispatch[REDUCE] = load_reduce
@ -689,7 +689,7 @@ class Unpickler:
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
@ -736,7 +736,7 @@ class Unpickler:
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
for i in range(mark + 1, len(stack)):
for i in range(mark + 1, len(stack)):
list.append(stack[i])
del stack[mark:]
@ -755,7 +755,7 @@ class Unpickler:
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
@ -769,15 +769,15 @@ class Unpickler:
try:
setstate = inst.__setstate__
except AttributeError:
try:
inst.__dict__.update(value)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__ is not
# accessible. Use the old way of unpickling the instance
# variables. This is a semantic difference when unpickling in
# restricted vs. unrestricted modes.
for k, v in value.items():
setattr(inst, k, v)
try:
inst.__dict__.update(value)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__ is not
# accessible. Use the old way of unpickling the instance
# variables. This is a semantic difference when unpickling in
# restricted vs. unrestricted modes.
for k, v in value.items():
setattr(inst, k, v)
else:
setstate(value)
dispatch[BUILD] = load_build
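Editorial note: several hunks above concern save_reduce/load_reduce and load_build, i.e. objects that describe their own reconstruction as (callable, args[, state]). A small round-trip sketch of that protocol with today's pickle module; the Point class is invented for illustration.

    import pickle

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __reduce__(self):
            # (callable, argument tuple, optional state) -- the shape that
            # save_reduce() writes and load_reduce()/load_build() replay.
            return (Point, (self.x, self.y), {'x': self.x, 'y': self.y})

    p = pickle.loads(pickle.dumps(Point(3, 4)))
    print(p.x, p.y)     # 3 4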


@ -2,71 +2,71 @@ import os
import sys
import string
MAXFD = 256 # Max number of file descriptors (os.getdtablesize()???)
MAXFD = 256 # Max number of file descriptors (os.getdtablesize()???)
_active = []
def _cleanup():
for inst in _active[:]:
inst.poll()
inst.poll()
class Popen3:
def __init__(self, cmd, capturestderr=0, bufsize=-1):
if type(cmd) == type(''):
cmd = ['/bin/sh', '-c', cmd]
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
errout, errin = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.close(0)
os.close(1)
if os.dup(p2cread) <> 0:
sys.stderr.write('popen2: bad read dup\n')
if os.dup(c2pwrite) <> 1:
sys.stderr.write('popen2: bad write dup\n')
if capturestderr:
os.close(2)
if os.dup(errin) <> 2: pass
for i in range(3, MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if capturestderr:
os.close(errin)
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
self.sts = -1 # Child not completed yet
_active.append(self)
if type(cmd) == type(''):
cmd = ['/bin/sh', '-c', cmd]
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
if capturestderr:
errout, errin = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.close(0)
os.close(1)
if os.dup(p2cread) <> 0:
sys.stderr.write('popen2: bad read dup\n')
if os.dup(c2pwrite) <> 1:
sys.stderr.write('popen2: bad write dup\n')
if capturestderr:
os.close(2)
if os.dup(errin) <> 2: pass
for i in range(3, MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
if capturestderr:
os.close(errin)
self.childerr = os.fdopen(errout, 'r', bufsize)
else:
self.childerr = None
self.sts = -1 # Child not completed yet
_active.append(self)
def poll(self):
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self.sts = sts
_active.remove(self)
except os.error:
pass
return self.sts
if self.sts < 0:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self.sts = sts
_active.remove(self)
except os.error:
pass
return self.sts
def wait(self):
pid, sts = os.waitpid(self.pid, 0)
if pid == self.pid:
self.sts = sts
_active.remove(self)
return self.sts
pid, sts = os.waitpid(self.pid, 0)
if pid == self.pid:
self.sts = sts
_active.remove(self)
return self.sts
def popen2(cmd, bufsize=-1):
_cleanup()
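Editorial note: Popen3 above hand-rolls the pipe/fork/dup/exec dance and reaps the child in poll()/wait(). The subprocess module that later superseded popen2 packages the same steps; a hedged rough equivalent of Popen3(cmd, capturestderr=1):

    import subprocess

    # Pipes for stdin, stdout and stderr, shell command run through /bin/sh -c.
    child = subprocess.Popen(['/bin/sh', '-c', 'echo out; echo err >&2'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    out, err = child.communicate()          # also waits for the child, like Popen3.wait()
    print(out, err, child.returncode)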


@ -6,56 +6,56 @@
# Extended file operations
#
# f = posixfile.open(filename, [mode, [bufsize]])
# will create a new posixfile object
# will create a new posixfile object
#
# f = posixfile.fileopen(fileobject)
# will create a posixfile object from a builtin file object
# will create a posixfile object from a builtin file object
#
# f.file()
# will return the original builtin file object
# will return the original builtin file object
#
# f.dup()
# will return a new file object based on a new filedescriptor
# will return a new file object based on a new filedescriptor
#
# f.dup2(fd)
# will return a new file object based on the given filedescriptor
# will return a new file object based on the given filedescriptor
#
# f.flags(mode)
# will turn on the associated flag (merge)
# mode can contain the following characters:
# will turn on the associated flag (merge)
# mode can contain the following characters:
#
# (character representing a flag)
# a append only flag
# c close on exec flag
# n no delay flag
# s synchronization flag
# a append only flag
# c close on exec flag
# n no delay flag
# s synchronization flag
# (modifiers)
# ! turn flags 'off' instead of default 'on'
# = copy flags 'as is' instead of default 'merge'
# ? return a string in which the characters represent the flags
# that are set
# ! turn flags 'off' instead of default 'on'
# = copy flags 'as is' instead of default 'merge'
# ? return a string in which the characters represent the flags
# that are set
#
# note: - the '!' and '=' modifiers are mutually exclusive.
# - the '?' modifier will return the status of the flags after they
# have been changed by other characters in the mode string
# note: - the '!' and '=' modifiers are mutually exclusive.
# - the '?' modifier will return the status of the flags after they
# have been changed by other characters in the mode string
#
# f.lock(mode [, len [, start [, whence]]])
# will (un)lock a region
# mode can contain the following characters:
# will (un)lock a region
# mode can contain the following characters:
#
# (character representing type of lock)
# u unlock
# r read lock
# w write lock
# u unlock
# r read lock
# w write lock
# (modifiers)
# | wait until the lock can be granted
# ? return the first lock conflicting with the requested lock
# or 'None' if there is no conflict. The lock returned is in the
# format (mode, len, start, whence, pid) where mode is a
# character representing the type of lock ('r' or 'w')
# | wait until the lock can be granted
# ? return the first lock conflicting with the requested lock
# or 'None' if there is no conflict. The lock returned is in the
# format (mode, len, start, whence, pid) where mode is a
# character representing the type of lock ('r' or 'w')
#
# note: - the '?' modifier prevents a region from being locked; it is
# query only
# note: - the '?' modifier prevents a region from being locked; it is
# query only
#
class _posixfile_:
@ -65,149 +65,149 @@ class _posixfile_:
# Internal routines
#
def __repr__(self):
file = self._file_
return "<%s posixfile '%s', mode '%s' at %s>" % \
(self.states[file.closed], file.name, file.mode, \
hex(id(self))[2:])
file = self._file_
return "<%s posixfile '%s', mode '%s' at %s>" % \
(self.states[file.closed], file.name, file.mode, \
hex(id(self))[2:])
def __del__(self):
self._file_.close()
self._file_.close()
#
# Initialization routines
#
def open(self, name, mode='r', bufsize=-1):
import __builtin__
return self.fileopen(__builtin__.open(name, mode, bufsize))
import __builtin__
return self.fileopen(__builtin__.open(name, mode, bufsize))
def fileopen(self, file):
if `type(file)` != "<type 'file'>":
raise TypeError, 'posixfile.fileopen() arg must be file object'
self._file_ = file
# Copy basic file methods
for method in file.__methods__:
setattr(self, method, getattr(file, method))
return self
if `type(file)` != "<type 'file'>":
raise TypeError, 'posixfile.fileopen() arg must be file object'
self._file_ = file
# Copy basic file methods
for method in file.__methods__:
setattr(self, method, getattr(file, method))
return self
#
# New methods
#
def file(self):
return self._file_
return self._file_
def dup(self):
import posix
import posix
try: ignore = posix.fdopen
except: raise AttributeError, 'dup() method unavailable'
try: ignore = posix.fdopen
except: raise AttributeError, 'dup() method unavailable'
return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
def dup2(self, fd):
import posix
import posix
try: ignore = posix.fdopen
except: raise AttributeError, 'dup() method unavailable'
try: ignore = posix.fdopen
except: raise AttributeError, 'dup() method unavailable'
posix.dup2(self._file_.fileno(), fd)
return posix.fdopen(fd, self._file_.mode)
posix.dup2(self._file_.fileno(), fd)
return posix.fdopen(fd, self._file_.mode)
def flags(self, *which):
import fcntl, FCNTL
import fcntl, FCNTL
if which:
if len(which) > 1:
raise TypeError, 'Too many arguments'
which = which[0]
else: which = '?'
if which:
if len(which) > 1:
raise TypeError, 'Too many arguments'
which = which[0]
else: which = '?'
l_flags = 0
if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
l_flags = 0
if 'n' in which: l_flags = l_flags | FCNTL.O_NDELAY
if 'a' in which: l_flags = l_flags | FCNTL.O_APPEND
if 's' in which: l_flags = l_flags | FCNTL.O_SYNC
file = self._file_
file = self._file_
if '=' not in which:
cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
if '!' in which: l_flags = cur_fl & ~ l_flags
else: l_flags = cur_fl | l_flags
if '=' not in which:
cur_fl = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
if '!' in which: l_flags = cur_fl & ~ l_flags
else: l_flags = cur_fl | l_flags
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFL, l_flags)
if 'c' in which:
arg = ('!' not in which) # 0 is don't, 1 is do close on exec
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
if 'c' in which:
arg = ('!' not in which) # 0 is don't, 1 is do close on exec
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_SETFD, arg)
if '?' in which:
which = '' # Return current flags
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
if FCNTL.O_APPEND & l_flags: which = which + 'a'
if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
which = which + 'c'
if FCNTL.O_NDELAY & l_flags: which = which + 'n'
if FCNTL.O_SYNC & l_flags: which = which + 's'
return which
if '?' in which:
which = '' # Return current flags
l_flags = fcntl.fcntl(file.fileno(), FCNTL.F_GETFL, 0)
if FCNTL.O_APPEND & l_flags: which = which + 'a'
if fcntl.fcntl(file.fileno(), FCNTL.F_GETFD, 0) & 1:
which = which + 'c'
if FCNTL.O_NDELAY & l_flags: which = which + 'n'
if FCNTL.O_SYNC & l_flags: which = which + 's'
return which
def lock(self, how, *args):
import struct, fcntl, FCNTL
import struct, fcntl, FCNTL
if 'w' in how: l_type = FCNTL.F_WRLCK
elif 'r' in how: l_type = FCNTL.F_RDLCK
elif 'u' in how: l_type = FCNTL.F_UNLCK
else: raise TypeError, 'no type of lock specified'
if 'w' in how: l_type = FCNTL.F_WRLCK
elif 'r' in how: l_type = FCNTL.F_RDLCK
elif 'u' in how: l_type = FCNTL.F_UNLCK
else: raise TypeError, 'no type of lock specified'
if '|' in how: cmd = FCNTL.F_SETLKW
elif '?' in how: cmd = FCNTL.F_GETLK
else: cmd = FCNTL.F_SETLK
if '|' in how: cmd = FCNTL.F_SETLKW
elif '?' in how: cmd = FCNTL.F_GETLK
else: cmd = FCNTL.F_SETLK
l_whence = 0
l_start = 0
l_len = 0
l_whence = 0
l_start = 0
l_len = 0
if len(args) == 1:
l_len = args[0]
elif len(args) == 2:
l_len, l_start = args
elif len(args) == 3:
l_len, l_start, l_whence = args
elif len(args) > 3:
raise TypeError, 'too many arguments'
if len(args) == 1:
l_len = args[0]
elif len(args) == 2:
l_len, l_start = args
elif len(args) == 3:
l_len, l_start, l_whence = args
elif len(args) > 3:
raise TypeError, 'too many arguments'
# Hack by davem@magnet.com to get locking to go on freebsd;
# additions for AIX by Vladimir.Marangozov@imag.fr
# Hack by davem@magnet.com to get locking to go on freebsd;
# additions for AIX by Vladimir.Marangozov@imag.fr
import sys, os
if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
flock = struct.pack('lxxxxlxxxxlhh', \
l_start, l_len, os.getpid(), l_type, l_whence)
flock = struct.pack('lxxxxlxxxxlhh', \
l_start, l_len, os.getpid(), l_type, l_whence)
elif sys.platform in ['aix3', 'aix4']:
flock = struct.pack('hhlllii', \
l_type, l_whence, l_start, l_len, 0, 0, 0)
else:
flock = struct.pack('hhllhh', \
l_type, l_whence, l_start, l_len, 0, 0)
else:
flock = struct.pack('hhllhh', \
l_type, l_whence, l_start, l_len, 0, 0)
flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
if '?' in how:
if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
l_start, l_len, l_pid, l_type, l_whence = \
struct.unpack('lxxxxlxxxxlhh', flock)
if '?' in how:
if sys.platform in ('netbsd1', 'freebsd2', 'freebsd3'):
l_start, l_len, l_pid, l_type, l_whence = \
struct.unpack('lxxxxlxxxxlhh', flock)
elif sys.platform in ['aix3', 'aix4']:
l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
struct.unpack('hhlllii', flock)
elif sys.platform == "linux2":
l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
struct.unpack('hhllhh', flock)
else:
l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
struct.unpack('hhllhh', flock)
elif sys.platform == "linux2":
l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
struct.unpack('hhllhh', flock)
else:
l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
struct.unpack('hhllhh', flock)
if l_type != FCNTL.F_UNLCK:
if l_type == FCNTL.F_RDLCK:
return 'r', l_len, l_start, l_whence, l_pid
else:
return 'w', l_len, l_start, l_whence, l_pid
if l_type != FCNTL.F_UNLCK:
if l_type == FCNTL.F_RDLCK:
return 'r', l_len, l_start, l_whence, l_pid
else:
return 'w', l_len, l_start, l_whence, l_pid
#
# Public routine to obtain a posixfile object
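Editorial note: posixfile's lock() above packs a platform-specific flock structure by hand. On current Unix Pythons the same fcntl record locks are reachable through fcntl.lockf(), so a minimal sketch of the 'w|' (write lock, wait) and 'u' (unlock) modes; the lock-file path is arbitrary.

    import fcntl

    with open('/tmp/spool.lock', 'w') as f:
        fcntl.lockf(f, fcntl.LOCK_EX)        # like lock('w|'): blocking write lock
        try:
            f.write('locked region\n')       # default region is the whole file
        finally:
            fcntl.lockf(f, fcntl.LOCK_UN)    # like lock('u')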


@ -1,5 +1,5 @@
# Author: Fred L. Drake, Jr.
# fdrake@cnri.reston.va.us, fdrake@acm.org
# Author: Fred L. Drake, Jr.
# fdrake@cnri.reston.va.us, fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
@ -70,119 +70,119 @@ def saferepr(object):
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0
assert (not depth) or depth > 0, "depth may not be negative"
assert width
self.__depth = depth
self.__indent_per_level = indent
self.__width = width
if stream:
self.__stream = stream
else:
import sys
self.__stream = sys.stdout
"""
indent = int(indent)
width = int(width)
assert indent >= 0
assert (not depth) or depth > 0, "depth may not be negative"
assert width
self.__depth = depth
self.__indent_per_level = indent
self.__width = width
if stream:
self.__stream = stream
else:
import sys
self.__stream = sys.stdout
def pprint(self, object):
self.__stream.write(self.pformat(object) + "\n")
self.__stream.write(self.pformat(object) + "\n")
def pformat(self, object):
sio = StringIO()
self.__format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
sio = StringIO()
self.__format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
self.__recursive = 0
self.pformat(object)
return self.__recursive
self.__recursive = 0
self.pformat(object)
return self.__recursive
def isreadable(self, object):
self.__recursive = 0
self.__readable = 1
self.pformat(object)
return self.__readable and not self.__recursive
self.__recursive = 0
self.__readable = 1
self.pformat(object)
return self.__readable and not self.__recursive
def __format(self, object, stream, indent, allowance, context, level):
level = level + 1
if context.has_key(id(object)):
object = _Recursion(object)
self.__recursive = 1
rep = self.__repr(object, context, level - 1)
objid = id(object)
context[objid] = 1
typ = type(object)
sepLines = len(rep) > (self.__width - 1 - indent - allowance)
level = level + 1
if context.has_key(id(object)):
object = _Recursion(object)
self.__recursive = 1
rep = self.__repr(object, context, level - 1)
objid = id(object)
context[objid] = 1
typ = type(object)
sepLines = len(rep) > (self.__width - 1 - indent - allowance)
if sepLines and typ in (ListType, TupleType):
# Pretty-print the sequence.
stream.write((typ is ListType) and '[' or '(')
if self.__indent_per_level > 1:
stream.write((self.__indent_per_level - 1) * ' ')
length = len(object)
if length:
indent = indent + self.__indent_per_level
self.__format(object[0], stream, indent, allowance + 1,
context, level)
if len(object) > 1:
for ent in object[1:]:
stream.write(',\n' + ' '*indent)
self.__format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self.__indent_per_level
if typ is TupleType and length == 1:
stream.write(',')
stream.write(((typ is ListType) and ']') or ')')
if sepLines and typ in (ListType, TupleType):
# Pretty-print the sequence.
stream.write((typ is ListType) and '[' or '(')
if self.__indent_per_level > 1:
stream.write((self.__indent_per_level - 1) * ' ')
length = len(object)
if length:
indent = indent + self.__indent_per_level
self.__format(object[0], stream, indent, allowance + 1,
context, level)
if len(object) > 1:
for ent in object[1:]:
stream.write(',\n' + ' '*indent)
self.__format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self.__indent_per_level
if typ is TupleType and length == 1:
stream.write(',')
stream.write(((typ is ListType) and ']') or ')')
elif sepLines and typ is DictType:
stream.write('{')
if self.__indent_per_level > 1:
stream.write((self.__indent_per_level - 1) * ' ')
length = len(object)
if length:
indent = indent + self.__indent_per_level
items = object.items()
items.sort()
key, ent = items[0]
rep = self.__repr(key, context, level) + ': '
stream.write(rep)
self.__format(ent, stream, indent + len(rep),
allowance + 1, context, level)
if len(items) > 1:
for key, ent in items[1:]:
rep = self.__repr(key, context, level) + ': '
stream.write(',\n' + ' '*indent + rep)
self.__format(ent, stream, indent + len(rep),
allowance + 1, context, level)
indent = indent - self.__indent_per_level
stream.write('}')
elif sepLines and typ is DictType:
stream.write('{')
if self.__indent_per_level > 1:
stream.write((self.__indent_per_level - 1) * ' ')
length = len(object)
if length:
indent = indent + self.__indent_per_level
items = object.items()
items.sort()
key, ent = items[0]
rep = self.__repr(key, context, level) + ': '
stream.write(rep)
self.__format(ent, stream, indent + len(rep),
allowance + 1, context, level)
if len(items) > 1:
for key, ent in items[1:]:
rep = self.__repr(key, context, level) + ': '
stream.write(',\n' + ' '*indent + rep)
self.__format(ent, stream, indent + len(rep),
allowance + 1, context, level)
indent = indent - self.__indent_per_level
stream.write('}')
else:
stream.write(rep)
del context[objid]
else:
stream.write(rep)
del context[objid]
def __repr(self, object, context, level):
repr, readable = _safe_repr(object, context, self.__depth, level)
if not readable:
self.__readable = 0
return repr
repr, readable = _safe_repr(object, context, self.__depth, level)
if not readable:
self.__readable = 0
return repr
def _safe_repr(object, context, maxlevels=None, level=0):
@ -190,54 +190,54 @@ def _safe_repr(object, context, maxlevels=None, level=0):
readable = 1
typ = type(object)
if not (typ in (DictType, ListType, TupleType) and object):
rep = `object`
if rep:
if rep[0] == '<':
readable = 0
else:
readable = 0
return `object`, readable
rep = `object`
if rep:
if rep[0] == '<':
readable = 0
else:
readable = 0
return `object`, readable
if context.has_key(id(object)):
return `_Recursion(object)`, 0
return `_Recursion(object)`, 0
objid = id(object)
context[objid] = 1
if typ is DictType:
if maxlevels and level >= maxlevels:
s = "{...}"
readable = 0
else:
items = object.items()
k, v = items[0]
krepr, kreadable = _safe_repr(k, context, maxlevels, level)
vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
readable = readable and kreadable and vreadable
s = "{%s: %s" % (krepr, vrepr)
for k, v in items[1:]:
krepr, kreadable = _safe_repr(k, context, maxlevels, level)
vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
readable = readable and kreadable and vreadable
s = "%s, %s: %s" % (s, krepr, vrepr)
s = s + "}"
if maxlevels and level >= maxlevels:
s = "{...}"
readable = 0
else:
items = object.items()
k, v = items[0]
krepr, kreadable = _safe_repr(k, context, maxlevels, level)
vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
readable = readable and kreadable and vreadable
s = "{%s: %s" % (krepr, vrepr)
for k, v in items[1:]:
krepr, kreadable = _safe_repr(k, context, maxlevels, level)
vrepr, vreadable = _safe_repr(v, context, maxlevels, level)
readable = readable and kreadable and vreadable
s = "%s, %s: %s" % (s, krepr, vrepr)
s = s + "}"
else:
s, term = (typ is ListType) and ('[', ']') or ('(', ')')
if maxlevels and level >= maxlevels:
s = s + "..."
readable = 0
else:
subrepr, subreadable = _safe_repr(
object[0], context, maxlevels, level)
readable = readable and subreadable
s = s + subrepr
tail = object[1:]
if not tail:
if typ is TupleType:
s = s + ','
for ent in tail:
subrepr, subreadable = _safe_repr(
ent, context, maxlevels, level)
readable = readable and subreadable
s = "%s, %s" % (s, subrepr)
s = s + term
s, term = (typ is ListType) and ('[', ']') or ('(', ')')
if maxlevels and level >= maxlevels:
s = s + "..."
readable = 0
else:
subrepr, subreadable = _safe_repr(
object[0], context, maxlevels, level)
readable = readable and subreadable
s = s + subrepr
tail = object[1:]
if not tail:
if typ is TupleType:
s = s + ','
for ent in tail:
subrepr, subreadable = _safe_repr(
ent, context, maxlevels, level)
readable = readable and subreadable
s = "%s, %s" % (s, subrepr)
s = s + term
del context[objid]
return s, readable
@ -246,8 +246,8 @@ class _Recursion:
# represent a recursive relationship; really only used for the __repr__()
# method...
def __init__(self, object):
self.__repr = "<Recursion on %s with id=%s>" \
% (type(object).__name__, id(object))
self.__repr = "<Recursion on %s with id=%s>" \
% (type(object).__name__, id(object))
def __repr__(self):
return self.__repr
return self.__repr
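Editorial note: PrettyPrinter's __init__ above documents indent, width, depth and stream, and isrecursive()/isreadable() report whether the rendered value loops back on itself and whether its repr could be read back. A short usage sketch against the modern pprint module; the sample data is invented.

    import pprint

    data = {'name': 'example', 'values': list(range(12)), 'nested': {'a': [1, 2, 3]}}
    pp = pprint.PrettyPrinter(indent=1, width=40, depth=None)
    pp.pprint(data)                      # wraps the long list across lines

    data['self'] = data                  # introduce a cycle
    print(pp.isrecursive(data))          # True
    print(pp.isreadable(data))           # False: output now contains a recursion marker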


@ -20,9 +20,9 @@ def compile(file, cfile=None, dfile=None):
file: source filename
cfile: target filename; defaults to source with 'c' or 'o' appended
('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
dfile: purported filename; defaults to source (this is the filename
that will show up in error messages)
that will show up in error messages)
Note that it isn't necessary to byte-compile Python modules for
execution efficiency -- Python itself byte-compiles a module when
@ -44,16 +44,16 @@ def compile(file, cfile=None, dfile=None):
import os, marshal, __builtin__
f = open(file)
try:
timestamp = os.fstat(file.fileno())
timestamp = os.fstat(file.fileno())
except AttributeError:
timestamp = long(os.stat(file)[8])
timestamp = long(os.stat(file)[8])
codestring = f.read()
f.close()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
codestring = codestring + '\n'
codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
if not cfile:
cfile = file + (__debug__ and 'c' or 'o')
cfile = file + (__debug__ and 'c' or 'o')
fc = open(cfile, 'wb')
fc.write('\0\0\0\0')
wr_long(fc, timestamp)
@ -63,6 +63,6 @@ def compile(file, cfile=None, dfile=None):
fc.write(MAGIC)
fc.close()
if os.name == 'mac':
import macfs
macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
macfs.FSSpec(file).SetCreatorType('Pyth', 'TEXT')
import macfs
macfs.FSSpec(cfile).SetCreatorType('Pyth', 'PYC ')
macfs.FSSpec(file).SetCreatorType('Pyth', 'TEXT')
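Editorial note: compile() above takes the source filename, an optional target (.pyc/.pyo) and an optional "purported" filename used in tracebacks. The same three parameters survive in today's py_compile; a usage sketch with invented filenames.

    import py_compile

    # Byte-compile spam.py under a chosen .pyc name, and make tracebacks
    # report the source as 'installed/spam.py'.
    py_compile.compile('spam.py',
                       cfile='spam.pyc',
                       dfile='installed/spam.py',
                       doraise=True)       # raise PyCompileError instead of printing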

Lib/re.py

@ -30,12 +30,12 @@ _MAXCACHE = 20
def _cachecompile(pattern, flags=0):
key = (pattern, flags)
try:
return _cache[key]
return _cache[key]
except KeyError:
pass
pass
value = compile(pattern, flags)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache.clear()
_cache[key] = value
return value
@ -47,17 +47,17 @@ def search(pattern, string, flags=0):
def sub(pattern, repl, string, count=0):
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
pattern = _cachecompile(pattern)
return pattern.sub(repl, string, count)
def subn(pattern, repl, string, count=0):
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
pattern = _cachecompile(pattern)
return pattern.subn(repl, string, count)
def split(pattern, string, maxsplit=0):
if type(pattern) == type(''):
pattern = _cachecompile(pattern)
pattern = _cachecompile(pattern)
return pattern.split(string, maxsplit)
def escape(pattern):
@ -65,10 +65,10 @@ def escape(pattern):
result = []
alphanum=string.letters+'_'+string.digits
for char in pattern:
if char not in alphanum:
if char == '\000': result.append(r'\000')
else: result.append('\\' + char)
else: result.append(char)
if char not in alphanum:
if char == '\000': result.append(r'\000')
else: result.append('\\' + char)
else: result.append(char)
return string.join(result, '')
def compile(pattern, flags=0):
@ -84,137 +84,137 @@ def compile(pattern, flags=0):
class RegexObject:
def __init__(self, pattern, flags, code, groupindex):
self.code = code
self.flags = flags
self.pattern = pattern
self.groupindex = groupindex
self.code = code
self.flags = flags
self.pattern = pattern
self.groupindex = groupindex
def search(self, string, pos=0, endpos=None):
"""Scan through string looking for a match to the pattern, returning
a MatchObject instance, or None if no match was found."""
"""Scan through string looking for a match to the pattern, returning
a MatchObject instance, or None if no match was found."""
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, 0)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, 0)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
def match(self, string, pos=0, endpos=None):
"""Try to apply the pattern at the start of the string, returning
a MatchObject instance, or None if no match was found."""
"""Try to apply the pattern at the start of the string, returning
a MatchObject instance, or None if no match was found."""
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, ANCHORED)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
if endpos is None or endpos>len(string):
endpos=len(string)
if endpos<pos: endpos=pos
regs = self.code.match(string, pos, endpos, ANCHORED)
if regs is None:
return None
self._num_regs=len(regs)
return MatchObject(self,
string,
pos, endpos,
regs)
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl"""
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl"""
return self.subn(repl, string, count)[0]
def subn(self, repl, source, count=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. number is the number of substitutions that
were made."""
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. number is the number of substitutions that
were made."""
if count < 0:
raise error, "negative substitution count"
if count == 0:
import sys
count = sys.maxint
if type(repl) == type(''):
if '\\' in repl:
repl = lambda m, r=repl: pcre_expand(m, r)
else:
repl = lambda m, r=repl: r
n = 0 # Number of matches
pos = 0 # Where to start searching
lastmatch = -1 # End of last match
results = [] # Substrings making up the result
end = len(source)
while n < count and pos <= end:
m = self.search(source, pos)
if not m:
break
i, j = m.span(0)
if i == j == lastmatch:
# Empty match adjacent to previous match
pos = pos + 1
results.append(source[lastmatch:pos])
continue
if pos < i:
results.append(source[pos:i])
results.append(repl(m))
pos = lastmatch = j
if i == j:
# Last match was empty; don't try here again
pos = pos + 1
results.append(source[lastmatch:pos])
n = n + 1
results.append(source[pos:])
return (string.join(results, ''), n)
if count < 0:
raise error, "negative substitution count"
if count == 0:
import sys
count = sys.maxint
if type(repl) == type(''):
if '\\' in repl:
repl = lambda m, r=repl: pcre_expand(m, r)
else:
repl = lambda m, r=repl: r
n = 0 # Number of matches
pos = 0 # Where to start searching
lastmatch = -1 # End of last match
results = [] # Substrings making up the result
end = len(source)
while n < count and pos <= end:
m = self.search(source, pos)
if not m:
break
i, j = m.span(0)
if i == j == lastmatch:
# Empty match adjacent to previous match
pos = pos + 1
results.append(source[lastmatch:pos])
continue
if pos < i:
results.append(source[pos:i])
results.append(repl(m))
pos = lastmatch = j
if i == j:
# Last match was empty; don't try here again
pos = pos + 1
results.append(source[lastmatch:pos])
n = n + 1
results.append(source[pos:])
return (string.join(results, ''), n)
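A quick usage sketch of sub()/subn() as implemented above (the calls use the public re interface, whose shape has stayed the same in later versions of the module):

    import re

    pat = re.compile(r'\d+')
    text = "room 12, floor 3"

    pat.subn('#', text)
    # ('room #, floor #', 2)   -- string replacement, counted left to right

    pat.sub(lambda m: '<%s>' % m.group(0), text)
    # 'room <12>, floor <3>'   -- callable replacement, as in the lambda
    #                             wrapping of string repls above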
def split(self, source, maxsplit=0):
"""Split \var{string} by the occurrences of the pattern,
returning a list containing the resulting substrings."""
"""Split \var{string} by the occurrences of the pattern,
returning a list containing the resulting substrings."""
if maxsplit < 0:
raise error, "negative split count"
if maxsplit == 0:
import sys
maxsplit = sys.maxint
n = 0
pos = 0
lastmatch = 0
results = []
end = len(source)
while n < maxsplit:
m = self.search(source, pos)
if not m:
break
i, j = m.span(0)
if i == j:
# Empty match
if pos >= end:
break
pos = pos+1
continue
results.append(source[lastmatch:i])
g = m.groups()
if g:
if type(g)==type( "" ): g = [g]
results[len(results):] = list(g)
pos = lastmatch = j
n = n + 1
results.append(source[lastmatch:])
return results
if maxsplit < 0:
raise error, "negative split count"
if maxsplit == 0:
import sys
maxsplit = sys.maxint
n = 0
pos = 0
lastmatch = 0
results = []
end = len(source)
while n < maxsplit:
m = self.search(source, pos)
if not m:
break
i, j = m.span(0)
if i == j:
# Empty match
if pos >= end:
break
pos = pos+1
continue
results.append(source[lastmatch:i])
g = m.groups()
if g:
if type(g)==type( "" ): g = [g]
results[len(results):] = list(g)
pos = lastmatch = j
n = n + 1
results.append(source[lastmatch:])
return results
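The group handling in the loop above means captured text is spliced into the result list; a small sketch:

    import re

    re.compile(r',\s*').split('a, b,c')
    # ['a', 'b', 'c']            -- no groups: only the pieces between matches

    re.compile(r'(,)\s*').split('a, b,c')
    # ['a', ',', 'b', ',', 'c']  -- with a group, m.groups() is inserted too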
# The following 3 functions were contributed by Mike Fletcher, and
# allow pickling and unpickling of RegexObject instances.
def __getinitargs__(self):
return (None,None,None,None) # any 4 elements, to work around
# problems with the
# pickle/cPickle modules not yet
# ignoring the __init__ function
# pickle/cPickle modules not yet
# ignoring the __init__ function
def __getstate__(self):
return self.pattern, self.flags, self.groupindex
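Together with __setstate__ below, these hooks are meant to let a compiled pattern round-trip through pickle by its source, flags and group index rather than by the compiled code object; a minimal check, assuming the standard pickle interface:

    import pickle, re

    pat = re.compile(r'(?P<word>\w+)')
    restored = pickle.loads(pickle.dumps(pat))
    restored.match('hello').group('word')    # 'hello'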
def __setstate__(self, statetuple):
@ -225,70 +225,70 @@ class RegexObject:
class MatchObject:
def __init__(self, re, string, pos, endpos, regs):
self.re = re
self.string = string
self.pos = pos
self.endpos = endpos
self.regs = regs
self.re = re
self.string = string
self.pos = pos
self.endpos = endpos
self.regs = regs
def start(self, g = 0):
"Return the start of the substring matched by group g"
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g][0]
"Return the start of the substring matched by group g"
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g][0]
def end(self, g = 0):
"Return the end of the substring matched by group g"
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g][1]
"Return the end of the substring matched by group g"
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g][1]
def span(self, g = 0):
"""Return a tuple containing the start,end of the substring
matched by group g"""
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g]
"""Return a tuple containing the start,end of the substring
matched by group g"""
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
return self.regs[g]
def groups(self):
"Return a tuple containing all subgroups of the match object"
result = []
for g in range(1, self.re._num_regs):
if (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
result.append(None)
else:
result.append(self.string[self.regs[g][0]:self.regs[g][1]])
return tuple(result)
"Return a tuple containing all subgroups of the match object"
result = []
for g in range(1, self.re._num_regs):
if (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
result.append(None)
else:
result.append(self.string[self.regs[g][0]:self.regs[g][1]])
return tuple(result)
def group(self, *groups):
"Return one or more groups of the match."
if len(groups) == 0:
groups = (0,)
result = []
for g in groups:
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
if len(self.regs)<=g: raise IndexError, ('group "' + str(g) + '" is undefined')
elif (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
result.append(None)
else:
result.append(self.string[self.regs[g][0]:self.regs[g][1]])
if len(result) > 1:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return ()
"Return one or more groups of the match."
if len(groups) == 0:
groups = (0,)
result = []
for g in groups:
if type(g) == type(''):
try:
g = self.re.groupindex[g]
except (KeyError, TypeError):
raise IndexError, ('group "' + g + '" is undefined')
if len(self.regs)<=g: raise IndexError, ('group "' + str(g) + '" is undefined')
elif (self.regs[g][0] == -1) or (self.regs[g][1] == -1):
result.append(None)
else:
result.append(self.string[self.regs[g][0]:self.regs[g][1]])
if len(result) > 1:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return ()
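A short sketch of the accessors defined above:

    import re

    m = re.match(r'(\d{4})-(\d{2})', '1998-03-26')
    m.group(0)       # '1998-03'
    m.group(1, 2)    # ('1998', '03')
    m.span(2)        # (5, 7)
    m.groups()       # ('1998', '03')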

View File

@ -108,36 +108,36 @@ def convert(s, syntax=None):
"""
table = mastertable.copy()
if syntax is None:
syntax = regex.get_syntax()
syntax = regex.get_syntax()
if syntax & RE_NO_BK_PARENS:
del table[r'\('], table[r'\)']
del table['('], table[')']
del table[r'\('], table[r'\)']
del table['('], table[')']
if syntax & RE_NO_BK_VBAR:
del table[r'\|']
del table['|']
del table[r'\|']
del table['|']
if syntax & RE_BK_PLUS_QM:
table['+'] = r'\+'
table['?'] = r'\?'
table[r'\+'] = '+'
table[r'\?'] = '?'
table['+'] = r'\+'
table['?'] = r'\?'
table[r'\+'] = '+'
table[r'\?'] = '?'
if syntax & RE_NEWLINE_OR:
table['\n'] = '|'
table['\n'] = '|'
res = ""
i = 0
end = len(s)
while i < end:
c = s[i]
i = i+1
if c == '\\':
c = s[i]
i = i+1
key = '\\' + c
key = table.get(key, key)
res = res + key
else:
c = table.get(c, c)
res = res + c
c = s[i]
i = i+1
if c == '\\':
c = s[i]
i = i+1
key = '\\' + c
key = table.get(key, key)
res = res + key
else:
c = table.get(c, c)
res = res + c
return res
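A toy version of the scanning loop above, with a made-up three-entry table instead of the real mastertable, just to show the backslash-pair lookup:

    def toy_convert(s, table):
        # single characters and backslash pairs are looked up in the table;
        # anything not listed passes through unchanged
        res = ""
        i, end = 0, len(s)
        while i < end:
            c = s[i]; i = i + 1
            if c == '\\':
                key = '\\' + s[i]; i = i + 1
                res = res + table.get(key, key)
            else:
                res = res + table.get(c, c)
        return res

    toy_convert(r'\(foo\|bar\)', {r'\(': '(', r'\)': ')', r'\|': '|'})
    # -> '(foo|bar)'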
@ -155,21 +155,21 @@ def quote(s, quote=None):
"""
if quote is None:
q = "'"
altq = "'"
if q in s and altq not in s:
q = altq
q = "'"
altq = "'"
if q in s and altq not in s:
q = altq
else:
assert quote in ('"', "'")
q = quote
assert quote in ('"', "'")
q = quote
res = q
for c in s:
if c == q: c = '\\' + c
elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
res = res + c
if c == q: c = '\\' + c
elif c < ' ' or c > '~': c = "\\%03o" % ord(c)
res = res + c
res = res + q
if '\\' in res:
res = 'r' + res
res = 'r' + res
return res
@ -179,7 +179,7 @@ def main():
s = eval(sys.stdin.read())
sys.stdout.write(quote(convert(s)))
if sys.stdout.isatty():
sys.stdout.write("\n")
sys.stdout.write("\n")
if __name__ == '__main__':

View File

@ -600,7 +600,7 @@ class AddrlistClass:
# Parse a date field
_monthnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
_daynames = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# The timezone table does not include the military time zones defined
@ -610,12 +610,12 @@ _daynames = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic standard
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500,  # Central
'MST':-700, 'MDT':-600, # Mountain
'PST':-800, 'PDT':-700 # Pacific
}
'AST': -400, 'ADT': -300, # Atlantic standard
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500,  # Central
'MST':-700, 'MDT':-600, # Mountain
'PST':-800, 'PDT':-700 # Pacific
}
def parsedate_tz(data):
@ -672,12 +672,12 @@ def parsedate_tz(data):
pass
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset/100)*3600 + (tzoffset % 100)*60)
tuple = (yy, mm, dd, thh, tmm, tss, 0, 0, 0, tzoffset)
return tuple
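Worked example of the offset conversion above: an input zone of "-0500" parses to tzoffset = -500, which becomes -18000 seconds:

    tzoffset = -500
    if tzoffset < 0:
        tzsign, tzoffset = -1, -tzoffset
    else:
        tzsign = 1
    tzsign * ((tzoffset // 100) * 3600 + (tzoffset % 100) * 60)   # -18000
    # (the code above uses plain '/', which is integer division here)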
@ -700,11 +700,11 @@ def mktime_tz(data):
"""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
t = time.mktime(data[:8] + (0,))
return t - data[9] - time.timezone
# When used as script, run a small test program.

View File

@ -47,60 +47,60 @@ import __main__
class Completer:
def complete(self, text, state):
"""Return the next possible completion for 'text'.
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
return self.matches[state]
"""
if state == 0:
if "." in text:
self.matches = self.attr_matches(text)
else:
self.matches = self.global_matches(text)
return self.matches[state]
def global_matches(self, text):
"""Compute matches when text is a simple name.
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names
currently defined in __main__ that match.
Return a list of all keywords, built-in functions and names
currently defined in __main__ that match.
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,
__builtin__.__dict__.keys(),
__main__.__dict__.keys()]:
for word in list:
if word[:n] == text:
matches.append(word)
return matches
"""
import keyword
matches = []
n = len(text)
for list in [keyword.kwlist,
__builtin__.__dict__.keys(),
__main__.__dict__.keys()]:
for word in list:
if word[:n] == text:
matches.append(word)
return matches
def attr_matches(self, text):
"""Compute matches when text contains a dot.
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
completions.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in the globals of __main__, it will be evaluated
and its attributes (as revealed by dir()) are used as possible
completions.
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
words = dir(eval(expr, __main__.__dict__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr:
matches.append("%s.%s" % (expr, word))
return matches
"""
import re
m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
if not m:
return
expr, attr = m.group(1, 3)
words = dir(eval(expr, __main__.__dict__))
matches = []
n = len(attr)
for word in words:
if word[:n] == attr:
matches.append("%s.%s" % (expr, word))
return matches
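The regular expression above splits the completion text into an evaluable prefix and a partial attribute name; for example:

    import re

    m = re.match(r"(\w+(\.\w+)*)\.(\w*)", "os.path.jo")
    m.group(1, 3)    # ('os.path', 'jo')
    # dir(eval('os.path')) is then filtered for names starting with 'jo',
    # yielding completions such as 'os.path.join'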
readline.set_completer(Completer().complete)

View File

@ -16,9 +16,9 @@ import string
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
@ -53,300 +53,300 @@ class SGMLParser:
# Interface -- initialize and reset this instance
def __init__(self, verbose=0):
self.verbose = verbose
self.reset()
self.verbose = verbose
self.reset()
# Interface -- reset this instance. Loses all unprocessed data
def reset(self):
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
# For derived classes only -- enter literal mode (CDATA) till EOF
def setnomoretags(self):
self.nomoretags = self.literal = 1
self.nomoretags = self.literal = 1
# For derived classes only -- enter literal mode (CDATA)
def setliteral(self, *args):
self.literal = 1
self.literal = 1
# Interface -- feed some data to the parser. Call this as
# often as you want, with as little or as much text as you
# want (may include '\n'). (This just saves the text, all the
# processing is done by goahead().)
def feed(self, data):
self.rawdata = self.rawdata + data
self.goahead(0)
self.rawdata = self.rawdata + data
self.goahead(0)
# Interface -- handle the remaining data
def close(self):
self.goahead(1)
self.goahead(1)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start(0)
else: j = n
if i < j: self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if commentopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
i = i+k
continue
match = special.match(rawdata, i)
if match:
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
i = match.end(0)
continue
elif rawdata[i] == '&':
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
raise RuntimeError, 'neither < nor & ??'
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start(0)
else: j = n
if i < j: self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if endtagopen.match(rawdata, i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if commentopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_comment(i)
if k < 0: break
i = i+k
continue
match = special.match(rawdata, i)
if match:
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
i = match.end(0)
continue
elif rawdata[i] == '&':
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
raise RuntimeError, 'neither < nor & ??'
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
if rawdata[i:i+4] <> '<!--':
raise RuntimeError, 'unexpected call to handle_comment'
match = commentclose.search(rawdata, i+4)
if not match:
return -1
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
j = match.end(0)
return j-i
rawdata = self.rawdata
if rawdata[i:i+4] <> '<!--':
raise RuntimeError, 'unexpected call to handle_comment'
match = commentclose.search(rawdata, i+4)
if not match:
return -1
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
j = match.end(0)
return j-i
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
tag = string.lower(tag)
self.finish_shorttag(tag, data)
k = match.end(0)
return k
# XXX The following should skip matching quotes (' or ")
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
raise RuntimeError, 'unexpected call to parse_starttag'
k = match.end(0)
tag = string.lower(rawdata[i+1:k])
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrs.append((string.lower(attrname), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.finish_starttag(tag, attrs)
return j
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
tag = string.lower(tag)
self.finish_shorttag(tag, data)
k = match.end(0)
return k
# XXX The following should skip matching quotes (' or ")
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
raise RuntimeError, 'unexpected call to parse_starttag'
k = match.end(0)
tag = string.lower(rawdata[i+1:k])
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrs.append((string.lower(attrname), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.finish_starttag(tag, attrs)
return j
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = string.lower(string.strip(rawdata[i+2:j]))
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = string.lower(string.strip(rawdata[i+2:j]))
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
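A minimal subclass sketch of the dispatch above (assuming the sgmllib module of this era): a start_x/end_x pair makes x a balanced tag, do_x makes it open-only, and everything else falls through to unknown_starttag().

    import sgmllib

    class LinkParser(sgmllib.SGMLParser):
        def __init__(self):
            sgmllib.SGMLParser.__init__(self)
            self.links = []
        def start_a(self, attrs):        # balanced: tag is pushed on self.stack
            for name, value in attrs:
                if name == 'href':
                    self.links.append(value)
        def end_a(self):
            pass
        def do_br(self, attrs):          # open-only: never pushed
            pass

    p = LinkParser()
    p.feed('<a href="http://example.com/">x</a><br>')
    p.close()
    # p.links -> ['http://example.com/']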
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
# Example -- handle character reference, no need to override
def handle_charref(self, name):
try:
n = string.atoi(name)
except string.atoi_error:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
try:
n = string.atoi(name)
except string.atoi_error:
self.unknown_charref(name)
return
if not 0 <= n <= 255:
self.unknown_charref(name)
return
self.handle_data(chr(n))
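Worked example of the range check above: '&#65;' carries the name '65', which is within 0..255, so the parser hands chr(65) to handle_data():

    n = int('65')            # the code above uses string.atoi('65')
    0 <= n <= 255            # true, so handle_data(chr(n)) receives 'A'
    # an out-of-range or non-numeric reference such as '&#999;' goes to
    # unknown_charref('999') instead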
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
# Example -- handle entity reference, no need to override
def handle_entityref(self, name):
table = self.entitydefs
if table.has_key(name):
self.handle_data(table[name])
else:
self.unknown_entityref(name)
return
table = self.entitydefs
if table.has_key(name):
self.handle_data(table[name])
else:
self.unknown_entityref(name)
return
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
@ -358,87 +358,87 @@ class SGMLParser:
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(`self.testdata`) >= 70:
self.flush()
self.testdata = self.testdata + data
if len(`self.testdata`) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', `data`
data = self.testdata
if data:
self.testdata = ""
print 'data:', `data`
def handle_comment(self, data):
self.flush()
r = `data`
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
self.flush()
r = `data`
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def close(self):
SGMLParser.close(self)
self.flush()
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if not args:
args = sys.argv[1:]
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
klass = TestSGMLParser
if args:
file = args[0]
file = args[0]
else:
file = 'test.html'
file = 'test.html'
if file == '-':
f = sys.stdin
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
f.close()
x = klass()
for c in data:
x.feed(c)
x.feed(c)
x.close()

View File

@ -13,18 +13,18 @@ def copyfile(src, dst):
fsrc = None
fdst = None
try:
fsrc = open(src, 'rb')
fdst = open(dst, 'wb')
while 1:
buf = fsrc.read(16*1024)
if not buf:
break
fdst.write(buf)
fsrc = open(src, 'rb')
fdst = open(dst, 'wb')
while 1:
buf = fsrc.read(16*1024)
if not buf:
break
fdst.write(buf)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
if fdst:
fdst.close()
if fsrc:
fsrc.close()
def copymode(src, dst):
"""Copy mode bits from src to dst"""
@ -47,7 +47,7 @@ def copy(src, dst):
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
@ -58,7 +58,7 @@ def copy2(src, dst):
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
@ -80,19 +80,19 @@ def copytree(src, dst, symlinks=0):
names = os.listdir(src)
os.mkdir(dst)
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname)
else:
copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (`srcname`, `dstname`, str(why))
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname)
else:
copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error), why:
print "Can't copy %s to %s: %s" % (`srcname`, `dstname`, str(why))
def rmtree(path, ignore_errors=0, onerror=None):
"""Recursively delete a directory tree.
@ -105,23 +105,23 @@ def rmtree(path, ignore_errors=0, onerror=None):
cmdtuples = []
_build_cmdtuple(path, cmdtuples)
for cmd in cmdtuples:
try:
apply(cmd[0], (cmd[1],))
except:
exc = sys.exc_info()
if ignore_errors:
pass
elif onerror:
onerror(cmd[0], cmd[1], exc)
else:
raise exc[0], (exc[1][0], exc[1][1] + ' removing '+cmd[1])
try:
apply(cmd[0], (cmd[1],))
except:
exc = sys.exc_info()
if ignore_errors:
pass
elif onerror:
onerror(cmd[0], cmd[1], exc)
else:
raise exc[0], (exc[1][0], exc[1][1] + ' removing '+cmd[1])
# Helper for rmtree()
def _build_cmdtuple(path, cmdtuples):
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
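A sketch of the onerror hook as called in the loop above; the handler gets the function that failed, the path it was applied to, and sys.exc_info(). The directory name below is hypothetical:

    import shutil

    errors = []
    def collect(func, path, exc_info):
        errors.append((func, path))

    # shutil.rmtree('/tmp/scratch-dir', 0, collect)
    # with a handler installed, rmtree keeps going past failures and
    # reports them through the handler instead of raising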

View File

@ -61,61 +61,61 @@ import sys, os
def addsitedir(sitedir):
if sitedir not in sys.path:
sys.path.append(sitedir) # Add path component
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
names = os.listdir(sitedir)
except os.error:
return
return
names = map(os.path.normcase, names)
names.sort()
for name in names:
if name[-4:] == ".pth":
addpackage(sitedir, name)
if name[-4:] == ".pth":
addpackage(sitedir, name)
def addpackage(sitedir, name):
fullname = os.path.join(sitedir, name)
try:
f = open(fullname)
f = open(fullname)
except IOError:
return
return
while 1:
dir = f.readline()
if not dir:
break
if dir[0] == '#':
continue
if dir[-1] == '\n':
dir = dir[:-1]
dir = os.path.join(sitedir, dir)
if dir not in sys.path and os.path.exists(dir):
sys.path.append(dir)
dir = f.readline()
if not dir:
break
if dir[0] == '#':
continue
if dir[-1] == '\n':
dir = dir[:-1]
dir = os.path.join(sitedir, dir)
if dir not in sys.path and os.path.exists(dir):
sys.path.append(dir)
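A hedged example of the .pth mechanism handled by addpackage() above (file and directory names are made up):

    import os, site

    open(os.path.join('/tmp', 'extras.pth'), 'w').write(
        "# local add-ons\n"
        "extras\n")
    site.addsitedir('/tmp')
    # '/tmp' itself is appended to sys.path, and '/tmp/extras' is added
    # too if that directory exists; comment lines are skipped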
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")]
else:
sitedirs = [prefix]
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir)
if os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")]
else:
sitedirs = [prefix]
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir)
try:
import sitecustomize # Run arbitrary site specific code
import sitecustomize # Run arbitrary site specific code
except ImportError:
pass # No site customization module
pass # No site customization module
def _test():
print "sys.path = ["
for dir in sys.path:
print " %s," % `dir`
print " %s," % `dir`
print "]"
if __name__ == '__main__':

View File

@ -53,7 +53,7 @@ class SMTP:
"""
self.debuglevel = 0
self.file = None
self.helo_resp = None
self.helo_resp = None
if host: self.connect(host, port)
def set_debuglevel(self, debuglevel):
@ -83,17 +83,17 @@ class SMTP:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.debuglevel > 0: print 'connect:', (host, port)
self.sock.connect(host, port)
(code,msg)=self.getreply()
if self.debuglevel >0 : print "connect:", msg
return msg
(code,msg)=self.getreply()
if self.debuglevel >0 : print "connect:", msg
return msg
def send(self, str):
"""Send `str' to the server."""
if self.debuglevel > 0: print 'send:', `str`
if self.sock:
if self.sock:
self.sock.send(str)
else:
raise SMTPServerDisconnected
raise SMTPServerDisconnected
def putcmd(self, cmd, args=""):
"""Send a command to the server.
@ -108,117 +108,117 @@ class SMTP:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read the response code.
- server response string corresponding to response code
(note: multiline responses are converted to a single,
(note: multiline responses are converted to a single,
multiline string)
"""
resp=[]
self.file = self.sock.makefile('rb')
while 1:
self.file = self.sock.makefile('rb')
while 1:
line = self.file.readline()
if self.debuglevel > 0: print 'reply:', `line`
resp.append(string.strip(line[4:]))
code=line[:3]
#check if multiline resp
if line[3:4]!="-":
resp.append(string.strip(line[4:]))
code=line[:3]
#check if multiline resp
if line[3:4]!="-":
break
try:
errcode = string.atoi(code)
except(ValueError):
errcode = -1
errcode = -1
errmsg = string.join(resp,"\n")
if self.debuglevel > 0:
errmsg = string.join(resp,"\n")
if self.debuglevel > 0:
print 'reply: retcode (%s); Msg: %s' % (errcode,errmsg)
return errcode, errmsg
def docmd(self, cmd, args=""):
""" Send a command, and return it's responce code """
self.putcmd(cmd,args)
(code,msg)=self.getreply()
return code
""" Send a command, and return it's responce code """
self.putcmd(cmd,args)
(code,msg)=self.getreply()
return code
# std smtp commands
def helo(self, name=''):
""" SMTP 'helo' command. Hostname to send for this command
defaults to the FQDN of the local host """
name=string.strip(name)
if len(name)==0:
name=socket.gethostbyaddr(socket.gethostname())[0]
self.putcmd("helo",name)
(code,msg)=self.getreply()
self.helo_resp=msg
return code
name=string.strip(name)
if len(name)==0:
name=socket.gethostbyaddr(socket.gethostname())[0]
self.putcmd("helo",name)
(code,msg)=self.getreply()
self.helo_resp=msg
return code
def help(self):
""" SMTP 'help' command. Returns help text from server """
self.putcmd("help")
(code,msg)=self.getreply()
return msg
""" SMTP 'help' command. Returns help text from server """
self.putcmd("help")
(code,msg)=self.getreply()
return msg
def rset(self):
""" SMTP 'rset' command. Resets session. """
code=self.docmd("rset")
return code
code=self.docmd("rset")
return code
def noop(self):
""" SMTP 'noop' command. Dosen't do anything :> """
code=self.docmd("noop")
return code
code=self.docmd("noop")
return code
def mail(self,sender):
""" SMTP 'mail' command. Begins mail xfer session. """
self.putcmd("mail","from: %s" % sender)
return self.getreply()
return self.getreply()
def rcpt(self,recip):
""" SMTP 'rcpt' command. Indicates 1 recipient for this mail. """
self.putcmd("rcpt","to: %s" % recip)
return self.getreply()
self.putcmd("rcpt","to: %s" % recip)
return self.getreply()
def data(self,msg):
""" SMTP 'DATA' command. Sends message data to server.
Automatically quotes lines beginning with a period per rfc821 """
#quote periods in msg according to RFC821
#quote periods in msg according to RFC821
# ps, I don't know why I have to do it this way... doing:
# quotepat=re.compile(r"^[.]",re.M)
# msg=re.sub(quotepat,"..",msg)
# quotepat=re.compile(r"^[.]",re.M)
# msg=re.sub(quotepat,"..",msg)
# should work, but it doesn't (it doubles the number of any
# contiguous series of .'s at the beginning of a line,
# instead of just adding one.)
quotepat=re.compile(r"^[.]+",re.M)
quotepat=re.compile(r"^[.]+",re.M)
def m(pat):
return "."+pat.group(0)
msg=re.sub(quotepat,m,msg)
self.putcmd("data")
(code,repl)=self.getreply()
if self.debuglevel >0 : print "data:", (code,repl)
if code <> 354:
return -1
else:
self.send(msg)
self.send("\n.\n")
(code,msg)=self.getreply()
if self.debuglevel >0 : print "data:", (code,msg)
msg=re.sub(quotepat,m,msg)
self.putcmd("data")
(code,repl)=self.getreply()
if self.debuglevel >0 : print "data:", (code,repl)
if code <> 354:
return -1
else:
self.send(msg)
self.send("\n.\n")
(code,msg)=self.getreply()
if self.debuglevel >0 : print "data:", (code,msg)
return code
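Worked example of the period quoting above (one extra dot is prepended to every line that starts with dots, per RFC 821):

    import re

    quotepat = re.compile(r"^[.]+", re.M)
    re.sub(quotepat, lambda m: "." + m.group(0), "one\n.two\n...three\n")
    # -> 'one\n..two\n....three\n'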
# some useful methods
def sendmail(self,from_addr,to_addrs,msg):
""" This command performs an entire mail transaction.
The arguments are:
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : a list of addresses to send this mail to
- msg : the message to send.
This method will return normally if the mail is accepted for at least
This method will return normally if the mail is accepted for at least
one recipient.
Otherwise it will throw an exception (either SMTPSenderRefused,
SMTPRecipientsRefused, or SMTPDataError)
That is, if this method does not throw an exception, then someone
That is, if this method does not throw an exception, then someone
should get your mail.
It returns a dictionary, with one entry for each recipient that was
It returns a dictionary, with one entry for each recipient that was
refused.
example:
@ -241,27 +241,27 @@ class SMTP:
will return an empty dictionary.
"""
if not self.helo_resp:
self.helo()
if not self.helo_resp:
self.helo()
(code,resp)=self.mail(from_addr)
if code <>250:
self.rset()
raise SMTPSenderRefused
senderrs={}
self.rset()
raise SMTPSenderRefused
senderrs={}
for each in to_addrs:
(code,resp)=self.rcpt(each)
if (code <> 250) and (code <> 251):
(code,resp)=self.rcpt(each)
if (code <> 250) and (code <> 251):
senderrs[each]=(code,resp)
if len(senderrs)==len(to_addrs):
#th' server refused all our recipients
#th' server refused all our recipients
self.rset()
raise SMTPRecipientsRefused
code=self.data(msg)
if code <>250 :
if code <>250 :
self.rset()
raise SMTPDataError
raise SMTPDataError
#if we got here then somebody got our mail
return senderrs
return senderrs
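Typical use of sendmail() as described above (host and addresses are hypothetical):

    import smtplib

    s = smtplib.SMTP('localhost')
    refused = s.sendmail('me@example.org', ['you@example.org'],
                         'Subject: test\n\nhello\n')
    s.quit()
    # 'refused' is a dictionary of rejected recipients, empty when every
    # recipient was accepted; total failure raises one of the SMTP*
    # exceptions instead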
def close(self):
@ -275,5 +275,5 @@ class SMTP:
def quit(self):
self.docmd("quit")
self.close()
self.docmd("quit")
self.close()

View File

@ -72,14 +72,14 @@ argument = 312
sym_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
sym_name[_value] = _name
sym_name[_value] = _name
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token.main()
if __name__ == "__main__":

View File

@ -76,7 +76,7 @@ class Telnet:
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
hit (default is no timeout); may block.
read_all()
Read all data until EOF; may block.
@ -86,362 +86,362 @@ class Telnet:
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
without blocking.
read_eager()
Read either data already queued or some data available on the
socket, without blocking.
socket, without blocking.
read_lazy()
Read all data in the raw queue (processing it first), without
doing any socket I/O.
doing any socket I/O.
read_very_lazy()
Reads all data in the cooked queue, without doing any socket
I/O.
I/O.
"""
def __init__(self, host=None, port=0):
"""Constructor.
"""Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; a port
number is optional.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; a port
number is optional.
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
if host:
self.open(host, port)
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
if host:
self.open(host, port)
def open(self, host, port=0):
"""Connect to a host.
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
def __del__(self):
"""Destructor -- close the connection."""
self.close()
"""Destructor -- close the connection."""
self.close()
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print 'Telnet(%s,%d):' % (self.host, self.port),
if args:
print msg % args
else:
print msg
"""
if self.debuglevel > 0:
print 'Telnet(%s,%d):' % (self.host, self.port),
if args:
print msg % args
else:
print msg
def set_debuglevel(self, debuglevel):
"""Set the debug level.
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
"""
self.debuglevel = debuglevel
def close(self):
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1
def get_socket(self):
"""Return the socket object used internally."""
return self.sock
"""Return the socket object used internally."""
return self.sock
def fileno(self):
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if IAC in buffer:
buffer = string.replace(buffer, IAC, IAC+IAC)
self.msg("send %s", `buffer`)
self.sock.send(buffer)
"""
if IAC in buffer:
buffer = string.replace(buffer, IAC, IAC+IAC)
self.msg("send %s", `buffer`)
self.sock.send(buffer)
def read_until(self, match, timeout=None):
"""Read until a given string is encountered or until timeout.
"""Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
"""
n = len(match)
self.process_rawq()
i = string.find(self.cookedq, match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if timeout is not None:
s_args = s_args + (timeout,)
while not self.eof and apply(select.select, s_args) == s_reply:
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = string.find(self.cookedq, match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
return self.read_very_lazy()
"""
n = len(match)
self.process_rawq()
i = string.find(self.cookedq, match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if timeout is not None:
s_args = s_args + (timeout,)
while not self.eof and apply(select.select, s_args) == s_reply:
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = string.find(self.cookedq, match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
return self.read_very_lazy()
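A small usage sketch, assuming the string-based interface of this era's module and a hypothetical host:

    import telnetlib

    tn = telnetlib.Telnet('localhost', 23)
    banner = tn.read_until('login: ', 10)    # wait at most 10 seconds
    tn.write('guest\n')
    # on timeout, read_until() returns whatever cooked data was available,
    # possibly the empty string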
def read_all(self):
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_very_eager(self):
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_eager(self):
"""Read readily available data.
"""Read readily available data.
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_lazy(self):
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""
self.process_rawq()
return self.read_very_lazy()
"""
self.process_rawq()
return self.read_very_lazy()
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq
self.cookedq = ''
if not buf and self.eof and not self.rawq:
raise EOFError, 'telnet connection closed'
return buf
"""
buf = self.cookedq
self.cookedq = ''
if not buf and self.eof and not self.rawq:
raise EOFError, 'telnet connection closed'
return buf
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ''
try:
while self.rawq:
c = self.rawq_getchar()
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf = buf + c
continue
c = self.rawq_getchar()
if c == IAC:
buf = buf + c
elif c in (DO, DONT):
opt = self.rawq_getchar()
self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(opt))
self.sock.send(IAC + WONT + opt)
elif c in (WILL, WONT):
opt = self.rawq_getchar()
self.msg('IAC %s %d',
c == WILL and 'WILL' or 'WONT', ord(opt))
else:
self.msg('IAC %s not recognized' % `c`)
except EOFError: # raised by self.rawq_getchar()
pass
self.cookedq = self.cookedq + buf
"""
buf = ''
try:
while self.rawq:
c = self.rawq_getchar()
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf = buf + c
continue
c = self.rawq_getchar()
if c == IAC:
buf = buf + c
elif c in (DO, DONT):
opt = self.rawq_getchar()
self.msg('IAC %s %d', c == DO and 'DO' or 'DONT', ord(opt))
self.sock.send(IAC + WONT + opt)
elif c in (WILL, WONT):
opt = self.rawq_getchar()
self.msg('IAC %s %d',
c == WILL and 'WILL' or 'WONT', ord(opt))
else:
self.msg('IAC %s not recognized' % `c`)
except EOFError: # raised by self.rawq_getchar()
pass
self.cookedq = self.cookedq + buf
def rawq_getchar(self):
"""Get next char from raw queue.
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %s", `buf`)
self.eof = (not buf)
self.rawq = self.rawq + buf
"""
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %s", `buf`)
self.eof = (not buf)
self.rawq = self.rawq + buf
def sock_avail(self):
"""Test whether data is available on the socket."""
return select.select([self], [], [], 0) == ([self], [], [])
"""Test whether data is available on the socket."""
return select.select([self], [], [], 0) == ([self], [], [])
def interact(self):
"""Interaction function, emulates a very dumb telnet client."""
while 1:
rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
if self in rfd:
try:
text = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
break
if text:
sys.stdout.write(text)
sys.stdout.flush()
if sys.stdin in rfd:
line = sys.stdin.readline()
if not line:
break
self.write(line)
"""Interaction function, emulates a very dumb telnet client."""
while 1:
rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
if self in rfd:
try:
text = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
break
if text:
sys.stdout.write(text)
sys.stdout.flush()
if sys.stdin in rfd:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def expect(self, list, timeout=None):
"""Read until one from a list of a regular expressions matches.
"""Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
results are nondeterministic, and may depend on the I/O timing.
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
results are nondeterministic, and may depend on the I/O timing.
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
while 1:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if self.eof:
break
if timeout is not None:
r, w, x = select.select([self.fileno()], [], [], timeout)
if not r:
break
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
while 1:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if self.eof:
break
if timeout is not None:
r, w, x = select.select([self.fileno()], [], [], timeout)
if not r:
break
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
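A sketch of expect() with the Telnet instance tn from the read_until() example above; the patterns are plain strings, compiled on the fly as in the loop above:

    index, match, text = tn.expect(['login: ', 'Password: '], 30)
    if index == 0:
        tn.write('guest\n')
    elif index == 1:
        tn.write('secret\n')
    else:
        pass    # timeout with some text: index is -1 and match is None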
def test():
@ -454,18 +454,18 @@ def test():
"""
debuglevel = 0
while sys.argv[1:] and sys.argv[1] == '-d':
debuglevel = debuglevel+1
del sys.argv[1]
debuglevel = debuglevel+1
del sys.argv[1]
host = 'localhost'
if sys.argv[1:]:
host = sys.argv[1]
host = sys.argv[1]
port = 0
if sys.argv[2:]:
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
tn = Telnet()
tn.set_debuglevel(debuglevel)
tn.open(host, port)

View File

@ -19,55 +19,55 @@ template = None
def gettempdir():
global tempdir
if tempdir is not None:
return tempdir
return tempdir
attempdirs = ['/usr/tmp', '/tmp', os.getcwd(), os.curdir]
if os.name == 'nt':
attempdirs.insert(0, 'C:\\TEMP')
attempdirs.insert(0, '\\TEMP')
attempdirs.insert(0, 'C:\\TEMP')
attempdirs.insert(0, '\\TEMP')
elif os.name == 'mac':
import macfs, MACFS
try:
refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
MACFS.kTemporaryFolderType, 0)
dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
attempdirs.insert(0, dirname)
except macfs.error:
pass
import macfs, MACFS
try:
refnum, dirid = macfs.FindFolder(MACFS.kOnSystemDisk,
MACFS.kTemporaryFolderType, 0)
dirname = macfs.FSSpec((refnum, dirid, '')).as_pathname()
attempdirs.insert(0, dirname)
except macfs.error:
pass
for envname in 'TMPDIR', 'TEMP', 'TMP':
if os.environ.has_key(envname):
attempdirs.insert(0, os.environ[envname])
if os.environ.has_key(envname):
attempdirs.insert(0, os.environ[envname])
testfile = gettempprefix() + 'test'
for dir in attempdirs:
try:
filename = os.path.join(dir, testfile)
fp = open(filename, 'w')
fp.write('blat')
fp.close()
os.unlink(filename)
tempdir = dir
break
except IOError:
pass
try:
filename = os.path.join(dir, testfile)
fp = open(filename, 'w')
fp.write('blat')
fp.close()
os.unlink(filename)
tempdir = dir
break
except IOError:
pass
if tempdir is None:
msg = "Can't find a usable temporary directory amongst " + `attempdirs`
raise IOError, msg
msg = "Can't find a usable temporary directory amongst " + `attempdirs`
raise IOError, msg
return tempdir
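As a rough illustration (the directory path is an arbitrary example), any of TMPDIR, TEMP or TMP that names a writable directory is tried before the built-in candidates, so setting one before the first call steers the choice.

# Sketch: TMPDIR must point at a writable directory and be set before the
# first gettempdir() call, because the result is cached in the module global.
# (In this version TMP and TEMP, if also set, are consulted even earlier.)
import os
os.environ['TMPDIR'] = '/var/scratch'    # arbitrary example path
import tempfile
print tempfile.gettempdir()              # '/var/scratch' if it is writable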
# Function to calculate a prefix of the filename to use
def gettempprefix():
global template
if template == None:
if os.name == 'posix':
template = '@' + `os.getpid()` + '.'
elif os.name == 'nt':
template = '~' + `os.getpid()` + '-'
elif os.name == 'mac':
template = 'Python-Tmp-'
else:
template = 'tmp' # XXX might choose a better one
return template
# Counter for generating unique names
@@ -78,14 +78,14 @@ counter = 0
# User-callable function to return a unique temporary file name
def mktemp(suffix=""):
global counter
dir = gettempdir()
pre = gettempprefix()
while 1:
counter = counter + 1
file = os.path.join(dir, pre + `counter` + suffix)
if not os.path.exists(file):
return file
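A small sketch of the intended use (suffix, data and the pid shown in the comment are arbitrary): the name combines gettempdir(), gettempprefix() and a counter, and is only guaranteed not to exist at the moment mktemp() returns.

# Arbitrary suffix and data; mktemp() only hands back a fresh pathname,
# creating the file is up to the caller.
import tempfile

name = tempfile.mktemp('.txt')     # e.g. /tmp/@1234.1.txt on posix
fp = open(name, 'w')
fp.write('scratch data\n')
fp.close()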
class TemporaryFileWrapper:
@@ -96,31 +96,31 @@ class TemporaryFileWrapper:
no longer needed.
"""
def __init__(self, file, path):
self.file = file
self.path = path
def close(self):
self.file.close()
os.unlink(self.path)
def __del__(self):
try: self.close()
except: pass
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
setattr(self, name, a)
return a
def TemporaryFile(mode='w+b', bufsize=-1, suffix=""):
name = mktemp(suffix)
file = open(name, mode, bufsize)
try:
os.unlink(name)
except os.error:
# Non-unix -- can't unlink file that's still open, use wrapper
return TemporaryFileWrapper(file, name)
else:
return file
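A brief sketch of how TemporaryFile() is meant to be used (the data written is arbitrary): on Unix the file is unlinked right away, elsewhere the wrapper above deletes it when the object is closed or collected.

# The file vanishes once closed (or garbage-collected), on every platform.
import tempfile

f = tempfile.TemporaryFile('w+b')
f.write('intermediate results\n')
f.seek(0)
print f.read()      # data is still reachable through the open handle
f.close()           # nothing is left behind afterwards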
Lib/token.py
@@ -56,7 +56,7 @@ NT_OFFSET = 256
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
def ISTERMINAL(x):
@@ -77,49 +77,49 @@ def main():
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = string.splitfields(fp.read(), "\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
"#define[ \t][ \t]*([A-Z][A-Z_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = string.atoi(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = string.splitfields(fp.read(), "\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write(string.joinfields(format, "\n"))
fp.close()
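For reference, a sketch of the skeleton main() expects in the output file (the names and values below are illustrative): only the region between the marker comments is regenerated from Include/token.h, everything else is copied through unchanged.

# Illustrative skeleton; main() replaces only the lines between the markers
# with "NAME = value" pairs parsed out of the #define lines in token.h.
#--start constants--
ENDMARKER = 0
NAME = 1
#--end constants--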
Lib/types.py
@@ -22,7 +22,7 @@ DictType = DictionaryType = type({})
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
try:
CodeType = type(_f.func_code)
except:
@@ -31,18 +31,18 @@ except:
class _C:
def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m) # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
FileType = type(sys.stdin) # XXX what if it was assigned to?
except:
pass
XRangeType = type(xrange(0))
@@ -51,14 +51,14 @@ try:
raise TypeError
except TypeError:
try:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
except:
pass
tb = None; del tb
SliceType = type(slice(0))
EllipsisType = type(Ellipsis)
del sys, _f, _C, _x # Not for export
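A short example of the intended use of these names (the inspected values are arbitrary): comparing type() results against the module's constants instead of constructing throwaway objects on the spot.

# Arbitrary test values; the point is comparing against the named type objects.
import types

def describe(obj):
    if type(obj) is types.IntType:
        return 'an integer'
    elif type(obj) is types.FunctionType:
        return 'a function'
    elif type(obj) is types.ClassType:
        return 'a class'
    return 'something else'

print describe(42), describe(describe), describe(types)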
Lib/user.py
@@ -23,15 +23,15 @@ wishes to do different things depending on the Python version.
import os
home = os.curdir # Default
if os.environ.has_key('HOME'):
home = os.environ['HOME']
elif os.name == 'nt': # Contributed by Jeff Bauer
if os.environ.has_key('HOMEPATH'):
if os.environ.has_key('HOMEDRIVE'):
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
else:
home = os.environ['HOMEPATH']
pythonrc = os.path.join(home, ".pythonrc.py")
try:
Lib/uu.py
@@ -41,38 +41,38 @@ def encode(in_file, out_file, name=None, mode=None):
# If in_file is a pathname open it and change defaults
#
if in_file == '-':
in_file = sys.stdin
elif type(in_file) == type(''):
if name == None:
name = os.path.basename(in_file)
if mode == None:
try:
mode = os.stat(in_file)[0]
except AttributeError:
pass
in_file = open(in_file, 'rb')
#
# Open out_file if it is a pathname
#
if out_file == '-':
out_file = sys.stdout
elif type(out_file) == type(''):
out_file = open(out_file, 'w')
#
# Set defaults for name and mode
#
if name == None:
name = '-'
if mode == None:
mode = 0666
#
# Write the data
#
out_file.write('begin %o %s\n' % ((mode&0777),name))
str = in_file.read(45)
while len(str) > 0:
out_file.write(binascii.b2a_uu(str))
str = in_file.read(45)
out_file.write(' \nend\n')
@@ -82,50 +82,50 @@ def decode(in_file, out_file=None, mode=None):
# Open the input file, if needed.
#
if in_file == '-':
in_file = sys.stdin
elif type(in_file) == type(''):
in_file = open(in_file)
#
# Read until a begin is encountered or we've exhausted the file
#
while 1:
hdr = in_file.readline()
if not hdr:
raise Error, 'No valid begin line found in input file'
if hdr[:5] != 'begin':
continue
hdrfields = string.split(hdr)
if len(hdrfields) == 3 and hdrfields[0] == 'begin':
try:
string.atoi(hdrfields[1], 8)
break
except ValueError:
pass
if out_file == None:
out_file = hdrfields[2]
if mode == None:
mode = string.atoi(hdrfields[1], 8)
#
# Open the output file
#
if out_file == '-':
out_file = sys.stdout
elif type(out_file) == type(''):
fp = open(out_file, 'wb')
try:
os.path.chmod(out_file, mode)
except AttributeError:
pass
out_file = fp
#
# Main decoding loop
#
str = in_file.readline()
while str and str != 'end\n':
out_file.write(binascii.a2b_uu(str))
str = in_file.readline()
if not str:
raise Error, 'Truncated input file'
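A compact round-trip sketch (the file names are hypothetical): encode() emits the 'begin <mode> <name>' header, the 45-byte data lines via binascii.b2a_uu(), and the closing 'end' line; decode() undoes it, taking mode and name from the header when they are not given.

# Hypothetical file names; round-trip a binary file through uuencoded text.
import uu

uu.encode('picture.gif', 'picture.uue')   # arguments may be paths or file objects
uu.decode('picture.uue', 'copy.gif')      # mode and name default from the header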
def test():
"""uuencode/uudecode main program"""
@@ -138,40 +138,40 @@ def test():
output = sys.stdout
ok = 1
try:
optlist, args = getopt.getopt(sys.argv[1:], 'dt')
except getopt.error:
ok = 0
if not ok or len(args) > 2:
print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
print ' -d: Decode (instead of encode)'
print ' -t: data is text, encoded format unix-compatible text'
sys.exit(1)
for o, a in optlist:
if o == '-d': dopt = 1
if o == '-t': topt = 1
if len(args) > 0:
input = args[0]
if len(args) > 1:
output = args[1]
if dopt:
if topt:
if type(output) == type(''):
output = open(output, 'w')
else:
print sys.argv[0], ': cannot do -t to stdout'
sys.exit(1)
decode(input, output)
else:
if topt:
if type(input) == type(''):
input = open(input, 'r')
else:
print sys.argv[0], ': cannot do -t from stdin'
sys.exit(1)
encode(input, output)
if __name__ == '__main__':
test()
Lib/whichdb.py
@@ -17,19 +17,19 @@ def whichdb(filename):
# Check for dbm first -- this has a .pag and a .dir file
try:
f = open(filename + ".pag", "rb")
f.close()
f = open(filename + ".dir", "rb")
f.close()
return "dbm"
f = open(filename + ".pag", "rb")
f.close()
f = open(filename + ".dir", "rb")
f.close()
return "dbm"
except IOError:
pass
# See if the file exists, return None if not
try:
f = open(filename, "rb")
f = open(filename, "rb")
except IOError:
return None
# Read the first 4 bytes of the file -- the magic number
s = f.read(4)
@@ -37,21 +37,21 @@ def whichdb(filename):
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
return ""
# Check for GNU dbm
if magic == 0x13579ace:
return "gdbm"
return "gdbm"
# Check for BSD hash
if magic == 0x061561:
return "dbhash"
return "dbhash"
# Unknown
return ""
Lib/xdrlib.py
@@ -18,11 +18,11 @@ class Error:
"""
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return repr(self.msg)
def __str__(self):
return str(self.msg)
class ConversionError(Error):
@@ -34,76 +34,76 @@ class Packer:
"""Pack various data representations into a buffer."""
def __init__(self):
self.reset()
def reset(self):
self.__buf = ''
def get_buffer(self):
return self.__buf
# backwards compatibility
get_buf = get_buffer
def pack_uint(self, x):
self.__buf = self.__buf + struct.pack('>L', x)
pack_int = pack_uint
pack_enum = pack_int
def pack_bool(self, x):
if x: self.__buf = self.__buf + '\0\0\0\1'
else: self.__buf = self.__buf + '\0\0\0\0'
def pack_uhyper(self, x):
self.pack_uint(x>>32 & 0xffffffffL)
self.pack_uint(x & 0xffffffffL)
pack_hyper = pack_uhyper
def pack_float(self, x):
try: self.__buf = self.__buf + struct.pack('>f', x)
except struct.error, msg:
raise ConversionError, msg
def pack_double(self, x):
try: self.__buf = self.__buf + struct.pack('>d', x)
except struct.error, msg:
raise ConversionError, msg
def pack_fstring(self, n, s):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
n = ((n+3)/4)*4
data = s[:n]
data = data + (n - len(data)) * '\0'
self.__buf = self.__buf + data
pack_fopaque = pack_fstring
def pack_string(self, s):
n = len(s)
self.pack_uint(n)
self.pack_fstring(n, s)
pack_opaque = pack_string
pack_bytes = pack_string
def pack_list(self, list, pack_item):
for item in list:
self.pack_uint(1)
pack_item(item)
self.pack_uint(0)
def pack_farray(self, n, list, pack_item):
if len(list) <> n:
raise ValueError, 'wrong array size'
for item in list:
pack_item(item)
def pack_array(self, list, pack_item):
n = len(list)
self.pack_uint(n)
self.pack_farray(n, list, pack_item)
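A minimal round-trip sketch (the packed values are arbitrary) pairing the Packer above with the Unpacker class that follows; the module's own _test() below exercises the same idea more thoroughly.

# Arbitrary payload: pack a few values, then read them back in the same order.
import xdrlib

p = xdrlib.Packer()
p.pack_uint(7)
p.pack_string('hello')
p.pack_list(range(3), p.pack_uint)

u = xdrlib.Unpacker(p.get_buffer())
print u.unpack_uint()                  # 7
print u.unpack_string()                # 'hello'
print u.unpack_list(u.unpack_uint)     # [0, 1, 2]
u.done()                               # raises Error if anything is left over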
@@ -111,168 +111,168 @@ class Unpacker:
"""Unpacks various data representations from the given buffer."""
def __init__(self, data):
self.reset(data)
def reset(self, data):
self.__buf = data
self.__pos = 0
def get_position(self):
return self.__pos
def set_position(self, position):
self.__pos = position
def get_buffer(self):
return self.__buf
def done(self):
if self.__pos < len(self.__buf):
raise Error('unextracted data remains')
def unpack_uint(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
x = struct.unpack('>L', data)[0]
try:
return int(x)
except OverflowError:
return x
def unpack_int(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>l', data)[0]
unpack_enum = unpack_int
unpack_bool = unpack_int
def unpack_uhyper(self):
hi = self.unpack_uint()
lo = self.unpack_uint()
return long(hi)<<32 | lo
def unpack_hyper(self):
x = self.unpack_uhyper()
if x >= 0x8000000000000000L:
x = x - 0x10000000000000000L
return x
def unpack_float(self):
i = self.__pos
self.__pos = j = i+4
data = self.__buf[i:j]
if len(data) < 4:
raise EOFError
return struct.unpack('>f', data)[0]
def unpack_double(self):
i = self.__pos
self.__pos = j = i+8
data = self.__buf[i:j]
if len(data) < 8:
raise EOFError
return struct.unpack('>d', data)[0]
def unpack_fstring(self, n):
if n < 0:
raise ValueError, 'fstring size must be nonnegative'
i = self.__pos
j = i + (n+3)/4*4
if j > len(self.__buf):
raise EOFError
self.__pos = j
return self.__buf[i:i+n]
unpack_fopaque = unpack_fstring
def unpack_string(self):
n = self.unpack_uint()
return self.unpack_fstring(n)
unpack_opaque = unpack_string
unpack_bytes = unpack_string
def unpack_list(self, unpack_item):
list = []
while 1:
x = self.unpack_uint()
if x == 0: break
if x <> 1:
raise ConversionError, '0 or 1 expected, got ' + `x`
item = unpack_item()
list.append(item)
return list
def unpack_farray(self, n, unpack_item):
list = []
for i in range(n):
list.append(unpack_item())
return list
def unpack_array(self, unpack_item):
n = self.unpack_uint()
return self.unpack_farray(n, unpack_item)
# test suite
def _test():
p = Packer()
packtest = [
(p.pack_uint, (9,)),
(p.pack_bool, (None,)),
(p.pack_bool, ('hello',)),
(p.pack_uhyper, (45L,)),
(p.pack_float, (1.9,)),
(p.pack_double, (1.9,)),
(p.pack_string, ('hello world',)),
(p.pack_list, (range(5), p.pack_uint)),
(p.pack_array, (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
]
succeedlist = [1] * len(packtest)
count = 0
for method, args in packtest:
print 'pack test', count,
try:
apply(method, args)
print 'succeeded'
except ConversionError, var:
print 'ConversionError:', var.msg
succeedlist[count] = 0
count = count + 1
data = p.get_buffer()
# now verify
up = Unpacker(data)
unpacktest = [
(up.unpack_uint, (), lambda x: x == 9),
(up.unpack_bool, (), lambda x: not x),
(up.unpack_bool, (), lambda x: x),
(up.unpack_uhyper, (), lambda x: x == 45L),
(up.unpack_float, (), lambda x: 1.89 < x < 1.91),
(up.unpack_double, (), lambda x: 1.89 < x < 1.91),
(up.unpack_string, (), lambda x: x == 'hello world'),
(up.unpack_list, (up.unpack_uint,), lambda x: x == range(5)),
(up.unpack_array, (up.unpack_string,),
lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
]
count = 0
for method, args, pred in unpacktest:
print 'unpack test', count,
try:
if succeedlist[count]:
x = apply(method, args)
print pred(x) and 'succeeded' or 'failed', ':', x
else:
print 'skipping'
except ConversionError, var:
print 'ConversionError:', var.msg
count = count + 1
if __name__ == '__main__':
File diff suppressed because it is too large.