Attached are Python 2.6 compatibility patches for supervisor and medusa. The patches bundle Python 2.5.4's asyncore and asynchat modules into medusa and fix some Python 2.6 deprecation warnings by switching md5/sha usage to hashlib. The supervisor test suite passes except for two tests that were already failing for me before I made these changes. (I was running the tests through nose, and the failures looked like they could just be nose-related.)
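
For reference, the changes mostly reduce to two compatibility patterns. Here is a
rough, illustrative sketch (not part of the patch itself) of what the imports end
up looking like in the supervisor modules:

    # Illustrative only: the two compatibility patterns the patches apply.

    # 1) Prefer hashlib (available since Python 2.5); fall back to the
    #    deprecated md5/sha modules on older interpreters.
    try:
        from hashlib import md5, sha1
    except ImportError:
        from md5 import new as md5
        from sha import new as sha1

    # 2) Use the asyncore/asynchat copies bundled with medusa instead of
    #    the backward-incompatible Python 2.6 stdlib versions.
    from medusa import asyncore_25 as asyncore
    from medusa import asynchat_25 as asynchat

    # e.g. hashing a password without a DeprecationWarning on 2.6:
    digest = md5('password').hexdigest()

(Inside medusa itself the modules use the plain "import asyncore_25 as asyncore"
form rather than the package-qualified import.)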

Cheers,
Jason
diff -Naur medusa-trunk/CHANGES.txt medusa/CHANGES.txt
--- medusa-trunk/CHANGES.txt	2009-04-22 11:09:08.000000000 -0700
+++ medusa/CHANGES.txt	2009-04-22 11:12:54.000000000 -0700
@@ -1,6 +1,8 @@
 
 Version 0.5.5:
 
+* Re-added asyncore and asynchat as asyncore_25.py and asynchat_25.py;
+  the Python 2.6 stdlib versions of these modules are backward-incompatible.
 * [Patch #855389] ADD RNFR & RNTO commands to FTP server (Robin Becker)
 * [Patch #852089] In status_handler, catch any exception raised by the status()
   method.
diff -Naur medusa-trunk/MANIFEST medusa/MANIFEST
--- medusa-trunk/MANIFEST	2009-04-22 11:09:08.000000000 -0700
+++ medusa/MANIFEST	2009-04-22 13:40:08.000000000 -0700
@@ -1,3 +1,5 @@
+asynchat_25.py
+asyncore_25.py
 auth_handler.py
 CHANGES.txt
 chat_server.py
diff -Naur medusa-trunk/asynchat_25.py medusa/asynchat_25.py
--- medusa-trunk/asynchat_25.py	1969-12-31 16:00:00.000000000 -0800
+++ medusa/asynchat_25.py	2009-04-22 11:31:53.000000000 -0700
@@ -0,0 +1,295 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+#       Author: Sam Rushing <[email protected]>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc..).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+for example:
+Say you build an async nntp client using this class.  At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting.  Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+
+import socket
+from medusa import asyncore_25 as asyncore
+from collections import deque
+
+class async_chat (asyncore.dispatcher):
+    """This is an abstract class.  You must derive from this class, and add
+    the two methods collect_incoming_data() and found_terminator()"""
+
+    # these are overridable defaults
+
+    ac_in_buffer_size       = 4096
+    ac_out_buffer_size      = 4096
+
+    def __init__ (self, conn=None):
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        self.producer_fifo = fifo()
+        asyncore.dispatcher.__init__ (self, conn)
+
+    def collect_incoming_data(self, data):
+        raise NotImplementedError, "must be implemented in subclass"
+
+    def found_terminator(self):
+        raise NotImplementedError, "must be implemented in subclass"
+
+    def set_terminator (self, term):
+        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
+        self.terminator = term
+
+    def get_terminator (self):
+        return self.terminator
+
+    # grab some more data from the socket,
+    # throw it to the collector method,
+    # check for the terminator,
+    # if found, transition to the next state.
+
+    def handle_read (self):
+
+        try:
+            data = self.recv (self.ac_in_buffer_size)
+        except socket.error, why:
+            self.handle_error()
+            return
+
+        self.ac_in_buffer = self.ac_in_buffer + data
+
+        # Continue to search for self.terminator in self.ac_in_buffer,
+        # while calling self.collect_incoming_data.  The while loop
+        # is necessary because we might read several data+terminator
+        # combos with a single recv(1024).
+
+        while self.ac_in_buffer:
+            lb = len(self.ac_in_buffer)
+            terminator = self.get_terminator()
+            if not terminator:
+                # no terminator, collect it all
+                self.collect_incoming_data (self.ac_in_buffer)
+                self.ac_in_buffer = ''
+            elif isinstance(terminator, int) or isinstance(terminator, long):
+                # numeric terminator
+                n = terminator
+                if lb < n:
+                    self.collect_incoming_data (self.ac_in_buffer)
+                    self.ac_in_buffer = ''
+                    self.terminator = self.terminator - lb
+                else:
+                    self.collect_incoming_data (self.ac_in_buffer[:n])
+                    self.ac_in_buffer = self.ac_in_buffer[n:]
+                    self.terminator = 0
+                    self.found_terminator()
+            else:
+                # 3 cases:
+                # 1) end of buffer matches terminator exactly:
+                #    collect data, transition
+                # 2) end of buffer matches some prefix:
+                #    collect data to the prefix
+                # 3) end of buffer does not match any prefix:
+                #    collect data
+                terminator_len = len(terminator)
+                index = self.ac_in_buffer.find(terminator)
+                if index != -1:
+                    # we found the terminator
+                    if index > 0:
+                        # don't bother reporting the empty string (source of subtle bugs)
+                        self.collect_incoming_data (self.ac_in_buffer[:index])
+                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+                    # This does the Right Thing if the terminator is changed here.
+                    self.found_terminator()
+                else:
+                    # check for a prefix of the terminator
+                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
+                    if index:
+                        if index != lb:
+                            # we found a prefix, collect up to the prefix
+                            self.collect_incoming_data (self.ac_in_buffer[:-index])
+                            self.ac_in_buffer = self.ac_in_buffer[-index:]
+                        break
+                    else:
+                        # no prefix, collect it all
+                        self.collect_incoming_data (self.ac_in_buffer)
+                        self.ac_in_buffer = ''
+
+    def handle_write (self):
+        self.initiate_send ()
+
+    def handle_close (self):
+        self.close()
+
+    def push (self, data):
+        self.producer_fifo.push (simple_producer (data))
+        self.initiate_send()
+
+    def push_with_producer (self, producer):
+        self.producer_fifo.push (producer)
+        self.initiate_send()
+
+    def readable (self):
+        "predicate for inclusion in the readable for select()"
+        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+
+    def writable (self):
+        "predicate for inclusion in the writable for select()"
+        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
+        # this is about twice as fast, though not as clear.
+        return not (
+                (self.ac_out_buffer == '') and
+                self.producer_fifo.is_empty() and
+                self.connected
+                )
+
+    def close_when_done (self):
+        "automatically close this channel once the outgoing queue is empty"
+        self.producer_fifo.push (None)
+
+    # refill the outgoing buffer by calling the more() method
+    # of the first producer in the queue
+    def refill_buffer (self):
+        while 1:
+            if len(self.producer_fifo):
+                p = self.producer_fifo.first()
+                # a 'None' in the producer fifo is a sentinel,
+                # telling us to close the channel.
+                if p is None:
+                    if not self.ac_out_buffer:
+                        self.producer_fifo.pop()
+                        self.close()
+                    return
+                elif isinstance(p, str):
+                    self.producer_fifo.pop()
+                    self.ac_out_buffer = self.ac_out_buffer + p
+                    return
+                data = p.more()
+                if data:
+                    self.ac_out_buffer = self.ac_out_buffer + data
+                    return
+                else:
+                    self.producer_fifo.pop()
+            else:
+                return
+
+    def initiate_send (self):
+        obs = self.ac_out_buffer_size
+        # try to refill the buffer
+        if (len (self.ac_out_buffer) < obs):
+            self.refill_buffer()
+
+        if self.ac_out_buffer and self.connected:
+            # try to send the buffer
+            try:
+                num_sent = self.send (self.ac_out_buffer[:obs])
+                if num_sent:
+                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]
+
+            except socket.error, why:
+                self.handle_error()
+                return
+
+    def discard_buffers (self):
+        # Emergencies only!
+        self.ac_in_buffer = ''
+        self.ac_out_buffer = ''
+        while self.producer_fifo:
+            self.producer_fifo.pop()
+
+
+class simple_producer:
+
+    def __init__ (self, data, buffer_size=512):
+        self.data = data
+        self.buffer_size = buffer_size
+
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = ''
+            return result
+
+class fifo:
+    def __init__ (self, list=None):
+        if not list:
+            self.list = deque()
+        else:
+            self.list = deque(list)
+
+    def __len__ (self):
+        return len(self.list)
+
+    def is_empty (self):
+        return not self.list
+
+    def first (self):
+        return self.list[0]
+
+    def push (self, data):
+        self.list.append(data)
+
+    def pop (self):
+        if self.list:
+            return (1, self.list.popleft())
+        else:
+            return (0, None)
+
+# Given 'haystack', see if any prefix of 'needle' is at its end.  This
+# assumes an exact match has already been checked.  Return the number of
+# characters matched.
+# for example:
+# f_p_a_e ("qwerty\r", "\r\n") => 1
+# f_p_a_e ("qwertydkjf", "\r\n") => 0
+# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# new python:   28961/s
+# old python:   18307/s
+# re:        12820/s
+# regex:     14035/s
+
+def find_prefix_at_end (haystack, needle):
+    l = len(needle) - 1
+    while l and not haystack.endswith(needle[:l]):
+        l -= 1
+    return l
diff -Naur medusa-trunk/asyncore_25.py medusa/asyncore_25.py
--- medusa-trunk/asyncore_25.py	1969-12-31 16:00:00.000000000 -0800
+++ medusa/asyncore_25.py	2009-04-22 11:22:30.000000000 -0700
@@ -0,0 +1,551 @@
+# -*- Mode: Python -*-
+#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+#   Author: Sam Rushing <[email protected]>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time".  Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. it's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then pre-emptive
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background."  Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+
+import select
+import socket
+import sys
+import time
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
+     ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
+
+try:
+    socket_map
+except NameError:
+    socket_map = {}
+
+class ExitNow(Exception):
+    pass
+
+def read(obj):
+    try:
+        obj.handle_read_event()
+    except ExitNow:
+        raise
+    except:
+        obj.handle_error()
+
+def write(obj):
+    try:
+        obj.handle_write_event()
+    except ExitNow:
+        raise
+    except:
+        obj.handle_error()
+
+def _exception (obj):
+    try:
+        obj.handle_expt_event()
+    except ExitNow:
+        raise
+    except:
+        obj.handle_error()
+
+def readwrite(obj, flags):
+    try:
+        if flags & (select.POLLIN | select.POLLPRI):
+            obj.handle_read_event()
+        if flags & select.POLLOUT:
+            obj.handle_write_event()
+        if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
+            obj.handle_expt_event()
+    except ExitNow:
+        raise
+    except:
+        obj.handle_error()
+
+def poll(timeout=0.0, map=None):
+    if map is None:
+        map = socket_map
+    if map:
+        r = []; w = []; e = []
+        for fd, obj in map.items():
+            is_r = obj.readable()
+            is_w = obj.writable()
+            if is_r:
+                r.append(fd)
+            if is_w:
+                w.append(fd)
+            if is_r or is_w:
+                e.append(fd)
+        if [] == r == w == e:
+            time.sleep(timeout)
+        else:
+            try:
+                r, w, e = select.select(r, w, e, timeout)
+            except select.error, err:
+                if err[0] != EINTR:
+                    raise
+                else:
+                    return
+
+        for fd in r:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            read(obj)
+
+        for fd in w:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            write(obj)
+
+        for fd in e:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            _exception(obj)
+
+def poll2(timeout=0.0, map=None):
+    # Use the poll() support added to the select module in Python 2.0
+    if map is None:
+        map = socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    pollster = select.poll()
+    if map:
+        for fd, obj in map.items():
+            flags = 0
+            if obj.readable():
+                flags |= select.POLLIN | select.POLLPRI
+            if obj.writable():
+                flags |= select.POLLOUT
+            if flags:
+                # Only check for exceptions if object was either readable
+                # or writable.
+                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
+                pollster.register(fd, flags)
+        try:
+            r = pollster.poll(timeout)
+        except select.error, err:
+            if err[0] != EINTR:
+                raise
+            r = []
+        for fd, flags in r:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            readwrite(obj, flags)
+
+poll3 = poll2                           # Alias for backward compatibility
+
+def loop(timeout=30.0, use_poll=False, map=None, count=None):
+    if map is None:
+        map = socket_map
+
+    if use_poll and hasattr(select, 'poll'):
+        poll_fun = poll2
+    else:
+        poll_fun = poll
+
+    if count is None:
+        while map:
+            poll_fun(timeout, map)
+
+    else:
+        while map and count > 0:
+            poll_fun(timeout, map)
+            count = count - 1
+
+class dispatcher:
+
+    debug = False
+    connected = False
+    accepting = False
+    closing = False
+    addr = None
+
+    def __init__(self, sock=None, map=None):
+        if map is None:
+            self._map = socket_map
+        else:
+            self._map = map
+
+        if sock:
+            self.set_socket(sock, map)
+            # I think it should inherit this anyway
+            self.socket.setblocking(0)
+            self.connected = True
+            # XXX Does the constructor require that the socket passed
+            # be connected?
+            try:
+                self.addr = sock.getpeername()
+            except socket.error:
+                # The addr isn't crucial
+                pass
+        else:
+            self.socket = None
+
+    def __repr__(self):
+        status = [self.__class__.__module__+"."+self.__class__.__name__]
+        if self.accepting and self.addr:
+            status.append('listening')
+        elif self.connected:
+            status.append('connected')
+        if self.addr is not None:
+            try:
+                status.append('%s:%d' % self.addr)
+            except TypeError:
+                status.append(repr(self.addr))
+        return '<%s at %#x>' % (' '.join(status), id(self))
+
+    def add_channel(self, map=None):
+        #self.log_info('adding channel %s' % self)
+        if map is None:
+            map = self._map
+        map[self._fileno] = self
+
+    def del_channel(self, map=None):
+        fd = self._fileno
+        if map is None:
+            map = self._map
+        if map.has_key(fd):
+            #self.log_info('closing channel %d:%s' % (fd, self))
+            del map[fd]
+        self._fileno = None
+
+    def create_socket(self, family, type):
+        self.family_and_type = family, type
+        self.socket = socket.socket(family, type)
+        self.socket.setblocking(0)
+        self._fileno = self.socket.fileno()
+        self.add_channel()
+
+    def set_socket(self, sock, map=None):
+        self.socket = sock
+##        self.__dict__['socket'] = sock
+        self._fileno = sock.fileno()
+        self.add_channel(map)
+
+    def set_reuse_addr(self):
+        # try to re-use a server port if possible
+        try:
+            self.socket.setsockopt(
+                socket.SOL_SOCKET, socket.SO_REUSEADDR,
+                self.socket.getsockopt(socket.SOL_SOCKET,
+                                       socket.SO_REUSEADDR) | 1
+                )
+        except socket.error:
+            pass
+
+    # ==================================================
+    # predicates for select()
+    # these are used as filters for the lists of sockets
+    # to pass to select().
+    # ==================================================
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return True
+
+    # ==================================================
+    # socket object methods.
+    # ==================================================
+
+    def listen(self, num):
+        self.accepting = True
+        if os.name == 'nt' and num > 5:
+            num = 1
+        return self.socket.listen(num)
+
+    def bind(self, addr):
+        self.addr = addr
+        return self.socket.bind(addr)
+
+    def connect(self, address):
+        self.connected = False
+        err = self.socket.connect_ex(address)
+        # XXX Should interpret Winsock return values
+        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
+            return
+        if err in (0, EISCONN):
+            self.addr = address
+            self.connected = True
+            self.handle_connect()
+        else:
+            raise socket.error, (err, errorcode[err])
+
+    def accept(self):
+        # XXX can return either an address pair or None
+        try:
+            conn, addr = self.socket.accept()
+            return conn, addr
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                pass
+            else:
+                raise
+
+    def send(self, data):
+        try:
+            result = self.socket.send(data)
+            return result
+        except socket.error, why:
+            if why[0] == EWOULDBLOCK:
+                return 0
+            else:
+                raise
+            return 0
+
+    def recv(self, buffer_size):
+        try:
+            data = self.socket.recv(buffer_size)
+            if not data:
+                # a closed connection is indicated by signaling
+                # a read condition, and having recv() return 0.
+                self.handle_close()
+                return ''
+            else:
+                return data
+        except socket.error, why:
+            # winsock sometimes throws ENOTCONN
+            if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
+                self.handle_close()
+                return ''
+            else:
+                raise
+
+    def close(self):
+        self.del_channel()
+        self.socket.close()
+
+    # cheap inheritance, used to pass all other attribute
+    # references to the underlying socket object.
+    def __getattr__(self, attr):
+        return getattr(self.socket, attr)
+
+    # log and log_info may be overridden to provide more sophisticated
+    # logging and warning methods. In general, log is for 'hit' logging
+    # and 'log_info' is for informational, warning and error logging.
+
+    def log(self, message):
+        sys.stderr.write('log: %s\n' % str(message))
+
+    def log_info(self, message, type='info'):
+        if __debug__ or type != 'info':
+            print '%s: %s' % (type, message)
+
+    def handle_read_event(self):
+        if self.accepting:
+            # for an accepting socket, getting a read implies
+            # that we are connected
+            if not self.connected:
+                self.connected = True
+            self.handle_accept()
+        elif not self.connected:
+            self.handle_connect()
+            self.connected = True
+            self.handle_read()
+        else:
+            self.handle_read()
+
+    def handle_write_event(self):
+        # getting a write implies that we are connected
+        if not self.connected:
+            self.handle_connect()
+            self.connected = True
+        self.handle_write()
+
+    def handle_expt_event(self):
+        self.handle_expt()
+
+    def handle_error(self):
+        nil, t, v, tbinfo = compact_traceback()
+
+        # sometimes a user repr method will crash.
+        try:
+            self_repr = repr(self)
+        except:
+            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
+
+        self.log_info(
+            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+                self_repr,
+                t,
+                v,
+                tbinfo
+                ),
+            'error'
+            )
+        self.close()
+
+    def handle_expt(self):
+        self.log_info('unhandled exception', 'warning')
+
+    def handle_read(self):
+        self.log_info('unhandled read event', 'warning')
+
+    def handle_write(self):
+        self.log_info('unhandled write event', 'warning')
+
+    def handle_connect(self):
+        self.log_info('unhandled connect event', 'warning')
+
+    def handle_accept(self):
+        self.log_info('unhandled accept event', 'warning')
+
+    def handle_close(self):
+        self.log_info('unhandled close event', 'warning')
+        self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
+
+class dispatcher_with_send(dispatcher):
+
+    def __init__(self, sock=None, map=None):
+        dispatcher.__init__(self, sock, map)
+        self.out_buffer = ''
+
+    def initiate_send(self):
+        num_sent = 0
+        num_sent = dispatcher.send(self, self.out_buffer[:512])
+        self.out_buffer = self.out_buffer[num_sent:]
+
+    def handle_write(self):
+        self.initiate_send()
+
+    def writable(self):
+        return (not self.connected) or len(self.out_buffer)
+
+    def send(self, data):
+        if self.debug:
+            self.log_info('sending %s' % repr(data))
+        self.out_buffer = self.out_buffer + data
+        self.initiate_send()
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback():
+    t, v, tb = sys.exc_info()
+    tbinfo = []
+    assert tb # Must have a traceback
+    while tb:
+        tbinfo.append((
+            tb.tb_frame.f_code.co_filename,
+            tb.tb_frame.f_code.co_name,
+            str(tb.tb_lineno)
+            ))
+        tb = tb.tb_next
+
+    # just to be safe
+    del tb
+
+    file, function, line = tbinfo[-1]
+    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
+    return (file, function, line), t, v, info
+
+def close_all(map=None):
+    if map is None:
+        map = socket_map
+    for x in map.values():
+        x.socket.close()
+    map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead.  So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o?  [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
+
+if os.name == 'posix':
+    import fcntl
+
+    class file_wrapper:
+        # here we override just enough to make a file
+        # look like a socket for the purposes of asyncore.
+
+        def __init__(self, fd):
+            self.fd = fd
+
+        def recv(self, *args):
+            return os.read(self.fd, *args)
+
+        def send(self, *args):
+            return os.write(self.fd, *args)
+
+        read = recv
+        write = send
+
+        def close(self):
+            os.close(self.fd)
+
+        def fileno(self):
+            return self.fd
+
+    class file_dispatcher(dispatcher):
+
+        def __init__(self, fd, map=None):
+            dispatcher.__init__(self, None, map)
+            self.connected = True
+            self.set_file(fd)
+            # set it to non-blocking mode
+            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
+            flags = flags | os.O_NONBLOCK
+            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+        def set_file(self, fd):
+            self._fileno = fd
+            self.socket = file_wrapper(fd)
+            self.add_channel()
diff -Naur medusa-trunk/auth_handler.py medusa/auth_handler.py
--- medusa-trunk/auth_handler.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/auth_handler.py	2009-04-22 13:50:05.000000000 -0700
@@ -10,7 +10,10 @@
 # support for 'basic' authenticaion.
 
 import base64
-import md5
+try:
+    from hashlib import md5
+except ImportError:
+    from md5 import new as md5
 import re
 import string
 import time
@@ -97,7 +100,7 @@
 
     def apply_hash (self, s):
         "Apply MD5 to a string <s>, then wrap it in base64 encoding."
-        m = md5.new()
+        m = md5()
         m.update (s)
         d = m.digest()
         # base64.encodestring tacks on an extra linefeed.
diff -Naur medusa-trunk/chat_server.py medusa/chat_server.py
--- medusa-trunk/chat_server.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/chat_server.py	2009-04-22 11:14:07.000000000 -0700
@@ -12,8 +12,8 @@
 VERSION = string.split(RCS_ID)[2]
 
 import socket
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 import status_handler
 
 class chat_channel (asynchat.async_chat):
diff -Naur medusa-trunk/demo/publish.py medusa/demo/publish.py
--- medusa-trunk/demo/publish.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/demo/publish.py	2009-04-22 11:23:35.000000000 -0700
@@ -12,7 +12,7 @@
 # base64-encodes the username and password).  The 'Digest' scheme is
 # much more secure, but not widely supported yet. <sigh>
 
-import asyncore
+from medusa import asyncore_25 as asyncore
 from medusa import default_handler
 from medusa import http_server
 from medusa import put_handler
diff -Naur medusa-trunk/demo/script_server.py medusa/demo/script_server.py
--- medusa-trunk/demo/script_server.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/demo/script_server.py	2009-04-22 11:24:04.000000000 -0700
@@ -1,7 +1,7 @@
 # -*- Mode: Python -*-
 
 import re, sys
-import asyncore
+from medusa import asyncore_25 as asyncore
 from medusa import http_server
 from medusa import default_handler
 from medusa import logger
diff -Naur medusa-trunk/demo/simple_anon_ftpd.py medusa/demo/simple_anon_ftpd.py
--- medusa-trunk/demo/simple_anon_ftpd.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/demo/simple_anon_ftpd.py	2009-04-22 11:24:13.000000000 -0700
@@ -1,6 +1,6 @@
 # -*- Mode: Python -*-
 
-import asyncore
+from medusa import asyncore_25 as asyncore
 from medusa import ftp_server
 
 # create a 'dummy' authorizer (one that lets everyone in) that returns
diff -Naur medusa-trunk/demo/start_medusa.py medusa/demo/start_medusa.py
--- medusa-trunk/demo/start_medusa.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/demo/start_medusa.py	2009-04-22 11:24:27.000000000 -0700
@@ -13,8 +13,8 @@
 
 import os
 import sys
-import asyncore
 
+from medusa import asyncore_25 as asyncore
 from medusa import http_server
 from medusa import ftp_server
 from medusa import chat_server
diff -Naur medusa-trunk/demo/winFTPserver.py medusa/demo/winFTPserver.py
--- medusa-trunk/demo/winFTPserver.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/demo/winFTPserver.py	2009-04-22 11:24:38.000000000 -0700
@@ -11,7 +11,7 @@
 
 import win32security, win32con, win32api, win32net
 import ntsecuritycon, pywintypes
-import asyncore
+from medusa import asyncore_25 as asyncore
 from medusa import ftp_server, filesys
 
 class Win32Authorizer:
diff -Naur medusa-trunk/event_loop.py medusa/event_loop.py
--- medusa-trunk/event_loop.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/event_loop.py	2009-04-22 11:14:28.000000000 -0700
@@ -12,7 +12,7 @@
 # timeouts ("if the channel doesn't close in 5 minutes, then forcibly
 # close it" would be a typical usage).
 
-import asyncore
+import asyncore_25 as asyncore
 import bisect
 import time
 
diff -Naur medusa-trunk/ftp_server.py medusa/ftp_server.py
--- medusa-trunk/ftp_server.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/ftp_server.py	2009-04-22 11:15:00.000000000 -0700
@@ -19,8 +19,8 @@
 # vestigial anyway.  I've attempted to include the most commonly-used
 # commands, using the feature set of wu-ftpd as a guide.
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 import os
 import socket
diff -Naur medusa-trunk/http_server.py medusa/http_server.py
--- medusa-trunk/http_server.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/http_server.py	2009-04-22 11:15:13.000000000 -0700
@@ -17,8 +17,8 @@
 import time
 
 # async modules
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 # medusa modules
 import http_date
diff -Naur medusa-trunk/logger.py medusa/logger.py
--- medusa-trunk/logger.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/logger.py	2009-04-22 11:17:10.000000000 -0700
@@ -1,6 +1,6 @@
 # -*- Mode: Python -*-
 
-import asynchat
+import asynchat_25 as asynchat
 import socket
 import time         # these three are for the rotating logger
 import os           # |
diff -Naur medusa-trunk/monitor.py medusa/monitor.py
--- medusa-trunk/monitor.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/monitor.py	2009-04-22 11:33:59.000000000 -0700
@@ -15,8 +15,8 @@
 
 VERSION = string.split(RCS_ID)[2]
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 from counter import counter
 import producers
@@ -160,6 +160,7 @@
     channel_class = monitor_channel
 
     def __init__ (self, hostname='127.0.0.1', port=8023):
+        asyncore.dispatcher.__init__(self)
         self.hostname = hostname
         self.port = port
         self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
diff -Naur medusa-trunk/monitor_client.py medusa/monitor_client.py
--- medusa-trunk/monitor_client.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/monitor_client.py	2009-04-22 11:15:46.000000000 -0700
@@ -2,8 +2,8 @@
 
 # monitor client, unix version.
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 import socket
 import string
 import sys
diff -Naur medusa-trunk/resolver.py medusa/resolver.py
--- medusa-trunk/resolver.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/resolver.py	2009-04-22 11:15:51.000000000 -0700
@@ -13,7 +13,7 @@
 # see rfc1035 for details
 
 import string
-import asyncore
+import asyncore_25 as asyncore
 import socket
 import sys
 import time
diff -Naur medusa-trunk/rpc_client.py medusa/rpc_client.py
--- medusa-trunk/rpc_client.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/rpc_client.py	2009-04-22 11:17:25.000000000 -0700
@@ -224,7 +224,7 @@
 #                                                async fastrpc client
 # ===========================================================================
 
-import asynchat
+import asynchat_25 as asynchat
 
 class async_fastrpc_client (asynchat.async_chat):
 
diff -Naur medusa-trunk/rpc_server.py medusa/rpc_server.py
--- medusa-trunk/rpc_server.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/rpc_server.py	2009-04-22 11:16:03.000000000 -0700
@@ -37,8 +37,8 @@
 import sys
 import types
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 from producers import scanning_producer
 from counter import counter
diff -Naur medusa-trunk/status_handler.py medusa/status_handler.py
--- medusa-trunk/status_handler.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/status_handler.py	2009-04-22 11:16:09.000000000 -0700
@@ -11,7 +11,7 @@
 import re
 from cgi import escape
 
-import asyncore
+import asyncore_25 as asyncore
 import http_server
 import medusa_gif
 import producers
diff -Naur medusa-trunk/test/test_11.py medusa/test/test_11.py
--- medusa-trunk/test/test_11.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/test/test_11.py	2009-04-22 11:34:51.000000000 -0700
@@ -1,9 +1,9 @@
 # -*- Mode: Python -*-
 
-import asyncore
-import asynchat
 import socket
 import string
+from medusa import asyncore_25 as asyncore
+from medusa import asynchat_25 as asynchat
 
 # get some performance figures for an HTTP/1.1 server.
 # use pipelining.
diff -Naur medusa-trunk/test/test_lb.py medusa/test/test_lb.py
--- medusa-trunk/test/test_lb.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/test/test_lb.py	2009-04-22 11:35:05.000000000 -0700
@@ -13,8 +13,8 @@
 # server
 # ==================================================
 
-import asyncore
-import asynchat
+from medusa import asyncore_25 as asyncore
+from medusa import asynchat_25 as asynchat
 
 class test_channel (asynchat.async_chat):
 
diff -Naur medusa-trunk/thread/select_trigger.py medusa/thread/select_trigger.py
--- medusa-trunk/thread/select_trigger.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/thread/select_trigger.py	2009-04-22 11:20:38.000000000 -0700
@@ -16,8 +16,8 @@
 
 __revision__ = "$Id: select_trigger.py,v 1.4 2003/01/09 15:49:15 akuchling Exp $"
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 import os
 import socket
diff -Naur medusa-trunk/thread/thread_channel.py medusa/thread/thread_channel.py
--- medusa-trunk/thread/thread_channel.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/thread/thread_channel.py	2009-04-22 11:20:48.000000000 -0700
@@ -10,8 +10,8 @@
 # May be possible to do it on Win32, using TCP localhost sockets.
 # [does winsock support 'socketpair'?]
 
-import asyncore
-import asynchat
+import asyncore_25 as asyncore
+import asynchat_25 as asynchat
 
 import fcntl
 import FCNTL
diff -Naur medusa-trunk/thread/thread_handler.py medusa/thread/thread_handler.py
--- medusa-trunk/thread/thread_handler.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/thread/thread_handler.py	2009-04-22 11:20:58.000000000 -0700
@@ -331,7 +331,7 @@
     else:
         nthreads = string.atoi (sys.argv[1])
 
-        import asyncore
+        import asyncore_25 as asyncore
         from medusa import http_server
         # create a generic web server
         hs = http_server.http_server ('', 7080)
diff -Naur medusa-trunk/xmlrpc_handler.py medusa/xmlrpc_handler.py
--- medusa-trunk/xmlrpc_handler.py	2009-04-22 11:09:08.000000000 -0700
+++ medusa/xmlrpc_handler.py	2009-04-22 11:16:26.000000000 -0700
@@ -94,7 +94,7 @@
             print 'method="%s" params=%s' % (method, params)
             return "Sure, that works"
 
-    import asyncore
+    import asyncore_25 as asyncore
 
     hs = http_server.http_server ('', 8000)
     rpc = rpc_demo()
Index: src/supervisor/options.py
===================================================================
--- src/supervisor/options.py	(revision 846)
+++ src/supervisor/options.py	(working copy)
@@ -13,7 +13,6 @@
 ##############################################################################
 
 import ConfigParser
-import asyncore
 import socket
 import getopt
 import os
@@ -34,6 +33,8 @@
 from fcntl import fcntl
 from fcntl import F_SETFL, F_GETFL
 
+from medusa import asyncore_25 as asyncore
+
 from supervisor.datatypes import boolean
 from supervisor.datatypes import integer
 from supervisor.datatypes import name_to_uid
Index: src/supervisor/tests/test_http.py
===================================================================
--- src/supervisor/tests/test_http.py	(revision 846)
+++ src/supervisor/tests/test_http.py	(working copy)
@@ -271,14 +271,20 @@
         self.assertEqual(authorizer.authorize(('foo', 'password')), True)
     
     def test_authorize_gooduser_badpassword_sha(self):
-        import sha
-        password = '{SHA}' + sha.new('password').hexdigest()
+        try:
+            from hashlib import sha1
+        except ImportError:
+            from sha import new as sha1
+        password = '{SHA}' + sha1('password').hexdigest()
         authorizer = self._makeOne({'foo':password})
         self.assertEqual(authorizer.authorize(('foo', 'bar')), False)
 
     def test_authorize_gooduser_goodpassword_sha(self):
-        import sha
-        password = '{SHA}' + sha.new('password').hexdigest()
+        try:
+            from hashlib import sha1
+        except ImportError:
+            from sha import new as sha1
+        password = '{SHA}' + sha1('password').hexdigest()
         authorizer = self._makeOne({'foo':password})
         self.assertEqual(authorizer.authorize(('foo', 'password')), True)
 
Index: src/supervisor/tests/test_supervisord.py
===================================================================
--- src/supervisor/tests/test_supervisord.py	(revision 846)
+++ src/supervisor/tests/test_supervisord.py	(working copy)
@@ -388,7 +388,7 @@
         process = DummyProcess(pconfig)
         gconfig = DummyPGroupConfig(options, pconfigs=[pconfig])
         pgroup = DummyProcessGroup(gconfig)
-        import asyncore
+        from medusa import asyncore_25 as asyncore
         exitnow = DummyDispatcher(readable=True, error=asyncore.ExitNow)
         pgroup.dispatchers = {6:exitnow}
         supervisord.process_groups = {'foo': pgroup}
@@ -408,7 +408,7 @@
             L.append(event)
         from supervisor import events
         events.subscribe(events.SupervisorStateChangeEvent, callback)
-        import asyncore
+        from medusa import asyncore_25 as asyncore
         options.test = True
         self.assertRaises(asyncore.ExitNow, supervisord.runforever)
         self.assertTrue(pgroup.all_stopped)
@@ -430,7 +430,7 @@
         supervisord.process_groups = {'foo': pgroup}
         supervisord.options.mood = 0
         supervisord.options.test = True
-        import asyncore
+        from medusa import asyncore_25 as asyncore
         self.assertRaises(asyncore.ExitNow, supervisord.runforever)
         self.assertEqual(pgroup.all_stopped, True)
 
Index: src/supervisor/http_client.py
===================================================================
--- src/supervisor/http_client.py	(revision 846)
+++ src/supervisor/http_client.py	(working copy)
@@ -2,11 +2,12 @@
 
 import sys
 import socket
-import asyncore
-import asynchat
 import base64
 from urlparse import urlparse
 
+from medusa import asyncore_25 as asyncore
+from medusa import asynchat_25 as asynchat
+
 CR="\x0d"
 LF="\x0a"
 CRLF=CR+LF
Index: src/supervisor/process.py
===================================================================
--- src/supervisor/process.py	(revision 846)
+++ src/supervisor/process.py	(working copy)
@@ -12,7 +12,6 @@
 #
 ##############################################################################
 
-import asyncore
 import os
 import sys
 import time
@@ -22,6 +21,8 @@
 import traceback
 import signal
 
+from medusa import asyncore_25 as asyncore
+
 from supervisor.states import ProcessStates
 from supervisor.states import SupervisorStates
 from supervisor.states import getProcessStateDescription
Index: src/supervisor/http.py
===================================================================
--- src/supervisor/http.py	(revision 846)
+++ src/supervisor/http.py	(working copy)
@@ -12,7 +12,6 @@
 #
 ##############################################################################
 
-import asyncore
 import os
 import stat
 import time
@@ -22,6 +21,7 @@
 import pwd
 import urllib
 
+from medusa import asyncore_25 as asyncore
 from medusa import http_date
 from medusa import http_server
 from medusa import producers
Index: src/supervisor/supervisorctl.py
===================================================================
--- src/supervisor/supervisorctl.py	(revision 846)
+++ src/supervisor/supervisorctl.py	(working copy)
@@ -39,11 +39,12 @@
 import getpass
 import xmlrpclib
 import socket
-import asyncore
 import errno
 import urlparse
 import threading
 
+from medusa import asyncore_25 as asyncore
+
 from supervisor.options import ClientOptions
 from supervisor.options import split_namespec
 from supervisor import xmlrpc
Index: src/supervisor/supervisord.py
===================================================================
--- src/supervisor/supervisord.py	(revision 846)
+++ src/supervisor/supervisord.py	(working copy)
@@ -46,8 +46,9 @@
 import errno
 import select
 import signal
-import asyncore
 
+from medusa import asyncore_25 as asyncore
+
 from supervisor.options import ServerOptions
 from supervisor.options import signame
 from supervisor import events