Index: lib/sqlalchemy/ansisql.py
===================================================================
--- lib/sqlalchemy/ansisql.py	(revision 1231)
+++ lib/sqlalchemy/ansisql.py	(working copy)
@@ -31,7 +31,6 @@
     return ANSISQLEngine(**params)
 
 class ANSISQLEngine(sqlalchemy.engine.SQLEngine):
-
     def schemagenerator(self, **params):
         return ANSISchemaGenerator(self, **params)
     
@@ -101,7 +100,7 @@
                     return str(i[0])
                 self.strings[self.statement] = re.sub(match, getnum, self.strings[self.statement])
 
-    def get_from_text(self, obj):
+    def get_from_text(self, obj, is_aliased=False):
         return self.froms.get(obj, None)
 
     def get_str(self, obj):
@@ -183,7 +182,6 @@
         else:
             self.strings[column] = "%s.%s" % (column.table.name, column.name)
 
-
     def visit_fromclause(self, fromclause):
         self.froms[fromclause] = fromclause.from_name
 
@@ -274,11 +272,10 @@
         return self.bindtemplate % name
         
     def visit_alias(self, alias):
-        self.froms[alias] = self.get_from_text(alias.original) + " AS " + alias.name
+        self.froms[alias] = self.get_from_text(alias.original, True) + " AS " + alias.name
         self.strings[alias] = self.get_str(alias.original)
 
     def visit_select(self, select):
-        
         # the actual list of columns to print in the SELECT column list.
         # its an ordered dictionary to insure that the actual labeled column name
         # is unique.
Index: lib/sqlalchemy/databases/__init__.py
===================================================================
--- lib/sqlalchemy/databases/__init__.py	(revision 1231)
+++ lib/sqlalchemy/databases/__init__.py	(working copy)
@@ -5,4 +5,4 @@
 # the MIT License: http://www.opensource.org/licenses/mit-license.php
 
 
-__all__ = ['oracle', 'postgres', 'sqlite', 'mysql']
\ No newline at end of file
+__all__ = ['oracle', 'postgres', 'sqlite', 'mysql', 'mssql']
Index: lib/sqlalchemy/databases/information_schema.py
===================================================================
--- lib/sqlalchemy/databases/information_schema.py	(revision 1231)
+++ lib/sqlalchemy/databases/information_schema.py	(working copy)
@@ -56,6 +56,18 @@
     Column("constraint_name", String),
     schema="information_schema")
 
+gen_ref_constraints = schema.Table("referential_constraints", generic_engine,
+    Column("constraint_catalog", String),
+    Column("constraint_schema", String),
+    Column("constraint_name", String),
+    Column("unique_constraint_catalog", String),
+    Column("unique_constraint_schema", String),
+    Column("unique_constraint_name", String),
+    Column("match_option", String),
+    Column("update_rule", String),
+    Column("delete_rule", String),
+    schema="information_schema")
+                                   
 class ISchema(object):
     def __init__(self, engine):
         self.engine = engine
Index: lib/sqlalchemy/databases/mssql.py
===================================================================
--- lib/sqlalchemy/databases/mssql.py	(revision 0)
+++ lib/sqlalchemy/databases/mssql.py	(revision 0)
@@ -0,0 +1,478 @@
+# mssql.py
+
+"""
+notes:
+  supports both pymssql and adodbapi interfaces
+
+  IDENTITY columns are supported by using SA schema.Sequence() objects. In other words:
+         Table('test', mss_engine,
+                Column('id',   Integer, Sequence('blah',100,10), primary_key=True),
+                Column('name', String(20))
+              ).create()
+
+         would yield:
+         CREATE TABLE test (
+           id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
+           name VARCHAR(20)
+           )
+  note that the start & increment values for sequences are optional and will default to 1,1
+
+  support for SET IDENTITY_INSERT ON mode (automagic on / off for INSERTs)
+
+  support for auto-fetching of @@IDENTITY on insert
+
+  select.limit implemented as SELECT TOP n
+
+
+Known issues / TODO:
+  no support for more than one IDENTITY column per table
+  no support for table reflection of IDENTITY columns with (seed,increment) values other than (1,1)
+  no support for GUID type columns (yet)
+  pymssql has problems with transaction control that this module attempts to work around
+  pymssql has problems with binary and unicode data that this module does NOT work around
+  adodbapi fails testtypes.py unit test on unicode data too -- issue with the test?
+
+"""
+
+import sys, StringIO, string, types, re, datetime
+
+import sqlalchemy.sql as sql
+import sqlalchemy.engine as engine
+import sqlalchemy.schema as schema
+import sqlalchemy.ansisql as ansisql
+import sqlalchemy.types as sqltypes
+from sqlalchemy import *
+
+try:
+    import adodbapi as dbmodule
+    # ADODBAPI has a non-standard Connection method
+    connect = dbmodule.Connection
+    make_connect_string = lambda keys: \
+        [["Provider=SQLOLEDB;Data Source=%s;User Id=%s;Password=%s;Initial Catalog=%s" % (
+            keys["host"], keys["user"], keys["password"], keys["database"])], {}]
+    do_commit = False
+except:
+    try:
+        import pymssql as dbmodule
+        connect = dbmodule.connect
+        # pymssql doesn't have a Binary method.  we use string
+        dbmodule.Binary = lambda st: str(st)
+        make_connect_string = lambda keys:  \
+                    [[], keys]
+        do_commit = True
+    except:
+        dbmodule = None
+        raise
+    
+class MSNumeric(sqltypes.Numeric):
+    def convert_result_value(self, value, engine):
+        return value
+
+    def convert_bind_param(self, value, engine):
+        if value is None:
+            # Not sure that this exception is needed
+            return value
+        else:
+            return str(value) 
+
+    def get_col_spec(self):
+        return "NUMERIC(%(precision)s, %(length)s)" % {'precision': self.precision, 'length' : self.length}
+
+class MSFloat(sqltypes.Float):
+    def get_col_spec(self):
+        return "FLOAT(%(precision)s)" % {'precision': self.precision}
+    def convert_bind_param(self, value, engine):
+        """By converting to string, we can use Decimal types round-trip."""
+        return str(value) 
+
+class MSInteger(sqltypes.Integer):
+    def get_col_spec(self):
+        return "INTEGER"
+
+class MSSmallInteger(sqltypes.Smallinteger):
+    def get_col_spec(self):
+        return "SMALLINT"
+
+class MSDateTime(sqltypes.DateTime):
+    def get_col_spec(self):
+        return "DATETIME"
+
+    def convert_bind_param(self, value, engine):
+        if hasattr(value, "isoformat"):
+            return value.isoformat(' ')
+        else:
+            return value
+
+    def convert_result_value(self, value, engine):
+        # adodbapi will return datetimes with empty time values as datetime.date() objects. Promote them back to full datetime.datetime()
+        if value and not hasattr(value, 'second'):
+            return datetime.datetime(value.year, value.month, value.day)
+        return value
+
+class MSDate(sqltypes.Date):
+    def get_col_spec(self):
+        return "SMALLDATETIME"
+    
+    def convert_bind_param(self, value, engine):
+        if value and hasattr(value, "isoformat"):
+            return value.isoformat()
+        return value
+
+    def convert_result_value(self, value, engine):
+        # pymssql will return SMALLDATETIME values as datetime.datetime(), truncate it back to datetime.date()
+        if value and hasattr(value, 'second'):
+            return value.date()
+        return value
+
+class MSText(sqltypes.TEXT):
+    def get_col_spec(self):
+        return "TEXT"
+class MSString(sqltypes.String):
+    def get_col_spec(self):
+        return "VARCHAR(%(length)s)" % {'length' : self.length}
+class MSChar(sqltypes.CHAR):
+    def get_col_spec(self):
+        return "CHAR(%(length)s)" % {'length' : self.length}
+class MSBinary(sqltypes.Binary):
+    def get_col_spec(self):
+        return "IMAGE"
+class MSBoolean(sqltypes.Boolean):
+    def get_col_spec(self):
+        return "BIT"
+        
+colspecs = {
+    sqltypes.Integer : MSInteger,
+    sqltypes.Smallinteger: MSSmallInteger,
+    sqltypes.Numeric : MSNumeric,
+    sqltypes.Float : MSFloat,
+    sqltypes.DateTime : MSDateTime,
+    sqltypes.Date : MSDate,
+    sqltypes.String : MSString,
+    sqltypes.Binary : MSBinary,
+    sqltypes.Boolean : MSBoolean,
+    sqltypes.TEXT : MSText,
+    sqltypes.CHAR: MSChar,
+}
+
+ischema_names = {
+    'int' : MSInteger,
+    'smallint' : MSSmallInteger,
+    'varchar' : MSString,
+    'char' : MSChar,
+    'text' : MSText,
+    'decimal' : MSNumeric,
+    'numeric' : MSNumeric,
+    'float' : MSFloat,
+    'datetime' : MSDateTime,
+    'smalldatetime' : MSDate,
+    'binary' : MSBinary,
+    'bit': MSBoolean,
+    'real' : MSFloat,
+    'image' : MSBinary
+}
+
+def engine(opts, **params):
+    return MSSQLEngine(opts, **params)
+
+def descriptor():
+    return {'name':'mssql',
+    'description':'MSSQL',
+    'arguments':[
+        ('user',"Database Username",None),
+        ('password',"Database Password",None),
+        ('db',"Database Name",None),
+        ('host',"Hostname", None),
+    ]}
+
+class MSSQLEngine(ansisql.ANSISQLEngine):
+    def __init__(self, opts, module = None, **params):
+        # use the passed-in DBAPI module, falling back to the one detected at import time
+        self.module = module or dbmodule
+        self.opts = opts or {}
+        ansisql.ANSISQLEngine.__init__(self, **params)
+
+    def connect_args(self):
+        return make_connect_string(self.opts)
+
+    def type_descriptor(self, typeobj):
+        return sqltypes.adapt_type(typeobj, colspecs)
+
+    def last_inserted_ids(self):
+        return self.context.last_inserted_ids
+
+    def supports_sane_rowcount(self):
+        return True
+
+    def compiler(self, statement, bindparams, **kwargs):
+        return MSSQLCompiler(statement, bindparams, engine=self, **kwargs)
+
+    def schemagenerator(self, **params):
+        return MSSQLSchemaGenerator(self, **params)
+
+    def schemadropper(self, **params):
+        return MSSQLSchemaDropper(self, **params)
+
+    def get_default_schema_name(self):
+        return "dbo"
+        
+    def last_inserted_ids(self):
+        return self.context.last_inserted_ids
+            
+    def do_begin(self, connection):
+        """implementations might want to put logic here for turning autocommit on/off, etc."""
+        if do_commit:
+            pass  
+
+    def _execute(self, c, statement, parameters):
+        try:
+            c.execute(statement, parameters)
+            self.context.rowcount = c.rowcount
+            c.DBPROP_COMMITPRESERVE = "Y"
+        except Exception, e:
+            # del c.parent  # Close the Parent Connection, delete it from the pool
+            raise exceptions.SQLError(statement, parameters, e)
+
+
+    def do_rollback(self, connection):
+        """implementations might want to put logic here for turning autocommit on/off, etc."""
+        if do_commit:
+            try:
+                # connection.rollback() for pymssql failed sometimes--the begin tran doesn't show up
+                # this is a workaround that seems to handle it.
+                r = self.raw_connection(connection)
+                r.query("if @@trancount > 0 rollback tran")
+                r.fetch_array()
+                r.query("begin tran")
+                r.fetch_array()
+            except:
+                pass
+        try:
+            del connection
+        except:
+            raise
+
+    def raw_connection(self, connection):
+        """Pull the raw pymssql connection out--sensitive to "pool.ConnectionFairy" and pymssql.pymssqlCnx Classes"""
+        try:
+            return connection.connection.__dict__['_pymssqlCnx__cnx']
+        except:
+            return connection.connection.adoConn
+
+    def do_commit(self, connection):
+        """implementations might want to put logic here for turning autocommit on/off, etc.
+            do_commit is set for pymssql connections--ADO seems to handle transactions without any issue
+        """
+        # ADO Uses Implicit Transactions.
+        if do_commit:
+            # This is very pymssql specific.  We use this instead of its commit, because it hangs on failed rollbacks.
+            # By using the "if" we don't assume an open transaction--much better.
+            r = self.raw_connection(connection)
+            r.query("if @@trancount > 0 commit tran")
+            r.fetch_array()
+            r.query("begin tran")
+            r.fetch_array()
+        else:
+            pass
+            #connection.supportsTransactions = 1
+            try:
+                pass
+                #connection.adoConn.CommitTrans()
+            except:
+                pass
+                #connection.adoConn.execute("begin trans", {})
+            #connection.adoConn.BeginTrans()
+
+    def connection(self):
+        """returns a managed DBAPI connection from this SQLEngine's connection pool."""
+        c = self._pool.connect()
+        c.supportsTransactions = 0
+        return c
+
+    def pre_exec(self, proxy, compiled, parameters, **kwargs):
+        """ MS-SQL has a special mode for inserting non-NULL values into IDENTITY columns. Activate it if needed. """
+        if getattr(compiled, "isinsert", False):
+            self.context.IINSERT = False
+            self.context.HASIDENT = False
+            for c in compiled.statement.table.c:
+                if hasattr(c,'sequence'):
+                    self.context.HASIDENT = True
+                    if parameters.has_key(c.name):
+                        self.context.IINSERT = True
+                    break
+            if self.context.IINSERT:
+                proxy("SET IDENTITY_INSERT %s ON" % compiled.statement.table.name)
+
+    def post_exec(self, proxy, compiled, parameters, **kwargs):
+        """ Turn off the IDENTITY_INSERT mode if it's been activated, and fetch recently inserted IDENTITY values (works only for one column) """
+        if getattr(compiled, "isinsert", False):
+            if self.context.IINSERT:
+                proxy("SET IDENTITY_INSERT %s OFF" % compiled.statement.table.name)
+                self.context.IINSERT = False
+            elif self.context.HASIDENT:
+                cursor = proxy("SELECT @@IDENTITY AS lastrowid")
+                row = cursor.fetchone()
+                self.context.last_inserted_ids = [row[0]]
+            self.context.HASIDENT = False
+            
+    def dbapi(self):
+        return self.module
+
+    def reflecttable(self, table):
+        import sqlalchemy.databases.information_schema as ischema
+        
+        # Get base columns
+        if table.schema is not None:
+            current_schema = table.schema
+        else:
+            current_schema = self.get_default_schema_name()
+
+        columns = ischema.gen_columns.toengine(self)
+        s = select([columns],
+                   current_schema and sql.and_(columns.c.table_name==table.name, columns.c.table_schema==current_schema) or columns.c.table_name==table.name,
+                   order_by=[columns.c.ordinal_position])
+        
+        c = s.execute()
+        while True:
+            row = c.fetchone()
+            if row is None:
+                break
+
+            (name, type, nullable, charlen, numericprec, numericscale, default) = (
+                row[columns.c.column_name], 
+                row[columns.c.data_type], 
+                row[columns.c.is_nullable] == 'YES', 
+                row[columns.c.character_maximum_length],
+                row[columns.c.numeric_precision],
+                row[columns.c.numeric_scale],
+                row[columns.c.column_default]
+            )
+
+            args = []
+            for a in (charlen, numericprec, numericscale):
+                if a is not None:
+                    args.append(a)
+            coltype = ischema_names[type]
+        
+            coltype = coltype(*args)
+            colargs= []
+            if default is not None:
+                colargs.append(PassiveDefault(sql.text(default)))
+                
+            table.append_item(schema.Column(name, coltype, nullable=nullable, *colargs))
+
+
+        # We also run an sp_columns to check for identity columns:
+        # FIXME: note that this only fetches the existence of an identity column, not its properties like (seed, increment)
+        cursor = table.engine.execute("sp_columns " + table.name, {})
+        while True:
+            row = cursor.fetchone()
+            if row is None:
+                break
+            col_name, type_name = row[3], row[5]
+            if type_name.endswith("identity"):
+                ic = table.c[col_name]
+                # setup a pseudo-sequence to represent the identity attribute - we interpret this at table.create() time as the identity attribute
+                ic.sequence = schema.Sequence(ic.name + '_identity')
+
+        # Add constraints
+        RR = ischema.gen_ref_constraints.toengine(self)    #information_schema.referential_constraints
+        TC = ischema.gen_constraints.toengine(self)        #information_schema.table_constraints
+        C  = ischema.gen_column_constraints.toengine(self).alias('C') #information_schema.constraint_column_usage: the constrained column 
+        R  = ischema.gen_column_constraints.toengine(self).alias('R') #information_schema.constraint_column_usage: the referenced column
+
+        fromjoin = TC.join(RR, RR.c.constraint_name == TC.c.constraint_name).join(C, C.c.constraint_name == RR.c.constraint_name)
+        fromjoin = fromjoin.join(R, R.c.constraint_name == RR.c.unique_constraint_name)
+
+        s = select([TC.c.constraint_type, C.c.table_schema, C.c.table_name, C.c.column_name,
+                    R.c.table_schema, R.c.table_name, R.c.column_name],
+                   and_(RR.c.constraint_schema == current_schema,  C.c.table_name == table.name),
+                   from_obj = [fromjoin]
+                   )
+               
+        c = s.execute()
+
+        while True:
+            row = c.fetchone()
+            if row is None:
+                break
+            (type, constrained_column, referred_schema, referred_table, referred_column) = (
+                row[0],
+                row[3],
+                row[4],
+                row[5],
+                row[6]
+                )
+
+            if type=='PRIMARY KEY':
+                table.c[constrained_column]._set_primary_key()
+            elif type=='FOREIGN KEY':
+                remotetable = Table(referred_table, self, autoload = True, schema=referred_schema)
+                table.c[constrained_column].append_item(schema.ForeignKey(remotetable.c[referred_column]))
+        
+
+
+class MSSQLCompiler(ansisql.ANSICompiler):
+    def visit_select_precolumns(self, select):
+        """ MS-SQL puts TOP, its version of LIMIT, here """
+        s = select.distinct and "DISTINCT " or ""
+        if (select.limit):
+            s += "TOP %s " % (select.limit,)
+        return s
+
+    def limit_clause(self, select):
+        # Limit in mssql is after the select keyword; MSsql has no support for offset
+        return ""
+
+    #def get_from_text(self, obj):
+    #    #  SQL Server is a little sensitive about aliases.  Alias the table name if it's not the same as the from clause
+    #    frm = self.froms.get(obj, None)
+    #    if frm and isinstance(obj, schema.Table) and frm <> obj.name and not " AS " in frm:
+    #        print 'tfrm:', type(frm), 'frm:', frm
+    #        print 'tobj:', type(obj), 'obj:', obj, 'objname:', obj.name
+    #        import pdb ; pdb.set_trace()
+    #        return frm + " AS " + obj.name
+    #    return frm
+
+
+    def get_from_text(self, obj, is_aliased=False):
+        #  SQL Server is a little sensitive about aliases.  Alias the table name if it's not the same as the from clause
+        frm = self.froms.get(obj, None)
+        if frm and isinstance(obj, schema.Table) and frm <> obj.name and (not is_aliased) and (not " AS " in frm):
+            #print 'tfrm:', type(frm), 'frm:', frm
+            #print 'tobj:', type(obj), 'obj:', obj, 'objname:', obj.name
+            #import pdb ; pdb.set_trace()
+            return frm + " AS " + obj.name
+        return frm
+        
+class MSSQLSchemaGenerator(ansisql.ANSISchemaGenerator):
+    def get_column_specification(self, column, override_pk=False, first_pk=False):
+        colspec = column.name + " " + column.type.get_col_spec()
+
+        # install a IDENTITY Sequence if we have an implicit IDENTITY column
+        if column.primary_key and isinstance(column.type, sqltypes.Integer):
+            if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
+                column.sequence = schema.Sequence(column.name + '_seq')
+
+        if not column.nullable:
+            colspec += " NOT NULL"
+
+        if hasattr(column, 'sequence'):
+            colspec += " IDENTITY(%s,%s)" % (column.sequence.start or 1, column.sequence.increment or 1)
+        else:
+            default = self.get_column_default_string(column)
+            if default is not None:
+                colspec += " DEFAULT " + default
+
+        if column.primary_key:
+            if not override_pk:
+                colspec += " PRIMARY KEY"
+        if column.foreign_key:
+            colspec += " REFERENCES %s(%s)" % (column.foreign_key.column.table.fullname, column.foreign_key.column.name)
+        
+        return colspec
+
+
+class MSSQLSchemaDropper(ansisql.ANSISchemaDropper):
+    def visit_index(self, index):
+        self.append("\nDROP INDEX " + index.table.name + "." + index.name)
+        self.execute()
Index: lib/sqlalchemy/engine.py
===================================================================
--- lib/sqlalchemy/engine.py	(revision 1231)
+++ lib/sqlalchemy/engine.py	(working copy)
@@ -193,7 +193,7 @@
     connection = property(_connection, doc="the connection represented by this SQLSession.  The connection is late-connecting, meaning the call to the connection pool only occurs when it is first called (and the pool will typically only connect the first time it is called as well)")
     
     def begin(self):
-        """begins" a transaction on this SQLSession's connection.  repeated calls to begin() will increment a counter that must be decreased by corresponding commit() statements before an actual commit occurs.  this is to provide "nested" behavior of transactions so that different functions in a particular call stack can call begin()/commit() independently of each other without knowledge of an existing transaction."""
+        """begins a transaction on this SQLSession's connection.  repeated calls to begin() will increment a counter that must be decreased by corresponding commit() statements before an actual commit occurs.  this is to provide "nested" behavior of transactions so that different functions in a particular call stack can call begin()/commit() independently of each other without knowledge of an existing transaction. """
         if self.__tcount == 0:
             self.__transaction = self.connection
             self.engine.do_begin(self.connection)
@@ -506,7 +506,7 @@
         self.commit()
         
     def begin(self):
-        """"begins a transaction on the current thread's SQLSession."""
+        """ begins a transaction on the current thread's SQLSession. """
         self.session.begin()
             
     def rollback(self):
@@ -647,7 +647,7 @@
         return ResultProxy(cursor, self, typemap=compiled.typemap)
 
     def execute(self, statement, parameters=None, connection=None, cursor=None, echo=None, typemap=None, commit=False, return_raw=False, **kwargs):
-        """executes the given string-based SQL statement with the given parameters.  
+        """ executes the given string-based SQL statement with the given parameters.  
 
         The parameters can be a dictionary or a list, or a list of dictionaries or lists, depending
         on the paramstyle of the DBAPI.
@@ -659,7 +659,7 @@
         up.
 
         In all error cases, a rollback() is immediately performed on the connection before
-        propigating the exception outwards.
+        propagating the exception outwards.
 
         Other options include:
 
Index: lib/sqlalchemy/schema.py
===================================================================
--- lib/sqlalchemy/schema.py	(revision 1231)
+++ lib/sqlalchemy/schema.py	(working copy)
@@ -256,7 +256,7 @@
         default=None : a scalar, python callable, or ClauseElement representing the "default value" for this column,
         which will be invoked upon insert if this column is not present in the insert list or is given a value
         of None.
-        
+
         hidden=False : indicates this column should not be listed in the
         table's list of columns.  Used for the "oid" column, which generally
         isnt in column lists.
@@ -271,7 +271,9 @@
         indexed in a unique index . Pass true to autogenerate the index
         name. Pass a string to specify the index name. Multiple columns that
         specify the same index name will all be included in the index, in the
-        order of their creation.  """
+        order of their creation.
+
+        """
         
         name = str(name) # in case of incoming unicode
         super(Column, self).__init__(name, None, type)
@@ -507,6 +509,7 @@
         """calls the visit_seauence method on the given visitor."""
         return visitor.visit_sequence(self)
 
+
 class Index(SchemaItem):
     """Represents an index of columns from a database table
     """
Index: test/indexes.py
===================================================================
--- test/indexes.py	(revision 1231)
+++ test/indexes.py	(working copy)
@@ -42,9 +42,10 @@
         """test that mixed-case index identifiers are legal"""
         employees = Table('companyEmployees', testbase.db,
                           Column('id', Integer, primary_key=True),
-                          Column('firstName', String),
-                          Column('lastName', String),
-                          Column('emailAddress', String))        
+                          Column('firstName', String(30)),
+                          Column('lastName', String(30)),
+                          Column('emailAddress', String(30)))
+
         employees.create()
         self.created.append(employees)
         
Index: test/testbase.py
===================================================================
--- test/testbase.py	(revision 1231)
+++ test/testbase.py	(working copy)
@@ -46,9 +46,11 @@
         elif DBTYPE == 'oracle8':
             db_uri = 'oracle://user=scott&password=tiger'
             opts = {'use_ansi':False}
+        elif DBTYPE == 'mssql':
+            db_uri = 'mssql://database=test&user=scott&password=tiger'
 
     if not db_uri:
-        raise "Could not create engine.  specify --db <sqlite|sqlite_file|postgres|mysql|oracle> to test runner."
+        raise "Could not create engine.  specify --db <sqlite|sqlite_file|postgres|mysql|oracle|oracle8|mssql> to test runner."
 
     if PROXY:
         db = proxy.ProxyEngine(echo=echo, default_ordering=True, **opts)
Index: test/testtypes.py
===================================================================
--- test/testtypes.py	(revision 1231)
+++ test/testtypes.py	(working copy)
@@ -147,8 +147,8 @@
     def testbinary(self):
         stream1 =self.get_module_stream('sqlalchemy.sql')
         stream2 =self.get_module_stream('sqlalchemy.engine')
-        binary_table.insert().execute(misc='sql.pyc', data=stream1, data_slice=stream1[0:100])
-        binary_table.insert().execute(misc='engine.pyc', data=stream2, data_slice=stream2[0:99])
+        binary_table.insert().execute(primary_id=1, misc='sql.pyc',    data=stream1, data_slice=stream1[0:100])
+        binary_table.insert().execute(primary_id=2, misc='engine.pyc', data=stream2, data_slice=stream2[0:99])
         l = binary_table.select().execute().fetchall()
         print len(stream1), len(l[0]['data']), len(l[0]['data_slice'])
         self.assert_(list(stream1) == list(l[0]['data']))
@@ -179,7 +179,7 @@
         collist = [Column('user_id', INT, primary_key = True), Column('user_name', VARCHAR(20)), Column('user_datetime', DateTime),
                    Column('user_date', Date), Column('user_time', Time)]
         
-        if db.engine.__module__.endswith('mysql'):
+        if db.engine.__module__.endswith('mysql') or db.engine.__module__.endswith('mssql'):
             # strip microseconds -- not supported by this engine (should be an easier way to detect this)
             for d in insert_data:
                 if d[2] is not None:
@@ -198,6 +198,7 @@
         users_with_date = Table('query_users_with_date', db, redefine = True, *collist)
         users_with_date.create()
         insert_dicts = [dict(zip(fnames, d)) for d in insert_data]
+
         for idict in insert_dicts:
             users_with_date.insert().execute(**idict) # insert the data
 
