[sqlalchemy] execute() and commit() and commit

2010-02-12 Thread Bob Farrell
Hi there,

I'm having a bit of trouble with session.execute() and session.commit()

Here's a simple example that demonstrates what I'm trying to do:

import os
import sys

import sqlalchemy.orm as orm
import sqlalchemy as sa

Session = orm.sessionmaker()
session = None

def setup():
    global session

    Session.configure(bind=sa.create_engine('sqlite:///test.sqlite'))

    session = Session(autocommit=False)

    try:
        os.unlink("test.sqlite")
    except OSError:
        pass
    session.execute("create table test (a, b, c)")
    session.commit()

def test1():
    session.execute("insert into test values (1, 2, 3)")
    session.commit()

def test2():
    session.execute("insert into test values (4, 5, 6)")

def test3():
    session.execute("insert into test values (7, 8, 9)")
    session.execute("commit")

if __name__ == "__main__":
    dispatch = {'test1': test1,
                'test2': test2,
                'test3': test3}
    if len(sys.argv) > 1 and sys.argv[1] in dispatch:
        setup()
        dispatch[sys.argv[1]]()

And here's the output from running each test:

{. b...@mote: ~/dev/sasqlconsole .]
£ python test.py test1 && sqlite3 test.sqlite "select * from test;"
1|2|3
{. b...@mote: ~/dev/sasqlconsole .]
£ python test.py test2 && sqlite3 test.sqlite "select * from test;"
{. b...@mote: ~/dev/sasqlconsole .]
£ python test.py test3 && sqlite3 test.sqlite "select * from test;"
Traceback (most recent call last):
  File "test.py", line 41, in <module>
    dispatch[sys.argv[1]]()
  File "test.py", line 33, in test3
    session.execute("commit")
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/orm/session.py", line 753, in execute
    clause, params or {})
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/engine/base.py", line 824, in execute
    return Connection.executors[c](self, object, multiparams, params)
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/engine/base.py", line 874, in _execute_clauseelement
    return self.__execute_context(context)
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/engine/base.py", line 896, in __execute_context
    self._cursor_execute(context.cursor, context.statement, context.parameters[0], context=context)
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/engine/base.py", line 950, in _cursor_execute
    self._handle_dbapi_exception(e, statement, parameters, cursor, context)
  File "/usr/lib/python2.5/site-packages/SQLAlchemy-0.5.8-py2.5.egg/sqlalchemy/engine/base.py", line 931, in _handle_dbapi_exception
    raise exc.DBAPIError.instance(statement, parameters, e, connection_invalidated=is_disconnect)
sqlalchemy.exc.OperationalError: (OperationalError) cannot commit - no transaction is active u'commit' []

Tests 1 and 2 behave as I'd expect, but it's when I try to pass in a commit as
a string to execute() that I see explosions.

This is for a SQL REPL I wrote some time ago; I've decided to add transactions
to it, so I need a sane way to deal with "commit" being typed. I'd like to make
this behaviour optional so that it can mimic other SQL REPLs such as SQL*Plus.

Any suggestions on the best way to go about doing this are welcome.
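
(A minimal sketch of one way to handle this, not from the thread itself:
intercept the bare transaction keywords in the REPL before they reach
session.execute() and call the Session's own methods instead. The
run_statement helper name is made up.)

def run_statement(session, raw_sql):
    # Hypothetical REPL helper: route bare transaction keywords to the
    # Session's transaction control rather than executing them as text.
    keyword = raw_sql.strip().rstrip(';').lower()
    if keyword == 'commit':
        session.commit()
        return None
    if keyword == 'rollback':
        session.rollback()
        return None
    return session.execute(raw_sql)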

Thanks,
-- 
Bob Farrell




[sqlalchemy] Re: execute() and commit() and commit

2010-02-12 Thread Bob Farrell
Sorry, I forgot to mention that if I run my "select * from test"
*after* I get the error for test3, it shows that the insert did in fact
get committed to the database:

{. b...@mote: ~/dev/sasqlconsole .]
£ sqlite3 test.sqlite "select * from test"
7|8|9

So it seems that the commit is getting sent to the database and it's
processing correctly, but SQLAlchemy is stepping in somewhere on the
way back before the execute() returns and causing some mischief.




[sqlalchemy] Re: execute() and commit() and commit

2010-02-12 Thread Bob Farrell
I tried this against Oracle and it works without a hitch, so it looks
like it's a problem with sqlite - we've got some ideas on how to fix
it, so we'll carry on looking. This thread can be ignored now, as
it's not a sqlalchemy issue (unless sqlalchemy planned to special-case
commit as text for sqlite).

Thanks,





[sqlalchemy] Re: Changes in exc.py causing problems.

2009-05-27 Thread Bob Farrell

Hooray. \o/

I'll leave the code commented until I pull the next release.

Cheers,

On May 26, 6:03 pm, Michael Bayer mike...@zzzcomputing.com wrote:
 this is all fixed in the current trunk.  release probably today as the
 issue you have below is more severe than the one I had noticed.

 Bob Farrell wrote:

  Hi hi.

  £ diff SQLAlchemy-0.5.2-py2.5.egg/sqlalchemy/exc.py SQLAlchemy-0.5.4p1-
  py2.5.egg/sqlalchemy/exc.py
   134a135,139
   >         if len(self.params) > 10:
   >             return ' '.join((SQLAlchemyError.__str__(self),
   >                              repr(self.statement),
   >                              repr(self.params[:2]),
   >                              '... and a total of %i bound parameters' % len(self.params)))

  This change is resulting in this problem:

    File /home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/
  prospectspace/commands/pspatch.py, line 1473, in create_user
      print e
    File /home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/
  python2.5/site-packages/SQLAlchemy-0.5.4p1-py2.5.egg/sqlalchemy/
  exc.py, line 138, in __str__
      repr(self.params[:2]),
  TypeError: unhashable type

  Which is a little confusing, not sure why any hashing attempt is
  happening there - I'll investigate it further and send a patch
  tomorrow, unless something blaringly obvious stands out to you as to
  what's causing this.

  Cheers,



[sqlalchemy] Re: Possible bug in orm/state.py

2009-05-26 Thread Bob Farrell



On May 23, 7:56 pm, Michael Bayer mike...@zzzcomputing.com wrote:
 On May 23, 2009, at 1:47 PM, Bob Farrell wrote:



  Hi, using scoped_session(sessionmaker()) to create my sessions, if I
  hammer a request (using Pylons) by repeatedly clicking on a link that
  uses the ORM somewhat extensively for the relevant request, it seems
  that another thread is getting involved with SQLAlchemy internals and
  pulling the rug out from under its feet.

 that means you are sharing a mapped instance between threads.  A
 mapped instance, when associated with a Session (i.e. not detached),
 should be considered as an extension of that Session's internal
 state.  The Session isn't threadsafe so you can't share a persistent
 instance between threads.  If you are using some kind of persistent/
 cached instances, make sure they are detached from their original
 Session first, or merge() the instances into the Session local to the
 request before using them (you can send the dont_load=True flag to
 merge() if you want to cut down on potentially needless SELECT
 statements).
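
(For illustration, a minimal sketch of the merge() route described above,
with made-up object names; dont_load=True is the 0.5-era flag.)

local_obj = Session.merge(cached_obj, dont_load=True)
# local_obj is now attached to this thread's Session and safe to use here;
# dont_load=True skips the SELECT that merge() would otherwise issue.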

Okay, thanks - I'll track down the problem with that in mind.



  And here's how I'm dealing with creating the sessions:

  threadlocal = threading.local()

  Session = scoped_session(sessionmaker(autocommit=True))
  Session.metadata = None

  def setup_db():
     if hasattr(threadlocal, 'engine'):
         return
     uri = config['main.engine.dburi']
     threadlocal.engine = create_engine(uri)
     Session.configure(bind=threadlocal.engine)
     if Session.metadata is None:
         Session.metadata = MetaData(threadlocal.engine)
         model.initialise(Session.metadata)

 the threading.local() is unnecessary...unless you are planning for the  
 same application to be run with different .ini files in each thread  
 which would be extremely unusual.    scoped_session() already handles  
 the thread local part for you as far as Sessions are concerned, and  
 Engine objects are threadsafe.

Ah, so engine = create_engine(...) will provide separate connections
as needed, rather than just a single connection? The reason I wrote
this code was because the original code we had was causing big
problems - we were repeatedly getting "QueuePool limit of size 30
overflow 10 reached, connection timed out, timeout 30" errors, so what
I was hoping to do here was limit the connections to one per thread,
but thinking about it I suppose this code doesn't achieve that at all,
as any code can create a new connection with engine.connect().

I'm going to have to track down which bits of code in SQLAlchemy
implicitly create new connections so I can figure out where we're not
closing them - hopefully autocommit=True will remedy this somewhat.
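
(One common way to guarantee the per-request connection goes back to the
pool, sketched here with a hypothetical make_app wrapper around the
setup_db()/Session code quoted above: call Session.remove() when the
request ends.)

def make_app(app):
    # Hypothetical WSGI-style wrapper: close the thread-local Session after
    # each request so its connection is returned to the QueuePool.
    def wrapped(environ, start_response):
        setup_db()
        environ['dbsession'] = Session()
        try:
            return app(environ, start_response)
        finally:
            Session.remove()
    return wrapped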

Anyway, thanks a lot for the info. :-)

 Session = scoped_session(sessionmaker(autocommit=True))
 metadata = None

 def setup_db():
     global metadata
     if metadata is not None:
         return
     uri = config['main.engine.dburi']
     engine = create_engine(uri)
     Session.configure(bind=engine)
     metadata = MetaData(engine)
     model.initialise(metadata)

 if OTOH you had some reason for the threadlocal engines, then you don't
 want to use Session.configure, which configures the whole
 scoped_session().  You'd want to say
 Session(bind=my_threadlocal_engine).



[sqlalchemy] Re: Possible bug in orm/state.py

2009-05-26 Thread Bob Farrell



On May 26, 3:31 pm, Michael Bayer mike...@zzzcomputing.com wrote:
 Bob Farrell wrote:

  Hi Michael, I found this (your writing) in a thread from quite a while
  back:

  A common pattern which can cause what you see there is if your
  templates are accessing lazy loaders on objects, but the Session which
  loaded those objects had been clear()'ed or otherwise closed before
  the template was allowed to render.  You should try to keep the same
  Session open throughout the full request lifecycle, and you should
  avoid removing any objects from the Session which you wish to continue
  using.

  Hi - came across this because I've started getting the same problem
  and was somewhat relieved to see that the solution is a well-known one
  (given that rendering the template raises this error). I have code
  like this:

          [do stuff involving the session]
          return self.render_response('settings.mako', t_pars)

  in my controller methods and it's the return where the error gets
  raised. Can you tell me a good approach for making the session stay
  alive here ? I'm assuming the problem is that self.render_response
  returns something lazy and so by the time the template actually
  renders the objects relating to the session have gone out of scope.

 the whole request is wrapped within a block that handles Session
 lifecycle, so that the Session is still just fine when render_response is
 being called.  In Pylons, an appropriate base.py is provided for you which
 does this.  A description is at
 http://www.sqlalchemy.org/docs/05/session.html#lifespan-of-a-contextu...

 The Session has a behavior whereby after a commit(), it expires its
 contents.  This so that it reloads everything upon access to get access to
 what any concurrent transactions have done.   If your pattern is something
 like this:

    Session.commit()
    return self.render_response(...)

 your render phase may issue a lot of SQL to reload things (though nothing
 should break).    Two ways to work around this are to set
 expire_on_commit=False in your sessionmaker(), or to wrap your whole
 controller method in a commit, such as:

 @commits_transaction
 def my_method(self):
      # do stuff
     return self.render_response(...)

 commits_transaction looks like:

 @decorator
 def commits_transaction(fn, self, *args, **kw):
     try:
         ret = fn(self, *args, **kw)
         Session.commit()
         return ret
     except:
         Session.rollback()
         raise

 the rollback() may not be needed if your overall handler calls rollback()
 in all cases.
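
(The first workaround above as a one-liner, for reference; expire_on_commit
is the 0.5 sessionmaker() argument.)

Session = scoped_session(sessionmaker(expire_on_commit=False))
# commit() will no longer expire loaded instances, so rendering the template
# afterwards does not trigger a round of reloading SELECTs.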

Great, thanks very much - looks like gutting our horrible
connection-handling code and moving to scoped_session is really making
things better and this should (hopefully) be the last problem needing
ironing out. Shouldn't be a problem given what you mentioned above. You
have, as ever, been a tremendous help. :-)



[sqlalchemy] Changes in exc.py causing problems.

2009-05-26 Thread Bob Farrell

Hi hi.

£ diff SQLAlchemy-0.5.2-py2.5.egg/sqlalchemy/exc.py SQLAlchemy-0.5.4p1-
py2.5.egg/sqlalchemy/exc.py
134a135,139
>         if len(self.params) > 10:
>             return ' '.join((SQLAlchemyError.__str__(self),
>                              repr(self.statement),
>                              repr(self.params[:2]),
>                              '... and a total of %i bound parameters' % len(self.params)))

This change is resulting in this problem:

  File "/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/prospectspace/commands/pspatch.py", line 1473, in create_user
    print e
  File "/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/SQLAlchemy-0.5.4p1-py2.5.egg/sqlalchemy/exc.py", line 138, in __str__
    repr(self.params[:2]),
TypeError: unhashable type

Which is a little confusing - I'm not sure why any hashing attempt is
happening there. I'll investigate it further and send a patch
tomorrow, unless something glaringly obvious stands out to you as to
what's causing this.
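
(A likely explanation, not spelled out in the thread: when a statement uses
named bind parameters, self.params is a dict rather than a list, and
self.params[:2] on a dict tries to look up a slice object as a key; slice
objects are unhashable, hence the TypeError.)

>>> params = {'user_id': 1, 'name': 'bob'}
>>> params[:2]        # what exc.py line 138 effectively does for a dict
Traceback (most recent call last):
  ...
TypeError: unhashable type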

Cheers,



[sqlalchemy] Possible bug in orm/state.py

2009-05-23 Thread Bob Farrell

Hi, using scoped_session(sessionmaker()) to create my sessions, if I hammer a
request (using Pylons) by repeatedly clicking on a link that uses the ORM
somewhat extensively for the relevant request, it seems that another thread is
getting involved with SQLAlchemy internals and pulling the rug out from under
its feet. Here's the change I made to stop the exception from happening, but
we're just wondering if I've done something else wrong or if this is an actual
bug in SQLAlchemy:

Index: state.py
===================================================================
--- state.py    (revision 5974)
+++ state.py    (working copy)
@@ -170,9 +170,14 @@
                 attr.impl.key in self.expired_attributes and
                 attr.impl.key in unmodified
             ])
-        for k in self.expired_attributes:
-            self.callables.pop(k, None)
-        del self.expired_attributes
+        try:
+            for k in self.expired_attributes:
+                self.callables.pop(k, None)
+            del self.expired_attributes
+        except AttributeError:
+            # XXX: self.expired_attributes can be del'ed by another thread
+            # which raises an AttributeError here
+            pass
         return ATTR_WAS_SET
 
     @property

Here's the original traceback before the change:

File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/prospectspace/controllers/company.py', line 206 in index
  return self.render_response('company.mako', t_pars)
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/prospectspace/lib/base.py', line 372 in render_response
  page = tmpl.render(**kargs)
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/Mako-0.1.8-py2.5.egg/mako/template.py', line 114 in render
  return runtime._render(self, self.callable_, args, data)
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/Mako-0.1.8-py2.5.egg/mako/runtime.py', line 287 in _render
  _render_context(template, callable_, context, *args, **_kwargs_for_callable(callable_, data))
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/Mako-0.1.8-py2.5.egg/mako/runtime.py', line 304 in _render_context
  _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/Mako-0.1.8-py2.5.egg/mako/runtime.py', line 337 in _exec_template
  callable_(context, *args, **kwargs)
File 'prospectmaster_mako', line 61 in render_body
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/SQLAlchemy-0.5.4p1-py2.5.egg/sqlalchemy/orm/attributes.py', line 158 in __get__
  return self.impl.get(instance_state(instance), instance_dict(instance))
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/SQLAlchemy-0.5.4p1-py2.5.egg/sqlalchemy/orm/attributes.py', line 374 in get
  value = callable_()
File '/home/bob/src/prospectspace/branches/sqlalchemy-eon-merge/lib/python2.5/site-packages/SQLAlchemy-0.5.4p1-py2.5.egg/sqlalchemy/orm/state.py', line 175 in __call__
  del self.expired_attributes
AttributeError: expired_attributes

And here's how I'm dealing with creating the sessions:

threadlocal = threading.local()

Session = scoped_session(sessionmaker(autocommit=True))
Session.metadata = None

def setup_db():
    if hasattr(threadlocal, 'engine'):
        return
    uri = config['main.engine.dburi']
    threadlocal.engine = create_engine(uri)
    Session.configure(bind=threadlocal.engine)
    if Session.metadata is None:
        Session.metadata = MetaData(threadlocal.engine)
        model.initialise(Session.metadata)


And then each request does this:

setup_db()
environ['dbsession'] = Session()

The reason for including this bit is because I'm not convinced I'm doing it
correctly, so I want to make sure that, if I am doing it wrong, this isn't
what's causing the problem in SQLAlchemy, i.e. it may not be a bug at all.

Please let me know if you need any more code/info - thanks a lot for
any help.




[sqlalchemy] Extended Query subclass with add_named_column method

2009-03-31 Thread Bob Farrell

Hello, SQLAlchemy people,

So I spoke to jek on IRC to see if there was a way to use add_column
without causing the query to return a RowTuple and it doesn't look
like there is, so I wrote this:

class AdditiveQuery(Query):
    """
    Extended sqlalchemy.orm.Query class with an add_named_column method to
    add a column onto a query but store the values on the main RowProxy
    object instead of yielding RowTuples as add_column does:

        q = AdditiveQuery(FooModel, session=some_session)
        q = q.join((BarModel, FooModel.x == BarModel.x))
        q = q.add_named_column(BarModel.baz, 'bar_baz')
        for row in q:
            print row.bar_baz
    """
    def __init__(self, *args, **kwargs):
        self._named_columns = []
        super(AdditiveQuery, self).__init__(*args, **kwargs)

    def add_named_column(self, column, alias=None):
        if alias is None:
            alias = column.key
        if alias in self._named_columns:
            raise ValueError("Alias %s already in use." % (alias,))
        self._named_columns.append(alias)
        return self.add_column(column)

    def __iter__(self):
        def g(it):
            checked = False
            for rows in it:
                row = rows[0]
                rest = rows[1:]
                for alias, value in zip(self._named_columns, rest):
                    if not checked and hasattr(row, alias):
                        raise ValueError("Alias %s already exists on "
                                         "original row object." % (alias,))
                    setattr(row, alias, value)
                yield row
                checked = True
        it = super(AdditiveQuery, self).__iter__()
        return g(it)

The only immediate issue I can think of with this is that if you do
AdditiveQuery(FooModel, BarModel) then this is going to blow up as
it'll try to call setattr on a RowTuple - I'm not sure whether to a)
just let this happen, b) take preventative measures and raise a more
useful exception, or c) allow specifying exactly which entity to attach
the named column onto.

Any suggestions welcome and, of course, if any of the SA boys want to
stick this into SQLAlchemy, you're more than welcome.

Thanks !




[sqlalchemy] Re: Extended Query subclass with add_named_column method

2009-03-31 Thread Bob Farrell

Yes, if this has been defined on the mapper. Generally I do a lot of
just using SQLAlchemy as a way of writing SQL in Python code that can
be passed around for dynamically building up queries, so my mappers
tend to be quite bare - or are you suggesting this can be done without
configuring the relation on the mapper ? If there's a way to do this
in SQLAlchemy that doesn't need extra mapper config then I'm all
ears. :-)
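
(For reference, a sketch of the contains_eager() route Michael suggests
below. It assumes a 'bars' relation is configured on FooModel's mapper,
which is exactly the extra mapper config being asked about; the names are
made up.)

from sqlalchemy.orm import contains_eager

q = session.query(FooModel).join('bars').options(contains_eager('bars'))
for foo in q:
    print foo.bars[0].baz   # populated from the joined row, no RowTuples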

On Mar 31, 3:22 pm, Michael Bayer mike...@zzzcomputing.com wrote:
 wouldn't this be accomplished more simply using contains_eager() ?




[sqlalchemy] Re: Textual SQL

2009-02-02 Thread Bob Farrell

On Mon, Feb 02, 2009 at 09:56:15AM -0800, Pavel Skvazh wrote:
 
 Session.execute('INSERT INTO SOMETHING SOMETHING / DELETE/ UPDATE')
 
 Do I have to call Session.commit() after this or is it already taken
 care of? In other words, do literal sql statements follow the
 session's transaction rules or do they act on their own?
 
sess.execute() will execute whatever you pass it immediately.

 And since this works and worked for me for a long time now, what's the
 benefit of from sqlalchemy.sql import text that I noticed in the docs
 lately?
 
Using text() creates a ClauseElement that you can whack together with other
constructs. See the docs here for more info:
http://www.sqlalchemy.org/docs/05/sqlexpression.html#using-text
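
(A small sketch of the difference, with made-up table and column names:
text() gives you bind parameters and a ClauseElement you can combine with
other expression constructs, whereas a bare string is just sent as-is.)

from sqlalchemy.sql import text

stmt = text("UPDATE accounts SET balance = balance - :amount WHERE id = :id")
Session.execute(stmt, {'amount': 10, 'id': 42})
Session.commit()   # still needed in a transactional (non-autocommit) session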

 Thanks!
  

-- 
--
Bob Farrell
pH, an Experian Company
www.phgroup.com
Office Line: 020 7598 0310
Fax: 020 7598 0311
--




[sqlalchemy] Postgres and count() weirdness

2009-01-29 Thread Bob Farrell

Hi,

query.count().scalar()

This code works fine with sqlite and Oracle, but with PG it goes nuts and
complains that an explicit AS must be used in a subselect, i.e. it's doing
this:

select count(*) as blah from (select ...)

But PG wants this:

select count(*) as blah from (select ...) as boom

Doing this fixes the problem:

query.alias().count().scalar()

But to me this seems like a deficiency in the dialect - shouldn't the AS be
added automagically ?
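
(Roughly what the alias() workaround expands to, written out with the
expression API; a sketch assuming query here is a bound select() construct,
as the alias()/scalar() calls above suggest, with an arbitrary alias name.)

from sqlalchemy import func, select

inner = query.alias('subq')              # gives Postgres its required AS
total = select([func.count()], from_obj=[inner]).scalar()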

>>> sqlalchemy.__version__
'0.5.0rc4'

It's no biggie but seems worth pointing out in case you weren't aware of it.

Thanks. :-)
-- 
--
Bob Farrell
pH, an Experian Company
www.phgroup.com
Office Line: 020 7598 0310
Fax: 020 7598 0311
--




[sqlalchemy] Re: Orphans not deleted using cascade parameter

2008-08-28 Thread Bob Farrell
On Wed, Aug 27, 2008 at 12:53:38PM -0700, Alex Mathieu wrote:
 
 Thanks Michael, I'll have a look over this !!
 
 Bob, thanks also for your help, however, I'm not able to use the
 code... maybe the indention is wrong here or I don't know... I was
 able to execute the function, but even by putting a print as the first
 line of the function, nothing got printed out, weird... (maybe my lack
 of skills using python, yet :P)
 
Alex,

Weird. I've attached it here so maybe that will make things work.
-- 
--
Bob Farrell
pH, an Experian Company
www.phgroup.com
Office Line: 020 7598 0310
Fax: 020 7598 0311
--



[sqlalchemy] Re: Orphans not deleted using cascade parameter

2008-08-27 Thread Bob Farrell

On Wed, Aug 27, 2008 at 07:48:20AM -0700, Alex Mathieu wrote:
 
 F*ck... I just realized that I was using  MyISAM table engine...
 here's the deal then... I cannot use InnoDB for this projet so I
 think I will be writing some recursive code that can determine if an
 object has childs dependencies and will delete the proper objects
 
 thanks again =)
 

I posted this on the ML a while ago - one of the SA devs suggested a use case
for it would be your current situation so here it is again. Let me know if you
use it and have any problems.

def delete_cascade(orm_obj):
    """Perform a cascading delete on any ORM object and its children."""
    # Since we take an ORM _object_, we need to discover its table:
    obj_table = class_mapper(type(orm_obj)).mapped_table

    def get_child_tables(parent_table, children=[]):
        """Recursively find all child tables."""
        new_children = []
        # Use SQLAlchemy's table_iterator reversed to give us the tables in
        # the correct order to ensure that we can delete without breaking any
        # constraints (i.e. we will not delete a parent before its child):
        for table in obj_table.metadata.table_iterator(reverse=True):
            for fk in table.foreign_keys:
                if fk.references(parent_table) and \
                        (table, fk, parent_table) not in children:
                    new_children.append((table, fk, parent_table))
                    break
        # If no new children are found we have reached the top of the
        # recursion so we fall back down the stack:
        if not new_children:
            return []
        else:
            for child in new_children:
                # Here is the recursive call:
                children.extend(get_child_tables(child[0]))
            children.extend(new_children)
            return children

    _children = get_child_tables(obj_table)
    children = []
    # This loop filters out any tables who have more than one foreign key
    # where one of the foreign keys references the root node so we have no
    # duplicates. The result is a list of tables that reference either the
    # root node or their parent:
    for child in _children:
        if child[0] not in [x[0] for x in children]:
            children.append(child)
        elif child[1].references(obj_table):
            for i, _child in enumerate(children):
                if _child[0] == child[0]:
                    children[i] = child
                    break
    # This is a rare-case optimisation that sees if any of the tables
    # reference the root node indirectly by having a foreign key whose
    # counterpart is a direct reference to the root node:
    for child in children:
        table, fk, parent_table = child
        if not fk.references(obj_table):
            parent_fk = fk.column.foreign_key
            while parent_fk is not None:
                if parent_fk.references(obj_table):
                    obj_column = (
                        parent_fk.column.key
                    )
                    break
                parent_fk = parent_fk.column.foreign_key
            # Finally build a select for grandchildren or later to establish
            # which records need to be removed by seeing which of their
            # parent's records are ancestors of the root node:
            if parent_fk is None:
                sel = select([fk.parent])
                parent_fk = fk.column.foreign_key
                while parent_fk is not None:
                    sel.append_whereclause(
                        parent_fk.parent==parent_fk.column
                    )
                    tmp = parent_fk.column.foreign_key
                    if tmp is not None:
                        parent_fk = tmp
                    else:
                        break
                obj_column = (
                    parent_fk.column.key
                )
                sel.append_whereclause(
                    parent_fk.column==getattr(orm_obj, obj_column)
                )
                in_column = fk.column.key
                yield delete(
                    fk.parent.table,
                    fk.parent.in_(sel)
                )
                continue
        # Otherwise simply yield a delete statement to delete the
        # first-generation child of the root node:
        else:
            obj_column = fk.column.key
        yield delete(
            table,
            fk.parent==getattr(orm_obj, obj_column)
        )
    # Build the delete statement for the root node itself by introspectively
    # discovering the primary keys of the root node's table and deleting a
    # single record from this table (i.e. the root node):
    pk = [getattr(orm_obj, x) for x in obj_table.primary_key.keys()]
    pk_cols = [x for x in obj_table.c if x.primary_key]
    cond = pk[0] == pk_cols[0]
    for x, y in zip(pk[1:], pk_cols[1:]):
        if x and y:
            cond &= x == y
    yield delete(
        obj_table,
        cond
    )
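
(A usage sketch, not part of the original post: delete_cascade() is a
generator of delete() constructs, so the statements can be executed in order
inside a single transaction. engine and parent_obj are placeholders.)

conn = engine.connect()
trans = conn.begin()
try:
    for stmt in delete_cascade(parent_obj):
        conn.execute(stmt)
    trans.commit()
except:
    trans.rollback()
    raise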


-- 
--
Bob Farrell
pH, an Experian Company
www.phgroup.com
Office Line: 020 7598 0310
Fax: 020 7598

[sqlalchemy] Cascading delete

2008-08-21 Thread Bob Farrell
Hi there,

I spoke with zzzeek_ on IRC yesterday re: some code I'd written for an
introspective cascading delete function. We were previously using the ORM to do
this for us but, due to the way it works, it was taking several minutes to
delete large amounts of second-generation orphans. The code I've written
recursively gets all the tables involved, analyses the relationships and issues
as few delete statements as possible (I hope). I've attached the code, or
there's a link to it here:
http://paste.pocoo.org/show/82878/

With parent-child relationships my tests are showing this working much, much
faster than the ORM (by a factor of at least 100x). I haven't had a chance to
set up more complicated tables to fully test the recursive aspect of it but it
has worked with what I've given it so far.

I'm really enjoying working with SQLAlchemy, you guys have done a really good
job. If you think there's room for something like this in SA then it's all
yours. :-)

Cheers,
-- 
--
Bob Farrell
pH, an Experian Company
www.phgroup.com
Office Line: 020 7598 0310
Fax: 020 7598 0311
--


from sqlalchemy import class_mapper, select
from sqlalchemy.sql import delete

def delete_cascade(orm_obj):
    """Perform a cascading delete on any ORM object and its children."""
    # Since we take an ORM _object_, we need to discover its table:
    obj_table = class_mapper(type(orm_obj)).mapped_table

    def get_child_tables(parent_table, children=[]):
        """Recursively find all child tables."""
        new_children = []
        # Use SQLAlchemy's table_iterator reversed to give us the tables in
        # the correct order to ensure that we can delete without breaking any
        # constraints (i.e. we will not delete a parent before its child):
        for table in obj_table.metadata.table_iterator(reverse=True):
            for fk in table.foreign_keys:
                if fk.references(parent_table) and \
                        (table, fk, parent_table) not in children:
                    new_children.append((table, fk, parent_table))
                    break
        # If no new children are found we have reached the top of the
        # recursion so we fall back down the stack:
        if not new_children:
            return []
        else:
            for child in new_children:
                # Here is the recursive call:
                children.extend(get_child_tables(child[0]))
            children.extend(new_children)
            return children

    _children = get_child_tables(obj_table)
    children = []
    # This loop filters out any tables who have more than one foreign key
    # where one of the foreign keys references the root node so we have no
    # duplicates. The result is a list of tables that reference either the
    # root node or their parent:
    for child in _children:
        if child[0] not in [x[0] for x in children]:
            children.append(child)
        elif child[1].references(obj_table):
            for i, _child in enumerate(children):
                if _child[0] == child[0]:
                    children[i] = child
                    break
    # This is a rare-case optimisation that sees if any of the tables
    # reference the root node indirectly by having a foreign key whose
    # counterpart is a direct reference to the root node:
    for child in children:
        table, fk, parent_table = child
        if not fk.references(obj_table):
            parent_fk = fk.column.foreign_key
            while parent_fk is not None:
                if parent_fk.references(obj_table):
                    obj_column = (
                        parent_fk.column.key
                    )
                    break
                parent_fk = parent_fk.column.foreign_key
            # Finally build a select for grandchildren or later to establish
            # which records need to be removed by seeing which of their
            # parent's records are ancestors of the root node:
            if parent_fk is None:
                sel = select([fk.parent])
                parent_fk = fk.column.foreign_key
                while parent_fk is not None:
                    sel.append_whereclause(
                        parent_fk.parent==parent_fk.column
                    )
                    tmp = parent_fk.column.foreign_key
                    if tmp is not None:
                        parent_fk = tmp
                    else:
                        break
                obj_column = (
                    parent_fk.column.key
                )
                sel.append_whereclause(
                    parent_fk.column==getattr(orm_obj, obj_column