Hello,

    Squid is sending POST requests on reused pinned connections, and
some of those requests fail due to a pconn race, with no possibility of
a retry.

When using SslBump, the HTTP request is always forwarded using a server
connection "pinned" to the HTTP client connection. Squid does not reuse
a persistent connection from the idle pconn pool for bumped client
requests. Squid uses the dedicated pinned server connection instead.
This bypasses pconn race controls even though Squid may be essentially
reusing an idle HTTP connection and, hence, may experience the same kind
of race conditions.

However, connections that were just pinned, without sending any
requests, are not "essentially reused idle pconns", so we must be careful
to still allow unretriable requests on freshly pinned connections.
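
To make the intended decision easier to review, here is a small
standalone model of the check that the forward.cc hunk below adds. The
three parameters stand in for pinning.useCount, checkRetriable(), and
request->flags.canRePin; everything else is illustration only, not
actual Squid code:

    // Standalone model of the reuse check added to forward.cc below.
    // The parameters stand in for pinning.useCount, checkRetriable(),
    // and request->flags.canRePin; this is a sketch, not Squid code.
    #include <iostream>

    // true when the pinned connection may carry the next request
    static bool mayUsePinned(const int useCount, const bool retriable, const bool canRePin)
    {
        const bool racePossible = useCount > 0; // a used connection may be closed by the server
        if (!racePossible)
            return true;  // freshly pinned, never used: safe even for unretriable requests
        if (retriable)
            return true;  // if a race happens, the request can simply be retried
        if (!canRePin)
            return true;  // unretriable and cannot re-pin: no choice but to risk the reuse
        return false;     // unretriable but re-pinnable: open a new connection instead
    }

    int main()
    {
        std::cout << mayUsePinned(0, false, true) << "\n"; // 1: fresh pin, unretriable POST is OK
        std::cout << mayUsePinned(3, false, true) << "\n"; // 0: used pin, unretriable, can re-pin
        std::cout << mayUsePinned(3, true,  true) << "\n"; // 1: used pin, but retriable
    }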

The same logic applies to pinned connections outside SslBump.

The code assumes that the connection is always pinned before any HTTP
requests are sent on it. This is true for SslBump, but I do not know
whether it is true for other transactions.


The patch is based on Squid v3.3. If approved, I will adjust it
(including removal of HERE) for trunk.


The patch fixes the bug in my tests. Please review.


Thank you,

Alex.
Do not send unretriable requests on reused pinned connections if possible.

When using SslBump, the HTTP request is always forwarded using a server
connection "pinned" to the HTTP client connection. Squid does not reuse a
persistent connection from the idle pconn pool for bumped client requests.
Squid uses the dedicated pinned server connection instead. This bypasses pconn
race controls even though we may be essentially reusing an idle HTTP
connection and, hence, may experience the same kind of race conditions.

The same logic applies to any other pinned connection. However, connections
that were just pinned, without sending any requests, are not "essentially
reused idle pconns", so we must be careful to still allow unretriable requests
on freshly pinned connections.

The code assumes that the connection is always pinned before any HTTP requests
are sent on it. This is true for SslBump, but I do not know whether it is true
for other transactions.

=== modified file 'src/client_side.cc'
--- src/client_side.cc	2012-11-16 15:36:07 +0000
+++ src/client_side.cc	2012-11-30 22:02:20 +0000
@@ -4246,40 +4246,41 @@
     /*
      * hack for ident ACL. It needs to get full addresses, and a place to store
      * the ident result on persistent connections...
      */
     /* connection oriented auth also needs these two lines for it's operation. */
     return ch;
 }
 
 CBDATA_CLASS_INIT(ConnStateData);
 
 ConnStateData::ConnStateData() :
         AsyncJob("ConnStateData"),
 #if USE_SSL
         sslBumpMode(Ssl::bumpEnd),
         switchedToHttps_(false),
         sslServerBump(NULL),
 #endif
         stoppedSending_(NULL),
         stoppedReceiving_(NULL)
 {
+    pinning.useCount = 0;
     pinning.pinned = false;
     pinning.auth = false;
 }
 
 bool
 ConnStateData::transparent() const
 {
     return clientConnection != NULL && (clientConnection->flags & (COMM_TRANSPARENT|COMM_INTERCEPTION));
 }
 
 bool
 ConnStateData::reading() const
 {
     return reader != NULL;
 }
 
 void
 ConnStateData::stopReading()
 {
     if (reading()) {
@@ -4422,40 +4423,41 @@
     // callback is called for the failed connection.
     if (pinning.serverConnection == io.conn) {
         pinning.closeHandler = NULL; // Comm unregisters handlers before calling
         unpinConnection();
     }
 }
 
 void
 ConnStateData::pinConnection(const Comm::ConnectionPointer &pinServer, HttpRequest *request, CachePeer *aPeer, bool auth)
 {
     char desc[FD_DESC_SZ];
 
     if (Comm::IsConnOpen(pinning.serverConnection)) {
         if (pinning.serverConnection->fd == pinServer->fd)
             return;
     }
 
     unpinConnection(); // closes pinned connection, if any, and resets fields
 
     pinning.serverConnection = pinServer;
+    pinning.useCount = 0; // assume that this is an unused connection
 
     debugs(33, 3, HERE << pinning.serverConnection);
 
     // when pinning an SSL bumped connection, the request may be NULL
     const char *pinnedHost = "[unknown]";
     if (request) {
         pinning.host = xstrdup(request->GetHost());
         pinning.port = request->port;
         pinnedHost = pinning.host;
     } else {
         pinning.port = pinServer->remote.GetPort();
     }
     pinning.pinned = true;
     if (aPeer)
         pinning.peer = cbdataReference(aPeer);
     pinning.auth = auth;
     char stmp[MAX_IPSTRLEN];
     snprintf(desc, FD_DESC_SZ, "%s pinned connection for %s (%d)",
              (auth || !aPeer) ? pinnedHost : aPeer->name,
              clientConnection->remote.ToURL(stmp,MAX_IPSTRLEN),
@@ -4485,42 +4487,50 @@
     }
     if (request && pinning.port != request->port) {
         valid = false;
     }
     if (pinning.peer && !cbdataReferenceValid(pinning.peer)) {
         valid = false;
     }
     if (aPeer != pinning.peer) {
         valid = false;
     }
 
     if (!valid) {
         /* The pinning info is not safe, remove any pinning info */
         unpinConnection();
     }
 
     return pinning.serverConnection;
 }
 
 void
+ConnStateData::usePinnedConnection()
+{
+    ++pinning.useCount;
+    debugs(33, 7, HERE << "using " << pinning.serverConnection <<
+           " * " << pinning.useCount);
+}
+
+void
 ConnStateData::unpinConnection()
 {
-    debugs(33, 3, HERE << pinning.serverConnection);
+    debugs(33, 3, HERE << pinning.serverConnection << " * " << pinning.useCount);
 
     if (pinning.peer)
         cbdataReferenceDone(pinning.peer);
 
     if (Comm::IsConnOpen(pinning.serverConnection)) {
         if (pinning.closeHandler != NULL) {
             comm_remove_close_handler(pinning.serverConnection->fd, pinning.closeHandler);
             pinning.closeHandler = NULL;
         }
         /// also close the server side socket, we should not use it for any future requests...
         // TODO: do not close if called from our close handler?
         pinning.serverConnection->close();
     }
 
     safe_free(pinning.host);
 
     /* NOTE: pinning.pinned should be kept. This combined with fd == -1 at the end of a request indicates that the host
      * connection has gone away */
 }

=== modified file 'src/client_side.h'
--- src/client_side.h	2012-10-04 00:23:44 +0000
+++ src/client_side.h	2012-11-30 21:19:50 +0000
@@ -244,40 +244,41 @@
     Auth::UserRequest::Pointer auth_user_request;
 #endif
 
     /**
      * used by the owner of the connection, opaque otherwise
      * TODO: generalise the connection owner concept.
      */
     ClientSocketContext::Pointer currentobject;
 
     Ip::Address log_addr;
     int nrequests;
 
     struct {
         bool readMore; ///< needs comm_read (for this request or new requests)
         bool swanSang; // XXX: temporary flag to check proper cleanup
     } flags;
     struct {
         Comm::ConnectionPointer serverConnection; /* pinned server side connection */
         char *host;             /* host name of pinned connection */
         int port;               /* port of pinned connection */
+        int useCount;           ///< number of requests sent on the connection
         bool pinned;             /* this connection was pinned */
         bool auth;               /* pinned for www authentication */
         CachePeer *peer;             /* CachePeer the connection goes via */
         AsyncCall::Pointer closeHandler; /*The close handler for pinned server side connection*/
     } pinning;
 
     AnyP::PortCfg *port;
 
     bool transparent() const;
     bool reading() const;
     void stopReading(); ///< cancels comm_read if it is scheduled
 
     /// true if we stopped receiving the request
     const char *stoppedReceiving() const { return stoppedReceiving_; }
     /// true if we stopped sending the response
     const char *stoppedSending() const { return stoppedSending_; }
     /// note request receiving error and close as soon as we write the response
     void stopReceiving(const char *error);
     /// note response sending error and close as soon as we read the request
     void stopSending(const char *error);
@@ -290,40 +291,42 @@
 
     bool handleReadData(char *buf, size_t size);
     bool handleRequestBodyData();
 
     /**
      * Correlate the current ConnStateData object with the pinning_fd socket descriptor.
      */
     void pinConnection(const Comm::ConnectionPointer &pinServerConn, HttpRequest *request, CachePeer *peer, bool auth);
     /**
      * Decorrelate the ConnStateData object from its pinned CachePeer
      */
     void unpinConnection();
     /**
      * Checks if there is pinning info if it is valid. It can close the server side connection
      * if pinned info is not valid.
      \param request   if it is not NULL also checks if the pinning info refers to the request client side HttpRequest
      \param CachePeer      if it is not NULL also check if the CachePeer is the pinning CachePeer
      \return          The details of the server side connection (may be closed if failures were present).
      */
     const Comm::ConnectionPointer validatePinnedConnection(HttpRequest *request, const CachePeer *peer);
+    /// call this just before sending a request on the pinned connection
+    void usePinnedConnection();
     /**
      * returts the pinned CachePeer if exists, NULL otherwise
      */
     CachePeer *pinnedPeer() const {return pinning.peer;}
     bool pinnedAuth() const {return pinning.auth;}
 
     // pining related comm callbacks
     void clientPinnedConnectionClosed(const CommCloseCbParams &io);
 
     // comm callbacks
     void clientReadRequest(const CommIoCbParams &io);
     void connStateClosed(const CommCloseCbParams &io);
     void requestTimeout(const CommTimeoutCbParams &params);
 
     // AsyncJob API
     virtual bool doneAll() const { return BodyProducer::doneAll() && false;}
     virtual void swanSong();
 
     /// Changes state so that we close the connection and quit after serving
     /// the client-side-detected error response instead of getting stuck.

=== modified file 'src/forward.cc'
--- src/forward.cc	2012-11-16 15:40:52 +0000
+++ src/forward.cc	2012-11-30 22:09:19 +0000
@@ -1137,60 +1137,73 @@
 
     /* calculate total forwarding timeout ??? */
     int ftimeout = Config.Timeout.forward - (squid_curtime - start_t);
     if (ftimeout < 0)
         ftimeout = 5;
 
     if (ftimeout < ctimeout)
         ctimeout = ftimeout;
 
     if (serverDestinations[0]->getPeer() && request->flags.sslBumped) {
         debugs(50, 4, "fwdConnectStart: Ssl bumped connections through parrent proxy are not allowed");
         ErrorState *anErr = new ErrorState(ERR_CANNOT_FORWARD, HTTP_SERVICE_UNAVAILABLE, request);
         fail(anErr);
         self = NULL; // refcounted
         return;
     }
 
     request->flags.pinned = 0; // XXX: what if the ConnStateData set this to flag existing credentials?
     // XXX: answer: the peer selection *should* catch it and give us only the pinned peer. so we reverse the =0 step below.
     // XXX: also, logs will now lie if pinning is broken and leads to an error message.
+    // TODO: move into its own method before committing but after review
     if (serverDestinations[0]->peerType == PINNED) {
         ConnStateData *pinned_connection = request->pinnedConnection();
         // pinned_connection may become nil after a pconn race
-        if (pinned_connection)
+        if (pinned_connection) {
+            if (pinned_connection->pinning.useCount > 0)
+                pconnRace = racePossible; // this is a used connection
+
+            // reuse a connection if it is safe or if we have no other choice
+            if (pconnRace != racePossible || // unused connections are safe
+                checkRetriable() || // we can retry if there is a race
+                !request->flags.canRePin) { // we have no other choice
             serverConn = pinned_connection->validatePinnedConnection(request, serverDestinations[0]->getPeer());
+            } else {
+                debugs(17,5, "will not reuse pinned connection: " << pinned_connection);
+                pinned_connection->unpinConnection();
+                serverConn = NULL;
+            }
+        }
         else
             serverConn = NULL;
         if (Comm::IsConnOpen(serverConn)) {
             flags.connected_okay = true;
 #if 0
             if (!serverConn->getPeer())
                 serverConn->peerType = HIER_DIRECT;
 #endif
             ++n_tries;
             request->flags.pinned = 1;
             if (pinned_connection->pinnedAuth())
                 request->flags.auth = 1;
             comm_add_close_handler(serverConn->fd, fwdServerClosedWrapper, this);
-            // the server may close the pinned connection before this request
-            pconnRace = racePossible;
+            pinned_connection->usePinnedConnection();
             dispatch();
             return;
         }
         /* Failure. Fall back on next path unless we can re-pin */
         debugs(17,2,HERE << "Pinned connection failed: " << pinned_connection);
         if (pconnRace != raceHappened || !request->flags.canRePin) {
             serverDestinations.shift();
             pconnRace = raceImpossible;
             startConnectionOrFail();
             return;
         }
         debugs(17,3, HERE << "There was a pconn race. Will try to repin.");
         // and fall through to regular handling
     }
 
     // Use pconn to avoid opening a new connection.
     const char *host;
     if (serverDestinations[0]->getPeer()) {
         host = serverDestinations[0]->getPeer()->host;
     } else {
