commit 06d3ba47ad300110d7ef55b7dd2ba5b615b469a4
Author: Paweł Sikora <pawel.sik...@agmk.net>
Date:   Wed Mar 6 20:01:01 2013 +0100

    one more autofs/nfs related fix, release 3.

 kernel-small_fixes.patch | 149 +++++++++++++++++++++++++++++++++++++++++++++++
 kernel.spec              |   2 +-
 2 files changed, 150 insertions(+), 1 deletion(-)
---
diff --git a/kernel.spec b/kernel.spec
index 5813ba1..1f9d665 100644
--- a/kernel.spec
+++ b/kernel.spec
@@ -66,7 +66,7 @@
 %define                have_pcmcia     0
 %endif
 
-%define                rel             2
+%define                rel             3
 %define                basever         3.7
 %define                postver         .10
 
diff --git a/kernel-small_fixes.patch b/kernel-small_fixes.patch
index 95b9948..9a09e76 100644
--- a/kernel-small_fixes.patch
+++ b/kernel-small_fixes.patch
@@ -795,3 +795,152 @@ Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
  
        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
  }
+From cc630d9f476445927fca599f81182c7f06f79058 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfie...@redhat.com>
+Date: Sun, 10 Feb 2013 16:08:11 -0500
+Subject: svcrpc: fix rpc server shutdown races
+
+From: "J. Bruce Fields" <bfie...@redhat.com>
+
+commit cc630d9f476445927fca599f81182c7f06f79058 upstream.
+
+Rewrite server shutdown to remove the assumption that there are no
+longer any threads running (no longer true, for example, when shutting
+down the service in one network namespace while it's still running in
+others).
+
+Do that by doing what we'd do in normal circumstances: just CLOSE each
+socket, then enqueue it.
+
+Since there may not be threads to handle the resulting queued xprts,
+also run a simplified version of the svc_recv() loop run by a server to
+clean up any closed xprts afterwards.
+
+Tested-by: Jason Tibbitts <ti...@math.uh.edu>
+Tested-by: Paweł Sikora <pawel.sik...@agmk.net>
+Acked-by: Stanislav Kinsbursky <skinsbur...@parallels.com>
+Signed-off-by: J. Bruce Fields <bfie...@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
+
+---
+ net/sunrpc/svc.c      |    9 -------
+ net/sunrpc/svc_xprt.c |   57 ++++++++++++++++++++++++++++----------------------
+ 2 files changed, 32 insertions(+), 34 deletions(-)
+
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
+ 
+ void svc_shutdown_net(struct svc_serv *serv, struct net *net)
+ {
+-      /*
+-       * The set of xprts (contained in the sv_tempsocks and
+-       * sv_permsocks lists) is now constant, since it is modified
+-       * only by accepting new sockets (done by service threads in
+-       * svc_recv) or aging old ones (done by sv_temptimer), or
+-       * configuration changes (excluded by whatever locking the
+-       * caller is using--nfsd_mutex in the case of nfsd).  So it's
+-       * safe to traverse those lists and shut everything down:
+-       */
+       svc_close_net(serv, net);
+ 
+       if (serv->sv_shutdown)
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -948,21 +948,24 @@ void svc_close_xprt(struct svc_xprt *xpr
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+ 
+-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+ {
+       struct svc_xprt *xprt;
++      int ret = 0;
+ 
+       spin_lock(&serv->sv_lock);
+       list_for_each_entry(xprt, xprt_list, xpt_list) {
+               if (xprt->xpt_net != net)
+                       continue;
++              ret++;
+               set_bit(XPT_CLOSE, &xprt->xpt_flags);
+-              set_bit(XPT_BUSY, &xprt->xpt_flags);
++              svc_xprt_enqueue(xprt);
+       }
+       spin_unlock(&serv->sv_lock);
++      return ret;
+ }
+ 
+-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
++static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+ {
+       struct svc_pool *pool;
+       struct svc_xprt *xprt;
+@@ -977,42 +980,46 @@ static void svc_clear_pools(struct svc_s
+                       if (xprt->xpt_net != net)
+                               continue;
+                       list_del_init(&xprt->xpt_ready);
++                      spin_unlock_bh(&pool->sp_lock);
++                      return xprt;
+               }
+               spin_unlock_bh(&pool->sp_lock);
+       }
++      return NULL;
+ }
+ 
+-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
+ {
+       struct svc_xprt *xprt;
+-      struct svc_xprt *tmp;
+-      LIST_HEAD(victims);
+-
+-      spin_lock(&serv->sv_lock);
+-      list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+-              if (xprt->xpt_net != net)
+-                      continue;
+-              list_move(&xprt->xpt_list, &victims);
+-      }
+-      spin_unlock(&serv->sv_lock);
+ 
+-      list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
++      while ((xprt = svc_dequeue_net(serv, net))) {
++              set_bit(XPT_CLOSE, &xprt->xpt_flags);
+               svc_delete_xprt(xprt);
++      }
+ }
+ 
++/*
++ * Server threads may still be running (especially in the case where the
++ * service is still running in other network namespaces).
++ *
++ * So we shut down sockets the same way we would on a running server, by
++ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
++ * the close.  In the case there are no such other threads,
++ * threads running, svc_clean_up_xprts() does a simple version of a
++ * server's main event loop, and in the case where there are other
++ * threads, we may need to wait a little while and then check again to
++ * see if they're done.
++ */
+ void svc_close_net(struct svc_serv *serv, struct net *net)
+ {
+-      svc_close_list(serv, &serv->sv_tempsocks, net);
+-      svc_close_list(serv, &serv->sv_permsocks, net);
++      int delay = 0;
++
++      while (svc_close_list(serv, &serv->sv_permsocks, net) +
++             svc_close_list(serv, &serv->sv_tempsocks, net)) {
+ 
+-      svc_clear_pools(serv, net);
+-      /*
+-       * At this point the sp_sockets lists will stay empty, since
+-       * svc_xprt_enqueue will not add new entries without taking the
+-       * sp_lock and checking XPT_BUSY.
+-       */
+-      svc_clear_list(serv, &serv->sv_tempsocks, net);
+-      svc_clear_list(serv, &serv->sv_permsocks, net);
++              svc_clean_up_xprts(serv, net);
++              msleep(delay++);
++      }
+ }
+ 
+ /*
================================================================

---- gitweb:

http://git.pld-linux.org/gitweb.cgi/packages/kernel.git/commitdiff/06d3ba47ad300110d7ef55b7dd2ba5b615b469a4

_______________________________________________
pld-cvs-commit mailing list
pld-cvs-commit@lists.pld-linux.org
http://lists.pld-linux.org/mailman/listinfo/pld-cvs-commit

Reply via email to