Author: mav
Date: Sun Dec 29 17:10:21 2019
New Revision: 356178
URL: https://svnweb.freebsd.org/changeset/base/356178

Log:
  Fix GEOM_MOUNTVER orphanization.
  
  The previous code closed and detached the consumer even with I/O still in
  progress.  This patch adds locking and request counting to postpone the
  close until the last of the outstanding requests completes.
  
  MFC after:    2 weeks
  Sponsored by: iXsystems, Inc.
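  
  The idea can be illustrated outside the kernel.  Below is a minimal
  userland sketch of the same pattern, assuming POSIX threads; the names
  (consumer_t, request_start(), request_done(), orphan()) are illustrative
  and do not appear in the GEOM code.  An in-flight counter and an
  "orphaned" flag are kept under one mutex, and whichever path observes
  the counter reach zero after orphaning performs the detach.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct consumer {
	pthread_mutex_t	mtx;
	int		inflight;	/* requests issued but not completed */
	bool		orphaned;	/* provider went away */
} consumer_t;

static void
consumer_detach(consumer_t *cp)
{
	/* Safe: no request still references cp at this point. */
	printf("detaching consumer\n");
}

static void
request_start(consumer_t *cp)
{
	pthread_mutex_lock(&cp->mtx);
	cp->inflight++;
	pthread_mutex_unlock(&cp->mtx);
	/* ...issue the request outside the lock... */
}

static void
request_done(consumer_t *cp)
{
	bool last;

	pthread_mutex_lock(&cp->mtx);
	last = (--cp->inflight == 0 && cp->orphaned);
	pthread_mutex_unlock(&cp->mtx);
	if (last)
		consumer_detach(cp);	/* last in-flight request closes up */
}

static void
orphan(consumer_t *cp)
{
	bool idle;

	pthread_mutex_lock(&cp->mtx);
	cp->orphaned = true;
	idle = (cp->inflight == 0);
	pthread_mutex_unlock(&cp->mtx);
	if (idle)
		consumer_detach(cp);	/* nothing in flight; detach now */
	/* otherwise request_done() of the last request will detach */
}

int
main(void)
{
	consumer_t cp = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	request_start(&cp);
	orphan(&cp);		/* I/O still in flight: detach is deferred */
	request_done(&cp);	/* last completion triggers the detach */
	return (0);
}

  In the actual patch below the counter is cp->index and the flag is
  sc->sc_orphaned; the detach taken from the I/O completion path is
  additionally deferred to a GEOM event via g_post_event(), because
  g_detach() must run with the topology lock held.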

Modified:
  head/sys/geom/mountver/g_mountver.c

Modified: head/sys/geom/mountver/g_mountver.c
==============================================================================
--- head/sys/geom/mountver/g_mountver.c Sun Dec 29 15:53:55 2019        (r356177)
+++ head/sys/geom/mountver/g_mountver.c Sun Dec 29 17:10:21 2019        (r356178)
@@ -85,14 +85,29 @@ struct g_class g_mountver_class = {
 };
 
 static void
+g_mountver_detach(void *arg, int flags __unused)
+{
+       struct g_consumer *cp = arg;
+
+       g_topology_assert();
+       if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
+               g_access(cp, -cp->acr, -cp->acw, -cp->ace);
+       g_detach(cp);
+}
+
+static void
 g_mountver_done(struct bio *bp)
 {
+       struct g_mountver_softc *sc;
        struct g_geom *gp;
+       struct g_consumer *cp;
        struct bio *pbp;
 
+       cp = bp->bio_from;
+       gp = cp->geom;
        if (bp->bio_error != ENXIO) {
                g_std_done(bp);
-               return;
+               goto done;
        }
 
        /*
@@ -101,32 +116,45 @@ g_mountver_done(struct bio *bp)
         * gets called.  To work around that, we have to queue requests
         * that failed with ENXIO, in order to send them later.
         */
-       gp = bp->bio_from->geom;
-
        pbp = bp->bio_parent;
        KASSERT(pbp->bio_to == LIST_FIRST(&gp->provider),
            ("parent request was for someone else"));
        g_destroy_bio(bp);
        pbp->bio_inbed++;
        g_mountver_queue(pbp);
+
+done:
+       sc = gp->softc;
+       mtx_lock(&sc->sc_mtx);
+       if (--cp->index == 0 && sc->sc_orphaned)
+               g_post_event(g_mountver_detach, cp, M_NOWAIT, NULL);
+       mtx_unlock(&sc->sc_mtx);
 }
 
+/*
+ * Send the BIO down.  The function is called with sc_mtx held to cover
+ * the race with orphan, but drops it before external calls.
+ */
 static void
-g_mountver_send(struct bio *bp)
+g_mountver_send(struct g_geom *gp, struct bio *bp)
 {
-       struct g_geom *gp;
+       struct g_mountver_softc *sc = gp->softc;
+       struct g_consumer *cp;
        struct bio *cbp;
 
-       gp = bp->bio_to->geom;
-
+       mtx_assert(&sc->sc_mtx, MA_OWNED);
        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
+               mtx_unlock(&sc->sc_mtx);
                g_io_deliver(bp, ENOMEM);
                return;
        }
+       cp = LIST_FIRST(&gp->consumer);
+       cp->index++;
+       mtx_unlock(&sc->sc_mtx);
 
        cbp->bio_done = g_mountver_done;
-       g_io_request(cbp, LIST_FIRST(&gp->consumer));
+       g_io_request(cbp, cp);
 }
 
 static void
@@ -152,10 +180,12 @@ g_mountver_send_queued(struct g_geom *gp)
        sc = gp->softc;
 
        mtx_lock(&sc->sc_mtx);
-       while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) {
+       while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL && !sc->sc_orphaned) {
                TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
                G_MOUNTVER_LOGREQ(bp, "Sending queued request.");
-               g_mountver_send(bp);
+               /* sc_mtx is dropped inside */
+               g_mountver_send(gp, bp);
+               mtx_lock(&sc->sc_mtx);
        }
        mtx_unlock(&sc->sc_mtx);
 }
@@ -171,8 +201,10 @@ g_mountver_discard_queued(struct g_geom *gp)
        mtx_lock(&sc->sc_mtx);
        while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) {
                TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue);
+               mtx_unlock(&sc->sc_mtx);
                G_MOUNTVER_LOGREQ(bp, "Discarding queued request.");
                g_io_deliver(bp, ENXIO);
+               mtx_lock(&sc->sc_mtx);
        }
        mtx_unlock(&sc->sc_mtx);
 }
@@ -192,7 +224,9 @@ g_mountver_start(struct bio *bp)
         * orphaning didn't happen yet.  In that case, queue all subsequent
         * requests in order to maintain ordering.
         */
+       mtx_lock(&sc->sc_mtx);
        if (sc->sc_orphaned || !TAILQ_EMPTY(&sc->sc_queue)) {
+               mtx_unlock(&sc->sc_mtx);
                if (sc->sc_shutting_down) {
                        G_MOUNTVER_LOGREQ(bp, "Discarding request due to 
shutdown.");
                        g_io_deliver(bp, ENXIO);
@@ -204,7 +238,8 @@ g_mountver_start(struct bio *bp)
                        g_mountver_send_queued(gp);
        } else {
                G_MOUNTVER_LOGREQ(bp, "Sending request.");
-               g_mountver_send(bp);
+               /* sc_mtx is dropped inside */
+               g_mountver_send(gp, bp);
        }
 }
 
@@ -466,14 +501,17 @@ static void
 g_mountver_orphan(struct g_consumer *cp)
 {
        struct g_mountver_softc *sc;
+       int done;
 
        g_topology_assert();
 
        sc = cp->geom->softc;
+       mtx_lock(&sc->sc_mtx);
        sc->sc_orphaned = 1;
-       if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
-               g_access(cp, -cp->acr, -cp->acw, -cp->ace);
-       g_detach(cp);
+       done = (cp->index == 0);
+       mtx_unlock(&sc->sc_mtx);
+       if (done)
+               g_mountver_detach(cp, 0);
        G_MOUNTVER_DEBUG(0, "%s is offline.  Mount verification in progress.", 
sc->sc_provider_name);
 }
 
@@ -571,8 +609,8 @@ g_mountver_taste(struct g_class *mp, struct g_provider
                        return (NULL);
                }
        }
-       g_mountver_send_queued(gp);
        sc->sc_orphaned = 0;
+       g_mountver_send_queued(gp);
        G_MOUNTVER_DEBUG(0, "%s has completed mount verification.", 
sc->sc_provider_name);
 
        return (gp);
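
The queue-draining loops above (g_mountver_send_queued() and
g_mountver_discard_queued()) follow a second pattern worth noting: pop one
element while holding the lock, drop the lock for the external call
(g_io_request()/g_io_deliver()), then re-take it before looking at the queue
again.  A minimal userland sketch of that loop, assuming POSIX threads and
the TAILQ macros from <sys/queue.h>; struct req, deliver() and drain_queue()
are illustrative names, not part of the GEOM code.

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	TAILQ_ENTRY(req) link;
	int		 id;
};

static TAILQ_HEAD(, req) queue = TAILQ_HEAD_INITIALIZER(queue);
static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for g_io_request()/g_io_deliver(): must not be called
 * with queue_mtx held. */
static void
deliver(struct req *r)
{
	printf("delivering request %d\n", r->id);
	free(r);
}

static void
drain_queue(void)
{
	struct req *r;

	pthread_mutex_lock(&queue_mtx);
	while ((r = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, r, link);
		/* Drop the lock around the external call, as the patched
		 * send_queued()/discard_queued() loops do. */
		pthread_mutex_unlock(&queue_mtx);
		deliver(r);
		pthread_mutex_lock(&queue_mtx);
	}
	pthread_mutex_unlock(&queue_mtx);
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		pthread_mutex_lock(&queue_mtx);
		TAILQ_INSERT_TAIL(&queue, r, link);
		pthread_mutex_unlock(&queue_mtx);
	}
	drain_queue();
	return (0);
}

The patched discard loop does exactly this around g_io_deliver(), while the
send loop achieves the same effect by letting g_mountver_send() drop sc_mtx
before g_io_request() and re-taking the lock when it returns.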