On Thu, May 13, 2021 at 11:08:25AM -0500, Scott Cheloha wrote:
> > On May 13, 2021, at 10:57 AM, Visa Hankala <v...@hankala.org> wrote:
> > 
> > On Thu, May 13, 2021 at 12:04:57AM -0500, Scott Cheloha wrote:
> >> The funky locking dance in softclock() and softclock_thread() is
> >> needed to keep from violating the locking hierarchy.  The
> >> timeout_mutex is above the kernel lock, so we need to leave the
> >> timeout_mutex, then drop the kernel lock, and then reenter the
> >> timeout_mutex to start running TIMEOUT_MPSAFE timeouts.
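
[Sketched in C, the ordering described above is roughly the
following; a simplification for illustration, not the actual
kern_timeout.c code:

	mtx_leave(&timeout_mutex);
	KERNEL_UNLOCK();

	mtx_enter(&timeout_mutex);
	/* ... run the TIMEOUT_MPSAFE timeouts ... */
	mtx_leave(&timeout_mutex);
]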
> > 
> > Are you sure that dropping the kernel lock in softclock(), in
> > non-process context, is a good idea? That makes assumptions about
> > how the layers above work.
> 
> I figured it was the most obvious way forward.
> 
> When you say "layers above," do you mean softintr_dispatch()?
> Or something else?

I mean softintr_dispatch() and anything else in the dispatch path.

> > At a minimum, I think the soft interrupt code has to be changed so
> > that it is possible to register MP-safe handlers.
> 
> Some platforms (e.g. arm64) have softintr_establish_flags(), but this
> is not universally available.
> 
> Do I need to unlock all the softintr internals before proceeding
> with this?  Or do I just need to bring softintr_establish_flags()
> to every platform without it?

I think softintr_establish() could register an MP-safe handler if
IPL_MPSAFE is ORed into the ipl parameter.
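
Registration would then look roughly like this (hypothetical handler
and argument names, sketch only):

	sih = softintr_establish(IPL_SOFTCLOCK | IPL_MPSAFE,
	    mydrv_softintr, sc);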

Dropping the kernel lock has various implications. At least the code
has to ensure that at most one CPU can run a given handler at a time.
In addition, softintr_disestablish() should probably block until the
handler has stopped (at the moment, this is not guaranteed on the
platforms that have softintr_establish_flags()).

Below is a hasty sketch for amd64 that I am not suggesting to commit.
It is probably wrong in more ways than one.

Index: arch/amd64/amd64/softintr.c
===================================================================
RCS file: src/sys/arch/amd64/amd64/softintr.c,v
retrieving revision 1.10
diff -u -p -r1.10 softintr.c
--- arch/amd64/amd64/softintr.c 11 Sep 2020 09:27:09 -0000      1.10
+++ arch/amd64/amd64/softintr.c 13 May 2021 17:28:51 -0000
@@ -37,6 +37,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
+#include <sys/smr.h>
 
 #include <machine/intr.h>
 
@@ -85,7 +86,6 @@ softintr_dispatch(int which)
        floor = ci->ci_handled_intr_level;
        ci->ci_handled_intr_level = ci->ci_ilevel;
 
-       KERNEL_LOCK();
        for (;;) {
                mtx_enter(&si->softintr_lock);
                sih = TAILQ_FIRST(&si->softintr_q);
@@ -95,14 +95,20 @@ softintr_dispatch(int which)
                }
                TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
                sih->sih_pending = 0;
-
-               uvmexp.softs++;
-
                mtx_leave(&si->softintr_lock);
 
-               (*sih->sih_fn)(sih->sih_arg);
+               atomic_inc_int(&uvmexp.softs);
+
+               if (sih->sih_flags & IPL_MPSAFE) {
+                       mtx_enter(&sih->sih_mtx);
+                       (*sih->sih_fn)(sih->sih_arg);
+                       mtx_leave(&sih->sih_mtx);
+               } else {
+                       KERNEL_LOCK();
+                       (*sih->sih_fn)(sih->sih_arg);
+                       KERNEL_UNLOCK();
+               }
        }
-       KERNEL_UNLOCK();
 
        ci->ci_handled_intr_level = floor;
 }
@@ -117,7 +123,10 @@ softintr_establish(int ipl, void (*func)
 {
        struct x86_soft_intr *si;
        struct x86_soft_intrhand *sih;
-       int which;
+       int flags, which;
+
+       flags = ipl & IPL_MPSAFE;
+       ipl &= ~IPL_MPSAFE;
 
        switch (ipl) {
        case IPL_SOFTCLOCK:
@@ -141,9 +150,11 @@ softintr_establish(int ipl, void (*func)
 
        sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
        if (__predict_true(sih != NULL)) {
+               mtx_init(&sih->sih_mtx, IPL_NONE);
                sih->sih_intrhead = si;
                sih->sih_fn = func;
                sih->sih_arg = arg;
+               sih->sih_flags = flags;
                sih->sih_pending = 0;
        }
        return (sih);
@@ -167,5 +178,8 @@ softintr_disestablish(void *arg)
        }
        mtx_leave(&si->softintr_lock);
 
+       /* Wait for any running handlers to finish. */
+       smr_barrier();
+
        free(sih, M_DEVBUF, sizeof(*sih));
 }
Index: arch/amd64/include/intr.h
===================================================================
RCS file: src/sys/arch/amd64/include/intr.h,v
retrieving revision 1.32
diff -u -p -r1.32 intr.h
--- arch/amd64/include/intr.h   17 Jun 2020 06:14:52 -0000      1.32
+++ arch/amd64/include/intr.h   13 May 2021 17:28:51 -0000
@@ -232,13 +232,16 @@ extern void (*ipifunc[X86_NIPI])(struct 
 
 #ifndef _LOCORE
 #include <sys/queue.h>
+#include <sys/mutex.h>
 
 struct x86_soft_intrhand {
        TAILQ_ENTRY(x86_soft_intrhand)
                sih_q;
+       struct mutex sih_mtx;
        struct x86_soft_intr *sih_intrhead;
        void    (*sih_fn)(void *);
        void    *sih_arg;
+       int     sih_flags;
        int     sih_pending;
 };
 
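
With a sketch like the above, driver-side usage could look roughly as
follows (hypothetical names; the handler runs without the kernel lock,
serialized only by sih_mtx, so it has to lock its own state):

	/* Hypothetical MP-safe handler, called without the kernel lock. */
	void
	mydrv_softintr(void *arg)
	{
		struct mydrv_softc *sc = arg;

		mtx_enter(&sc->sc_mtx);
		/* Drain work queued by the hard interrupt handler. */
		mtx_leave(&sc->sc_mtx);
	}

	/* Registration; scheduling happens from the hard interrupt. */
	sc->sc_sih = softintr_establish(IPL_SOFTNET | IPL_MPSAFE,
	    mydrv_softintr, sc);
	softintr_schedule(sc->sc_sih);

	/* Teardown; blocks via smr_barrier() until handlers finish. */
	softintr_disestablish(sc->sc_sih);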
