Author: markj
Date: Thu Dec 12 02:43:24 2019
New Revision: 355644
URL: https://svnweb.freebsd.org/changeset/base/355644

Log:
  Rename tdq_ipipending and clear it in sched_switch().
  
  This fixes a regression introduced in r355311.  sched_preempt() may
  itself trigger a context switch when it calls thread_lock():
  thread_lock() calls critical_exit() in its slow path, and if the
  interrupted thread had already been marked for preemption,
  critical_exit() performs that deferred switch on the spot.  The
  switch would then occur before sched_preempt() cleared
  tdq_ipipending, blocking further preemption IPIs to the CPU.  If the
  interrupted thread migrates, the CPU can be left in this state
  indefinitely.
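
  To make the race concrete, here is a simplified sketch of the old
  sched_preempt() entry path; it is condensed from sched_ule.c, and the
  comments paraphrase the failure mode rather than quoting real code:

        void
        sched_preempt(struct thread *td)
        {
                struct tdq *tdq;

                /*
                 * thread_lock()'s slow path calls critical_exit(); if
                 * td_owepreempt was already set for the interrupted
                 * thread, critical_exit() switches right here, before
                 * the flag below is ever cleared.
                 */
                thread_lock(td);
                tdq = TDQ_SELF();

                /*
                 * Too late: if the switch above happened and the thread
                 * then migrated, this CPU's tdq_ipipending stays set and
                 * tdq_notify() never sends it another preemption IPI.
                 */
                tdq->tdq_ipipending = 0;
                /* ... preemption check and mi_switch() follow ... */
        }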
  
  Rename tdq_ipipending to tdq_owepreempt.  Any context switch satisfies
  a remote preemption request, so clear tdq_owepreempt in sched_switch()
  instead of in sched_preempt() to avoid subtle problems of the sort
  described above.
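
  After the change, the handshake between the notifying CPU and the
  target CPU reduces to the following, condensed from the diff below:

        /* Notifier (tdq_notify()): run queues are already updated. */
        tdq->tdq_owepreempt = 1;
        ipi_cpu(cpu, IPI_PREEMPT);

        /* Target (sched_switch()): any switch satisfies the request. */
        td->td_owepreempt = 0;
        tdq->tdq_owepreempt = 0;

  Because the clear happens on every context switch, a switch taken
  inside thread_lock() now satisfies the request instead of wedging it.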
  
  Reviewed by:  jeff, kib
  Sponsored by: The FreeBSD Foundation
  Differential Revision:        https://reviews.freebsd.org/D22758

Modified:
  head/sys/kern/sched_ule.c

Modified: head/sys/kern/sched_ule.c
==============================================================================
--- head/sys/kern/sched_ule.c   Thu Dec 12 02:42:27 2019        (r355643)
+++ head/sys/kern/sched_ule.c   Thu Dec 12 02:43:24 2019        (r355644)
@@ -244,7 +244,7 @@ struct tdq {
        volatile short  tdq_switchcnt;          /* Switches this tick. */
        volatile short  tdq_oldswitchcnt;       /* Switches last tick. */
        u_char          tdq_lowpri;             /* Lowest priority thread. */
-       u_char          tdq_ipipending;         /* IPI pending. */
+       u_char          tdq_owepreempt;         /* Remote preemption pending. */
        u_char          tdq_idx;                /* Current insert index. */
        u_char          tdq_ridx;               /* Current removal index. */
        int             tdq_id;                 /* cpuid. */
@@ -1073,7 +1073,7 @@ tdq_notify(struct tdq *tdq, struct thread *td)
        int pri;
        int cpu;
 
-       if (tdq->tdq_ipipending)
+       if (tdq->tdq_owepreempt)
                return;
        cpu = td_get_sched(td)->ts_cpu;
        pri = td->td_priority;
@@ -1096,7 +1096,12 @@ tdq_notify(struct tdq *tdq, struct thread *td)
                if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
                        return;
        }
-       tdq->tdq_ipipending = 1;
+
+       /*
+        * The run queues have been updated, so any switch on the remote CPU
+        * will satisfy the preemption request.
+        */
+       tdq->tdq_owepreempt = 1;
        ipi_cpu(cpu, IPI_PREEMPT);
 }
 
@@ -2079,8 +2084,10 @@ sched_switch(struct thread *td, struct thread *newtd, 
            (flags & SW_PREEMPT) != 0;
        td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
        td->td_owepreempt = 0;
+       tdq->tdq_owepreempt = 0;
        if (!TD_IS_IDLETHREAD(td))
                tdq->tdq_switchcnt++;
+
        /*
         * The lock pointer in an idle thread should never change.  Reset it
         * to CAN_RUN as well.
@@ -2386,7 +2393,6 @@ sched_preempt(struct thread *td)
        thread_lock(td);
        tdq = TDQ_SELF();
        TDQ_LOCK_ASSERT(tdq, MA_OWNED);
-       tdq->tdq_ipipending = 0;
        if (td->td_priority > tdq->tdq_lowpri) {
                int flags;
 
@@ -2397,6 +2403,8 @@ sched_preempt(struct thread *td)
                        mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
                else
                        mi_switch(flags | SWT_REMOTEPREEMPT, NULL);
+       } else {
+               tdq->tdq_owepreempt = 0;
        }
        thread_unlock(td);
 }