Author: avg
Date: Tue Apr  3 07:52:06 2018
New Revision: 331910
URL: https://svnweb.freebsd.org/changeset/base/331910

Log:
  MFC r327056: Use resume_cpus() instead of restart_cpus() to resume from ACPI suspension.
  
  The merge needed fixing because common x86 code changed by the original
  commit is still split between i386 and amd64 in this branch.

Modified:
  stable/10/sys/amd64/amd64/mp_machdep.c
  stable/10/sys/i386/i386/mp_machdep.c
  stable/10/sys/kern/subr_smp.c
  stable/10/sys/sys/smp.h
  stable/10/sys/x86/acpica/acpi_wakeup.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/amd64/amd64/mp_machdep.c
==============================================================================
--- stable/10/sys/amd64/amd64/mp_machdep.c      Tue Apr  3 07:31:22 2018        (r331909)
+++ stable/10/sys/amd64/amd64/mp_machdep.c      Tue Apr  3 07:52:06 2018        (r331910)
@@ -138,6 +138,9 @@ extern int pmap_pcid_enabled;
 
 static volatile cpuset_t ipi_nmi_pending;
 
+volatile cpuset_t resuming_cpus;
+volatile cpuset_t toresume_cpus;
+
 /* used to hold the AP's until we are ready to release them */
 static struct mtx ap_boot_mtx;
 
@@ -1486,6 +1489,13 @@ cpususpend_handler(void)
                fpususpend(susppcbs[cpu]->sp_fpususpend);
                wbinvd();
                CPU_SET_ATOMIC(cpu, &suspended_cpus);
+               /*
+                * Hack for xen, which does not use resumectx() so never
+                * uses the next clause: set resuming_cpus early so that
+                * resume_cpus() can wait on the same bitmap for acpi and
+                * xen.  resuming_cpus now means eventually_resumable_cpus.
+                */
+               CPU_SET_ATOMIC(cpu, &resuming_cpus);
        } else {
                fpuresume(susppcbs[cpu]->sp_fpususpend);
                pmap_init_pat();
@@ -1493,12 +1503,12 @@ cpususpend_handler(void)
                PCPU_SET(switchtime, 0);
                PCPU_SET(switchticks, ticks);
 
-               /* Indicate that we are resumed */
+               /* Indicate that we are resuming */
                CPU_CLR_ATOMIC(cpu, &suspended_cpus);
        }
 
-       /* Wait for resume */
-       while (!CPU_ISSET(cpu, &started_cpus))
+       /* Wait for resume directive */
+       while (!CPU_ISSET(cpu, &toresume_cpus))
                ia32_pause();
 
        if (cpu_ops.cpu_resume)
@@ -1510,9 +1520,10 @@ cpususpend_handler(void)
        mca_resume();
        lapic_setup(0);
 
-       CPU_CLR_ATOMIC(cpu, &started_cpus);
        /* Indicate that we are resumed */
+       CPU_CLR_ATOMIC(cpu, &resuming_cpus);
        CPU_CLR_ATOMIC(cpu, &suspended_cpus);
+       CPU_CLR_ATOMIC(cpu, &toresume_cpus);
 }
 
 /*

Modified: stable/10/sys/i386/i386/mp_machdep.c
==============================================================================
--- stable/10/sys/i386/i386/mp_machdep.c        Tue Apr  3 07:31:22 2018        (r331909)
+++ stable/10/sys/i386/i386/mp_machdep.c        Tue Apr  3 07:52:06 2018        (r331910)
@@ -178,6 +178,9 @@ struct cpu_ops cpu_ops = {
 
 static volatile cpuset_t ipi_nmi_pending;
 
+volatile cpuset_t resuming_cpus;
+volatile cpuset_t toresume_cpus;
+
 /* used to hold the AP's until we are ready to release them */
 static struct mtx ap_boot_mtx;
 
@@ -1537,6 +1540,13 @@ cpususpend_handler(void)
                npxsuspend(susppcbs[cpu]->sp_fpususpend);
                wbinvd();
                CPU_SET_ATOMIC(cpu, &suspended_cpus);
+               /*
+                * Hack for xen, which does not use resumectx() so never
+                * uses the next clause: set resuming_cpus early so that
+                * resume_cpus() can wait on the same bitmap for acpi and
+                * xen.  resuming_cpus now means eventually_resumable_cpus.
+                */
+               CPU_SET_ATOMIC(cpu, &resuming_cpus);
        } else {
                npxresume(susppcbs[cpu]->sp_fpususpend);
                pmap_init_pat();
@@ -1544,12 +1554,12 @@ cpususpend_handler(void)
                PCPU_SET(switchtime, 0);
                PCPU_SET(switchticks, ticks);
 
-               /* Indicate that we are resumed */
+               /* Indicate that we are resuming */
                CPU_CLR_ATOMIC(cpu, &suspended_cpus);
        }
 
-       /* Wait for resume */
-       while (!CPU_ISSET(cpu, &started_cpus))
+       /* Wait for resume directive */
+       while (!CPU_ISSET(cpu, &toresume_cpus))
                ia32_pause();
 
        if (cpu_ops.cpu_resume)
@@ -1560,8 +1570,9 @@ cpususpend_handler(void)
        lapic_setup(0);
 
        /* Indicate that we are resumed */
+       CPU_CLR_ATOMIC(cpu, &resuming_cpus);
        CPU_CLR_ATOMIC(cpu, &suspended_cpus);
-       CPU_CLR_ATOMIC(cpu, &started_cpus);
+       CPU_CLR_ATOMIC(cpu, &toresume_cpus);
 }
 
 /*

Modified: stable/10/sys/kern/subr_smp.c
==============================================================================
--- stable/10/sys/kern/subr_smp.c       Tue Apr  3 07:31:22 2018        (r331909)
+++ stable/10/sys/kern/subr_smp.c       Tue Apr  3 07:52:06 2018        (r331910)
@@ -331,13 +331,18 @@ generic_restart_cpus(cpuset_t map, u_int type)
 
 #if defined(__amd64__) || defined(__i386__)
        if (type == IPI_SUSPEND)
-               cpus = &suspended_cpus;
+               cpus = &resuming_cpus;
        else
 #endif
                cpus = &stopped_cpus;
 
        /* signal other cpus to restart */
-       CPU_COPY_STORE_REL(&map, &started_cpus);
+#if defined(__amd64__) || defined(__i386__)
+       if (type == IPI_SUSPEND)
+               CPU_COPY_STORE_REL(&map, &toresume_cpus);
+       else
+#endif
+               CPU_COPY_STORE_REL(&map, &started_cpus);
 
        /* wait for each to clear its bit */
        while (CPU_OVERLAP(cpus, &map))

Modified: stable/10/sys/sys/smp.h
==============================================================================
--- stable/10/sys/sys/smp.h     Tue Apr  3 07:31:22 2018        (r331909)
+++ stable/10/sys/sys/smp.h     Tue Apr  3 07:52:06 2018        (r331910)
@@ -72,10 +72,13 @@ struct cpu_group *smp_topo_find(struct cpu_group *top,
 
 extern void (*cpustop_restartfunc)(void);
 extern int smp_cpus;
-extern volatile cpuset_t started_cpus;
-extern volatile cpuset_t stopped_cpus;
-extern volatile cpuset_t suspended_cpus;
-extern cpuset_t hlt_cpus_mask;
+/* The suspend/resume cpusets are x86 only, but minimize ifdefs. */
+extern volatile cpuset_t resuming_cpus;        /* woken up cpus in suspend pen */
+extern volatile cpuset_t started_cpus; /* cpus to let out of stop pen */
+extern volatile cpuset_t stopped_cpus; /* cpus in stop pen */
+extern volatile cpuset_t suspended_cpus; /* cpus [near] sleeping in susp pen */
+extern volatile cpuset_t toresume_cpus;        /* cpus to let out of suspend pen */
+extern cpuset_t hlt_cpus_mask;         /* XXX 'mask' is detail in old impl */
 extern cpuset_t logical_cpus_mask;
 #endif /* SMP */
 

Modified: stable/10/sys/x86/acpica/acpi_wakeup.c
==============================================================================
--- stable/10/sys/x86/acpica/acpi_wakeup.c      Tue Apr  3 07:31:22 2018        (r331909)
+++ stable/10/sys/x86/acpica/acpi_wakeup.c      Tue Apr  3 07:52:06 2018        (r331910)
@@ -278,7 +278,7 @@ acpi_wakeup_machdep(struct acpi_softc *sc, int state, 
 
 #ifdef SMP
                if (!CPU_EMPTY(&suspcpus))
-                       restart_cpus(suspcpus);
+                       resume_cpus(suspcpus);
 #endif
                mca_resume();
 #ifdef __amd64__
_______________________________________________
svn-src-stable-10@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-stable-10
To unsubscribe, send any mail to "svn-src-stable-10-unsubscr...@freebsd.org"

Reply via email to