From: Magnus Karlsson <magnus.karls...@intel.com>

set_affinity now returns -ENOSYS when it is called from the hotplug
path (force == true). This restriction stays in place until we
implement a way to migrate interrupts between clusters during a
hotplug operation. set_affinity requests from user space work as
before.
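
For illustration, a hotplug-path caller now has to check the return
value instead of assuming the migration happened. A minimal sketch
(not code from this patch; example_migrate_irq() is a made-up name,
while gic_set_affinity() and gic_irq() are from axxia-gic.c):

	static void example_migrate_irq(struct irq_data *d,
					const struct cpumask *new_mask)
	{
		/* force == true marks the hotplug path */
		int ret = gic_set_affinity(d, new_mask, true);

		if (ret == -ENOSYS)
			pr_warn("axxia-gic: IRQ %u left on its old cluster\n",
				gic_irq(d));
	}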

Signed-off-by: Magnus Karlsson <magnus.karls...@intel.com>
---
 arch/arm/mach-axxia/axxia-gic.c |  135 ++++++++++++++++++++++-----------------
 1 file changed, 77 insertions(+), 58 deletions(-)
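
For reference, a user-space affinity change keeps working, e.g. (IRQ
number and mask below are examples only):

	# route IRQ 41 to CPU 4, i.e. the second cluster
	echo 10 > /proc/irq/41/smp_affinity

Only the force == true (hotplug) path is rejected until cross-cluster
migration is implemented.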

diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index 9d3ac12..a7656a7 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -497,6 +497,37 @@ static void gic_clr_affinity_remote(void *info)
        _gic_set_affinity(rpc->d, rpc->mask_val, true);
 }
 
+static bool on_same_cluster(u32 pcpu1, u32 pcpu2)
+{
+       return pcpu1 / CORES_PER_CLUSTER == pcpu2 / CORES_PER_CLUSTER;
+}
+
+static int exec_remote_set_affinity(bool clr_affinity, u32 cpu,
+                                   struct irq_data *d,
+                                   const struct cpumask *mask_val,
+                                   bool force)
+{
+       /*
+        * Cross-cluster migration on the hotplug path is not implemented.
+        */
+       if (force) {
+               pr_warn("Set affinity for hotplug not implemented.\n");
+               return -ENOSYS;
+       }
+
+       gic_rpc_data.d = d;
+       gic_rpc_data.mask_val = mask_val;
+       if (clr_affinity) {
+               gic_rpc_data.oldcpu = cpu;
+               gic_rpc_data.func_mask |= CLR_AFFINITY;
+       } else {
+               gic_rpc_data.cpu = cpu;
+               gic_rpc_data.func_mask |= SET_AFFINITY;
+       }
+
+       return IRQ_SET_MASK_OK;
+}
+
 static int gic_set_affinity(struct irq_data *d,
                            const struct cpumask *mask_val,
                            bool force)
@@ -504,6 +535,7 @@ static int gic_set_affinity(struct irq_data *d,
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 pcpu = cpu_logical_map(smp_processor_id());
        unsigned int irqid = gic_irq(d);
+       int ret = IRQ_SET_MASK_OK;
 
        BUG_ON(!irqs_disabled());
 
@@ -529,14 +561,14 @@ static int gic_set_affinity(struct irq_data *d,
         * cluster as the cpu we're currently running on, set the IRQ
         * affinity directly. Otherwise, use the RPC mechanism.
         */
-       if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) ==
-               (pcpu / CORES_PER_CLUSTER)) {
+       if (on_same_cluster(cpu_logical_map(cpu), pcpu)) {
                _gic_set_affinity(d, mask_val, false);
        } else {
-               gic_rpc_data.func_mask |= SET_AFFINITY;
-               gic_rpc_data.cpu = cpu;
-               gic_rpc_data.d = d;
-               gic_rpc_data.mask_val = mask_val;
+               ret = exec_remote_set_affinity(false, cpu, d, mask_val,
+                                              force);
+
+               if (ret != IRQ_SET_MASK_OK)
+                       return ret;
        }
 
        /*
@@ -544,21 +576,28 @@ static int gic_set_affinity(struct irq_data *d,
         * different than the prior cluster, clear the IRQ affinity
         * on the old cluster.
         */
-       if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) !=
-               (irq_cpuid[irqid] / CORES_PER_CLUSTER))) {
+       if ((irqid != IRQ_PMU) &&
+           !on_same_cluster(cpu_logical_map(cpu), irq_cpuid[irqid])) {
                /*
                 * If old cpu assignment falls within the same cluster as
                 * the cpu we're currently running on, clear the IRQ affinity
                 * directly. Otherwise, use RPC mechanism.
                 */
-               if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
-                       (pcpu / CORES_PER_CLUSTER)) {
+               if (on_same_cluster(irq_cpuid[irqid], pcpu)) {
                        _gic_set_affinity(d, mask_val, true);
                } else {
-                       gic_rpc_data.func_mask |= CLR_AFFINITY;
-                       gic_rpc_data.oldcpu = irq_cpuid[irqid];
-                       gic_rpc_data.d = d;
-                       gic_rpc_data.mask_val = mask_val;
+                       ret = exec_remote_set_affinity(true,
+                                     get_logical_index(irq_cpuid[irqid]), d,
+                                     mask_val, force);
+               }
+               if (ret != IRQ_SET_MASK_OK) {
+                       /* Need to back out the set operation */
+                       if (on_same_cluster(cpu_logical_map(cpu), pcpu))
+                               _gic_set_affinity(d, mask_val, true);
+                       else
+                               exec_remote_set_affinity(true, cpu, d,
+                                                        mask_val, force);
+                       return ret;
                }
        }
 
@@ -1085,6 +1124,7 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
        cpumask |= cpumask << 16;
 
        writel_relaxed(0, base + GIC_DIST_CTRL);
+
        /*
         * Set all global interrupts to be level triggered, active low.
         */
@@ -1106,8 +1146,6 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
-
-       /*################################# TARGET ####################################*/
        /*
         * Disable all interrupts.  Leave the PPI and SGIs alone
         * as these enables are banked registers.
@@ -1119,26 +1157,25 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
        /*
         * Set Axxia IPI interrupts for all CPUs in this cluster.
         */
        for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
                cpumask_8 = 1 << ((i - IPI0_CPU0) % 4);
                writeb_relaxed(cpumask_8, base + GIC_DIST_TARGET + i);
        }
 
        /*
         * Set the PMU IRQ to the first cpu in this cluster.
         */
        writeb_relaxed(0x01, base + GIC_DIST_TARGET + IRQ_PMU);
 
        /*
         * Set Axxia IPI interrupts to be edge triggered.
         */
-
        for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
                confmask = 0x2 << ((i % 16) * 2);
                confoff = (i / 16) * 4;
                val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
                val |= confmask;
                writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
        }
 
        /*
@@ -1151,7 +1188,7 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
                enablemask = 1 << (i % 32);
                enableoff = (i / 32) * 4;
                writel_relaxed(enablemask,
-                               base + GIC_DIST_ENABLE_SET + enableoff);
+                              base + GIC_DIST_ENABLE_SET + enableoff);
        }
 
        /*
@@ -1164,7 +1201,7 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
        writel_relaxed(1, base + GIC_DIST_CTRL);
 }
 
-static void  gic_cpu_init(struct gic_chip_data *gic)
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
 
        void __iomem *dist_base = gic_data_dist_base(gic);
@@ -1178,34 +1215,14 @@ static void  gic_cpu_init(struct gic_chip_data *gic)
         */
        writel_relaxed(0xffffffff, dist_base + GIC_DIST_ENABLE_CLEAR);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       if (!cluster_power_up[cluster]) {
-#endif
-               writel_relaxed(0, dist_base + GIC_DIST_CTRL);
-               for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
-                       cpumask_8 = 1 << ((i - IPI0_CPU0) % 4);
-                       enablemask = 1 << (i % 32);
-                       enableoff = (i / 32) * 4;
-                       ccpu = (cluster * 4) + ((i - IPI0_CPU0) % CORES_PER_CLUSTER);
-                       if (ccpu == cpu) {
-                               writeb_relaxed(cpumask_8, dist_base + GIC_DIST_TARGET + i);
-                               writel_relaxed(enablemask, dist_base + GIC_DIST_ENABLE_SET + enableoff);
-                       }
-               }
-               writel_relaxed(1, dist_base + GIC_DIST_CTRL);
-#ifdef CONFIG_HOTPLUG_CPU
-       }
-#endif
-
        /*
         * Set priority on PPI and SGI interrupts
         */
        for (i = 0; i < 32; i += 4)
                writel_relaxed(0xa0a0a0a0,
                                dist_base + GIC_DIST_PRI + i * 4 / 4);
 
        writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
-
        writel_relaxed(1, base + GIC_CPU_CTRL);
 
 }
@@ -1416,13 +1433,14 @@ void __cpuinit axxia_gic_secondary_init(void)
        gic_dist_init(gic);
        gic_cpu_init(&gic_data);
 }
-#endif
 
 void __cpuinit axxia_hotplug_gic_secondary_init(void)
 {
        gic_cpu_init(&gic_data);
 }
 
+#endif
+
 #ifdef CONFIG_OF
 
 int __init axxia_gic_of_init(struct device_node *node,
-- 
1.7.9.5
