From: Magnus Karlsson <magnus.karls...@intel.com>

This is a port of commit fc32c65b2d1df5a3a545e5a227c2ca993b78524e from
linux-yocto-3.10, branch standard/axxia/base, that reverts commit
c15c9b219f4ae722c24a2dc320f27a62ffafde82 from the same branch.
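Both the helper removed below (gic_set_irq_target) and the open-coded
_gic_set_affinity()/_gic_clear_affinity() bodies that replace it rely
on the same distributor addressing: GIC_DIST_TARGET is one target byte
per interrupt, accessed through aligned 32-bit registers, and the
Linux logical cpu (0-15) is reduced to a per-cluster core number
(0-3). The stand-alone sketch below illustrates only that addressing
math. It is not part of the patch; it hard-codes the usual 0x800
GICD_ITARGETSRn offset and substitutes a plain modulo for
cpu_logical_map(), both assumptions made purely for the example.

/* Illustrative sketch only -- not part of this patch. */
#include <stdio.h>

#define GIC_DIST_TARGET   0x800 /* usual GICD_ITARGETSRn offset (assumed) */
#define CORES_PER_CLUSTER 4

int main(void)
{
	unsigned int irq = 37; /* any shared peripheral interrupt */
	unsigned int cpu = 6;  /* Linux logical cpu number, 0-15 */

	/* 32-bit register that contains this IRQ's target byte. */
	unsigned int reg_off = GIC_DIST_TARGET + (irq & ~3u);
	/* Position of the target byte inside that register. */
	unsigned int shift = (irq % 4) * 8;
	/* Mask that clears the old affinity byte before a new one is set. */
	unsigned int mask = 0xffu << shift;
	/* Core bit as seen by the cluster; stands in for cpu_logical_map(). */
	unsigned int bit = 1u << ((cpu % CORES_PER_CLUSTER) + shift);

	printf("reg=0x%03x shift=%u mask=0x%08x bit=0x%08x\n",
	       reg_off, shift, mask, bit);
	return 0;
}

For irq 37 and cpu 6 this prints reg=0x824 shift=8 mask=0x0000ff00
bit=0x00000400, i.e. the interrupt is steered to core 2 of its cluster.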
Signed-off-by: Magnus Karlsson <magnus.karls...@intel.com>
---
 arch/arm/mach-axxia/axxia-gic.c | 253 +++++++++++++++++----------------------
 1 file changed, 109 insertions(+), 144 deletions(-)

diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index 7ee51d7..c068c14 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -238,23 +238,22 @@ static void axxia_gic_flush_affinity_queue(struct work_struct *dummy)
 	void *qdata;
 	struct gic_rpc_data *rpc_data;
 
-	while (axxia_get_item(&axxia_circ_q, &qdata) != -1) {
+	while (axxia_get_item(&axxia_circ_q, &qdata) != -1) {
 		rpc_data = (struct gic_rpc_data *) qdata;
 		if (rpc_data->func_mask == SET_AFFINITY) {
-			if (cpu_online(rpc_data->cpu)) {
-				smp_call_function_single(rpc_data->cpu, gic_set_affinity_remote,
-							 qdata, 1);
-			}
+			smp_call_function_single(rpc_data->cpu,
+						 gic_set_affinity_remote,
+						 qdata, 1);
+
 		} else if (rpc_data->func_mask == CLR_AFFINITY) {
-			if (cpu_online(rpc_data->cpu)) {
-				smp_call_function_single(rpc_data->cpu, gic_clr_affinity_remote,
-							 qdata, 1);
-			}
+
+			smp_call_function_single(rpc_data->cpu,
+						 gic_clr_affinity_remote,
+						 qdata, 1);
 		}
 		kfree(qdata);
 	}
-
 }
 
 /*
@@ -496,45 +495,35 @@ static int gic_retrigger(struct irq_data *d)
 	return -ENXIO;
 }
 
-static void gic_set_irq_target(void __iomem *dist_base,
-			       u32 irqid, u32 cpu, bool set)
+static int _gic_clear_affinity(struct irq_data *d, u32 cpu, bool update_enable)
 {
-	void __iomem *reg;
-	unsigned int shift;
+
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int shift = (gic_irq(d) % 4) * 8;
 	u32 val;
 	u32 mask = 0;
-	u32 bit;
+	u32 enable_mask, enable_offset;
 
-	reg = dist_base + GIC_DIST_TARGET + (irqid & ~3);
-	shift = (irqid % 4) * 8;
 	mask = 0xff << shift;
-	val = readl_relaxed(reg) & ~mask;
-
-	if (!set)
-		/* Clear affinity, mask IRQ. */
-		writel_relaxed(val, reg);
-	else {
-		bit = 1 << ((cpu_logical_map(cpu) % CORES_PER_CLUSTER) + shift);
-		writel_relaxed(val | bit, reg);
-	}
-
-}
-
-static int _gic_clear_affinity(struct irq_data *d, u32 cpu, bool update_enable)
-{
-
-	u32 enable_mask, enable_offset;
+	enable_mask = 1 << (gic_irq(d) % 32);
+	enable_offset = 4 * (gic_irq(d) / 32);
 
 	raw_spin_lock(&irq_controller_lock);
 
-	gic_set_irq_target(gic_dist_base(d), gic_irq(d), cpu, false);
+	val = readl_relaxed(reg) & ~mask;
+	/* Clear affinity, mask IRQ. */
+	writel_relaxed(val, reg);
 
 	if (update_enable) {
-		enable_mask = 1 << (gic_irq(d) % 32);
-		enable_offset = 4 * (gic_irq(d) / 32);
+
+		writel_relaxed(enable_mask,
+			gic_data_dist_base(&gic_data) + GIC_DIST_PENDING_CLEAR + enable_offset);
+		writel_relaxed(enable_mask,
+			gic_data_dist_base(&gic_data) + GIC_DIST_ACTIVE_CLEAR + enable_offset);
 		writel_relaxed(enable_mask,
 			gic_data_dist_base(&gic_data) + GIC_DIST_ENABLE_CLEAR + enable_offset);
+
 	}
 
 	raw_spin_unlock(&irq_controller_lock);
@@ -547,17 +536,33 @@ static int _gic_set_affinity(struct irq_data *d, u32 cpu, bool update_enable)
 {
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int shift = (gic_irq(d) % 4) * 8;
+	u32 val;
+	u32 mask = 0;
+	u32 bit;
 	u32 enable_mask, enable_offset;
 
+	/*
+	 * Normalize the cpu number as seen by Linux (0-15) to a
+	 * number as seen by a cluster (0-3).
+	 */
+	bit = 1 << ((cpu_logical_map(cpu) % CORES_PER_CLUSTER) + shift);
+	mask = 0xff << shift;
+
+	enable_mask = 1 << (gic_irq(d) % 32);
+	enable_offset = 4 * (gic_irq(d) / 32);
+
 	raw_spin_lock(&irq_controller_lock);
 
-	gic_set_irq_target(gic_dist_base(d), gic_irq(d), cpu, true);
+	val = readl_relaxed(reg) & ~mask;
+	/* Set affinity, mask IRQ. */
+	writel_relaxed(val | bit, reg);
 
 	if (update_enable) {
-		enable_mask = 1 << (gic_irq(d) % 32);
-		enable_offset = 4 * (gic_irq(d) / 32);
 		writel_relaxed(enable_mask,
-			gic_data_dist_base(&gic_data) + GIC_DIST_ENABLE_SET + enable_offset);
+			gic_data_dist_base(&gic_data) + GIC_DIST_ENABLE_SET +
+			enable_offset);
 	}
 
 	raw_spin_unlock(&irq_controller_lock);
@@ -590,11 +595,11 @@ static int gic_set_affinity(struct irq_data *d,
 			    const struct cpumask *mask_val,
 			    bool force)
 {
-	u32 pcpu;
-	unsigned int irqid;
-	struct cpumask *affinity_mask;
+	u32 pcpu = cpu_logical_map(smp_processor_id());
+	unsigned int irqid = gic_irq(d);
+	struct cpumask *affinity_mask = (struct cpumask *)mask_val;
 	u32 mask;
-	u32 oldcpu;
+	u32 oldcpu = irq_cpuid[irqid];
 	struct gic_rpc_data *gic_rpc_ptr;
 	int rval;
 	bool new_same_core = false;
@@ -607,12 +612,6 @@ static int gic_set_affinity(struct irq_data *d,
 
 	BUG_ON(!irqs_disabled());
 
-
-	pcpu = cpu_logical_map(smp_processor_id());
-	irqid = gic_irq(d);
-	affinity_mask = (struct cpumask *)mask_val;
-	oldcpu = irq_cpuid[irqid];
-
 	if (irqid >= MAX_GIC_INTERRUPTS)
 		return -EINVAL;
@@ -620,6 +619,7 @@ static int gic_set_affinity(struct irq_data *d,
 	if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM))
 		return IRQ_SET_MASK_OK;
 
+
 	if (force)
 		add_cpu = cpumask_any(cpu_online_mask);
 	else
@@ -665,21 +665,25 @@ static int gic_set_affinity(struct irq_data *d,
 		}
 	}
 
+	mutex_lock(&affinity_lock);
+
+	/* Update Axxia IRQ affinity table with the new physical CPU number. */
+	irq_cpuid[irqid] = cpu_logical_map(add_cpu);
+
 	/*
 	 * We clear first to make sure the affinity mask always has a bit set,
 	 * especially when the two cpus are in the same cluster.
 	 */
 	if (irqid != IRQ_PMU) {
+
 		if (clear_needed == AFFINITY_CLEAR_LOCAL) {
 			_gic_clear_affinity(d, del_cpu, update_enable);
 		} else if (clear_needed == AFFINITY_CLEAR_OTHER_CLUSTER) {
-			mask = 0xf << ((oldcpu / CORES_PER_CLUSTER) * 4);
-			del_cpu = cpumask_any_and((struct cpumask *)&mask,
-						  cpu_online_mask);
+			mask = 0xf << (oldcpu / CORES_PER_CLUSTER);
+			del_cpu = cpumask_any_and((struct cpumask *)&mask, cpu_online_mask);
 
 			if (del_cpu < nr_cpu_ids) {
@@ -696,23 +700,19 @@ static int gic_set_affinity(struct irq_data *d,
 				gic_rpc_ptr->oldcpu = oldcpu;
 				gic_rpc_ptr->d = d;
 				gic_rpc_ptr->update_enable = update_enable;
-				get_cpu();
 				rval = axxia_put_item(&axxia_circ_q, (void *) gic_rpc_ptr);
-				put_cpu();
 				if (rval) {
 					pr_err(
 					"ERROR: failed to add CLR_AFFINITY request for cpu: %d\n",
 						del_cpu);
 					kfree((void *) gic_rpc_ptr);
-					mutex_unlock(&affinity_lock);
-					return rval;
 				}
 				schedule_work_on(0, &axxia_gic_affinity_work);
-			} else
-				pr_err("ERROR: no CPUs left\n");
+			}
 		}
 	}
 
+
 	if (set_needed == AFFINITY_SET_LOCAL) {
 		_gic_set_affinity(d, add_cpu, update_enable);
@@ -731,22 +731,19 @@ static int gic_set_affinity(struct irq_data *d,
 		gic_rpc_ptr->cpu = add_cpu;
 		gic_rpc_ptr->update_enable = update_enable;
 		gic_rpc_ptr->d = d;
-		get_cpu();
 		rval = axxia_put_item(&axxia_circ_q, (void *) gic_rpc_ptr);
-		put_cpu();
 		if (rval) {
 			pr_err("ERROR: failed to add SET_AFFINITY request for cpu: %d\n",
 			       add_cpu);
 			kfree((void *) gic_rpc_ptr);
-			mutex_unlock(&affinity_lock);
-			return rval;
 		}
 		schedule_work_on(0, &axxia_gic_affinity_work);
 	}
 
-	/* Update Axxia IRQ affinity table with the new physical CPU number. */
-	irq_cpuid[irqid] = cpu_logical_map(add_cpu);
+
+	mutex_unlock(&affinity_lock);
+
 	return IRQ_SET_MASK_OK;
 }
@@ -1230,9 +1227,10 @@ static void __init gic_axxia_init(struct gic_chip_data *gic)
 		writel_relaxed(cpumask, ipi_mask_reg_base + 0x40 + i * 4);
 }
 
-static void gic_dist_init(struct gic_chip_data *gic)
+static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
 {
 	unsigned int i;
+	u32 cpumask;
 	unsigned int gic_irqs = gic->gic_irqs;
 	void __iomem *base = gic_data_dist_base(gic);
 	u32 cpu = cpu_logical_map(smp_processor_id());
@@ -1244,8 +1242,6 @@ static void gic_dist_init(struct gic_chip_data *gic)
 	u32 val;
 #ifdef CONFIG_HOTPLUG_CPU
 	u32 this_cluster = get_cluster_id();
-	u32 powered_on = 0;
-	u32 ccpu;
 #endif
 
 	/* Initialize the distributor interface once per CPU cluster */
@@ -1254,9 +1250,11 @@ static void gic_dist_init(struct gic_chip_data *gic)
 		return;
 #endif
 
-	writel_relaxed(0, base + GIC_DIST_CTRL);
+	cpumask = 1 << cpu;
+	cpumask |= cpumask << 8;
+	cpumask |= cpumask << 16;
 
-	/*################################# CONFIG IRQS ####################################*/
+	writel_relaxed(0, base + GIC_DIST_CTRL);
 
 	/*
 	 * Set all global interrupts to be level triggered, active low.
 	 */
 	for (i = 32; i < gic_irqs; i += 16)
 		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
 
 	/*
-	 * Set Axxia IPI interrupts to be edge triggered.
-	 */
-	for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
-		confmask = 0x2 << ((i % 16) * 2);
-		confoff = (i / 16) * 4;
-		val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
-		val |= confmask;
-		writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-	}
-
-	/*################################# PRIORITY ####################################*/
-	/*
-	 * Set priority on PPI and SGI interrupts
+	 * Set all global interrupts to this CPU only.
+	 * (Only do this for the first core on cluster 0).
 	 */
-	for (i = 0; i < 32; i += 4)
-		writel_relaxed(0xa0a0a0a0,
-				base + GIC_DIST_PRI + i * 4 / 4);
+	if (cpu == 0)
+		for (i = 32; i < gic_irqs; i += 4)
+			writel_relaxed(cpumask,
+				       base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
 	 * Set priority on all global interrupts.
 	 */
@@ -1289,58 +1277,54 @@ static void gic_dist_init(struct gic_chip_data *gic)
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(0xa0a0a0a0,
 				base + GIC_DIST_PRI + i * 4 / 4);
-
-	/*################################# TARGET ####################################*/
 	/*
-	 * Set all global interrupts to this CPU only.
-	 * (Only do this for the first core on cluster 0).
+	 * Disable all interrupts. Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
 	 */
-	if (cpu == 0)
-		for (i = 32; i < gic_irqs; i += 4)
-			writel_relaxed(0x01010101, base + GIC_DIST_TARGET + i * 4 / 4);
+	for (i = 32; i < gic_irqs; i += 32) {
+		writel_relaxed(0xffffffff,
+			base + GIC_DIST_ACTIVE_CLEAR + i * 4 / 32);
+		writel_relaxed(0xffffffff,
+			base + GIC_DIST_PENDING_CLEAR + i * 4 / 32);
+		writel_relaxed(0xffffffff,
+			base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+	}
 
 	/*
 	 * Set Axxia IPI interrupts for all CPUs in this cluster.
 	 */
-#ifdef CONFIG_HOTPLUG_CPU
-	powered_on = (~pm_cpu_powered_down) & 0xFFFF;
-#endif
 	for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
 		cpumask_8 = 1 << ((i - IPI0_CPU0) % 4);
-#ifdef CONFIG_HOTPLUG_CPU
-		ccpu = (this_cluster * 4) + ((i - IPI0_CPU0) % CORES_PER_CLUSTER);
-		if ((1 << ccpu) & powered_on)
-			writeb_relaxed(cpumask_8, base + GIC_DIST_TARGET + i);
-		else
-			writeb_relaxed(0x00, base + GIC_DIST_TARGET + i);
-#else
 		writeb_relaxed(cpumask_8, base + GIC_DIST_TARGET + i);
-#endif
+	}
+
+	/*
+	 * Set the PMU IRQ to the first cpu in this cluster.
+	 */
+	writeb_relaxed(0x01, base + GIC_DIST_TARGET + IRQ_PMU);
+
+	/*
+	 * Set Axxia IPI interrupts to be edge triggered.
+	 */
+	for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
+		confmask = 0x2 << ((i % 16) * 2);
+		confoff = (i / 16) * 4;
+		val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+		val |= confmask;
+		writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
 	}
 
-	/*################################# ENABLE IRQS ####################################*/
 	/*
 	 * Do the initial enable of the Axxia IPI interrupts here.
 	 * NOTE: Writing a 0 to this register has no effect, so
 	 * no need to read and OR in bits, just writing is OK.
 	 */
-#ifdef CONFIG_HOTPLUG_CPU
-	powered_on = (~pm_cpu_powered_down) & 0xFFFF;
-#endif
-
 	for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
 		enablemask = 1 << (i % 32);
 		enableoff = (i / 32) * 4;
-#ifdef CONFIG_HOTPLUG_CPU
-		ccpu = (this_cluster * 4) + ((i - IPI0_CPU0) % CORES_PER_CLUSTER);
-		if ((1 << ccpu) & powered_on)
-			writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
-#else
 		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
-#endif
 	}
 
 	/*
@@ -1350,52 +1334,33 @@ static void gic_dist_init(struct gic_chip_data *gic)
 	enableoff = (IRQ_PMU / 32) * 4;
 	writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
-
 	writel_relaxed(1, base + GIC_DIST_CTRL);
-
 }
 
-static void gic_cpu_init(struct gic_chip_data *gic)
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
 	void __iomem *dist_base = gic_data_dist_base(gic);
 	void __iomem *base = gic_data_cpu_base(gic);
 	int i;
-	u32 enablemask;
-	u32 enableoff;
-	u32 ccpu;
-	u32 cpu = smp_processor_id();
-	u32 cluster = cpu / CORES_PER_CLUSTER;
-	u32 cpumask_8;
+
 	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, and also all SGI interrupts (we don't use
 	 * SGIs in the Axxia).
 	 */
+	writel_relaxed(0xffffffff, dist_base + GIC_DIST_ENABLE_CLEAR);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	if (!cluster_power_up[cluster]) {
-#endif
-		writel_relaxed(0, dist_base + GIC_DIST_CTRL);
-		for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
-			cpumask_8 = 1 << ((i - IPI0_CPU0) % 4);
-			enablemask = 1 << (i % 32);
-			enableoff = (i / 32) * 4;
-			ccpu = (cluster * 4) + ((i - IPI0_CPU0) % CORES_PER_CLUSTER);
-			if (ccpu == cpu) {
-				writeb_relaxed(cpumask_8, dist_base + GIC_DIST_TARGET + i);
-				writel_relaxed(enablemask, dist_base + GIC_DIST_ENABLE_SET + enableoff);
-			}
-		}
-		writel_relaxed(1, dist_base + GIC_DIST_CTRL);
-#ifdef CONFIG_HOTPLUG_CPU
-	}
-#endif
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel_relaxed(0xa0a0a0a0,
+			       dist_base + GIC_DIST_PRI + i * 4 / 4);
 
 	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
-
 	writel_relaxed(1, base + GIC_CPU_CTRL);
 }
@@ -1602,7 +1567,7 @@ void __init axxia_gic_init_bases(int irq_start,
 }
 
 #ifdef CONFIG_SMP
-void axxia_gic_secondary_init(void)
+void __cpuinit axxia_gic_secondary_init(void)
 {
 	struct gic_chip_data *gic = &gic_data;
-- 
1.7.9.5

--
_______________________________________________
linux-yocto mailing list
linux-yocto@yoctoproject.org
https://lists.yoctoproject.org/listinfo/linux-yocto