From: Magnus Karlsson <magnus.karls...@intel.com>

This reverts commit b8dd1bdee59fd5dd8cdc038d802a3a68400066a6.

Signed-off-by: Magnus Karlsson <magnus.karls...@intel.com>
---
 arch/arm/Kconfig                             |   37 --
 arch/arm/mach-axxia/Makefile                 |    2 +-
 arch/arm/mach-axxia/axxia-gic.c              |  329 +++----------
 arch/arm/mach-axxia/axxia_circular_queue.c   |   63 ---
 arch/arm/mach-axxia/axxia_circular_queue.h   |   30 --
 arch/arm/mach-axxia/hotplug.c                |  238 ++-------
 arch/arm/mach-axxia/include/mach/axxia-gic.h |    3 +-
 arch/arm/mach-axxia/lsi_power_management.c   |  673 ++++++++++++++++----------
 arch/arm/mach-axxia/lsi_power_management.h   |    6 +-
 arch/arm/mach-axxia/platsmp.c                |   28 +-
 10 files changed, 538 insertions(+), 871 deletions(-)
 delete mode 100644 arch/arm/mach-axxia/axxia_circular_queue.c
 delete mode 100644 arch/arm/mach-axxia/axxia_circular_queue.h

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c766c5f..c15c6fa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1593,8 +1593,6 @@ config NR_CPUS
        depends on SMP
        default "4"
 
-menu "Support for hot-pluggable CPUs"
-
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
        depends on SMP && HOTPLUG
@@ -1602,41 +1600,6 @@ config HOTPLUG_CPU
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
 
-choice
-       prompt "CPU Power Down Mode"
-       default HOTPLUG_CPU_COMPLETE_POWER_DOWN
-       help
-               This is used to select how the CPU is going to be powered down. If LOW POWER
-               is selected then the CPU enters a WFI state and waits for an interrupt to
-               wake up. If COMPLETE POWER down is selected the CPU power is turned off. The only
-               way to power on the CPU is to execute a command.
-
-config HOTPLUG_CPU_COMPLETE_POWER_DOWN
-       bool "Power off the CPU"
-       help
-               This will power off the CPU completely. The irqs are migrated
-               to another CPU.
-
-config HOTPLUG_CPU_LOW_POWER
-       bool "Low Power CPU (wfi)"
-       help
-               This will put the CPU into a low power mode wfi mode. When an interrupt
-               is received the CPU will power on again.
-
-endchoice
-
-config HOTPLUG_CPU_L2_POWER_DOWN
-       bool "Power Off L2 Cache"
-       depends on HOTPLUG_CPU_COMPLETE_POWER_DOWN
-       default n if HOTPLUG_CPU_LOW_POWER
-       help
-               Select this if you want to power down the L2 cache when
-               all CPUS of a cluster have been powered off.
-
-endmenu
-
-
-
 config ARM_PSCI
        bool "Support for the ARM Power State Coordination Interface (PSCI)"
        depends on CPU_V7
diff --git a/arch/arm/mach-axxia/Makefile b/arch/arm/mach-axxia/Makefile
index de95182..73392a4 100644
--- a/arch/arm/mach-axxia/Makefile
+++ b/arch/arm/mach-axxia/Makefile
@@ -11,6 +11,6 @@ obj-y                                 += pci.o
 obj-y                                  += ddr_retention.o ddr_shutdown.o
 obj-$(CONFIG_SMP)                      += platsmp.o headsmp.o
 obj-$(CONFIG_ARCH_AXXIA_GIC)           += axxia-gic.o
-obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o lsi_power_management.o axxia_circular_queue.o
+obj-$(CONFIG_HOTPLUG_CPU)              += hotplug.o lsi_power_management.o
 obj-$(CONFIG_AXXIA_RIO)                 += rapidio.o
 obj-$(CONFIG_HW_PERF_EVENTS)            += perf_event_platform.o smon.o
diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index 6345a99..29a80f6 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -32,15 +32,13 @@
  * As such, the enable set/clear, pending set/clear and active bit
  * registers are banked per-cpu for these sources.
  */
+
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/cpu_pm.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip/arm-gic.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
 
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -48,7 +46,6 @@
 
 #include <mach/axxia-gic.h>
 #include "lsi_power_management.h"
-#include "axxia_circular_queue.h"
 
 #define MAX_GIC_INTERRUPTS  1020
 
@@ -160,7 +157,6 @@ struct gic_notifier_data {
        struct notifier_block *self;
        unsigned long cmd;
        void *v;
-
 };
 #endif
 
@@ -169,16 +165,13 @@ struct gic_rpc_data {
        u32 func_mask;
        u32 cpu, oldcpu;
        u32 type;
-       bool update_enable;
        const struct cpumask *mask_val;
 #ifdef CONFIG_CPU_PM
        struct gic_notifier_data gn_data;
 #endif
 };
 
-
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
-
 static DEFINE_MUTEX(irq_bus_lock);
 
 static struct gic_chip_data gic_data __read_mostly;
@@ -205,47 +198,6 @@ static inline unsigned int gic_irq(struct irq_data *d)
        return d->hwirq;
 }
 
-
-/*************************** CIRCULAR QUEUE **************************************/
-struct circular_queue_t axxia_circ_q;
-static void axxia_gic_flush_affinity_queue(struct work_struct *dummy);
-static void gic_set_affinity_remote(void *info);
-static void gic_clr_affinity_remote(void *info);
-
-static DECLARE_WORK(axxia_gic_affinity_work, axxia_gic_flush_affinity_queue);
-static DEFINE_MUTEX(affinity_lock);
-
-enum axxia_affinity_mode {
-       AFFINITY_CLEAR_LOCAL = 1,
-       AFFINITY_CLEAR_OTHER_CLUSTER,
-       AFFINITY_SET_LOCAL,
-       AFFINITY_SET_OTHER_CLUSTER
-};
-
-static void axxia_gic_flush_affinity_queue(struct work_struct *dummy)
-{
-
-       void *qdata;
-       struct gic_rpc_data *rpc_data;
-
-
-       while (axxia_get_item(&axxia_circ_q, &qdata) != -1) {
-               rpc_data = (struct gic_rpc_data *) qdata;
-               if (rpc_data->func_mask == SET_AFFINITY) {
-                       smp_call_function_single(rpc_data->cpu,
-                                       gic_set_affinity_remote,
-                                       qdata, 1);
-
-               } else if (rpc_data->func_mask == CLR_AFFINITY) {
-
-                       smp_call_function_single(rpc_data->cpu,
-                                       gic_clr_affinity_remote,
-                                       qdata, 1);
-               }
-               kfree(qdata);
-       }
-}
-
 /*
  * This GIC driver implements IRQ management routines (e.g., gic_mask_irq,
  * etc.) that work across multiple clusters. Since a core cannot directly
@@ -258,7 +210,6 @@ static void axxia_gic_flush_affinity_queue(struct work_struct *dummy)
  * The Linux interrupt code has a mechanism, which is called bus lock/unlock,
  * which was created for irq chips hanging off slow busses like i2c/spi. The
  * bus lock is mutex that is used to serialize bus accesses. We take advantage
- *
  * of this feature here, because we can think of IRQ management routines having
  * to remotely execute on other clusters as a "slow bus" action. Doing this
  * here serializes all IRQ management interfaces and guarantees that different
@@ -482,52 +433,15 @@ static int gic_retrigger(struct irq_data *d)
        return -ENXIO;
 }
 
-static int _gic_clear_affinity(struct irq_data *d, u32 cpu, bool update_enable)
-{
-
-       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int shift = (gic_irq(d) % 4) * 8;
-       u32 val;
-       u32 mask = 0;
-       u32 enable_mask, enable_offset;
-
-       mask = 0xff << shift;
-
-       enable_mask = 1 << (gic_irq(d) % 32);
-       enable_offset = 4 * (gic_irq(d) / 32);
-
-       raw_spin_lock(&irq_controller_lock);
-
-       val = readl_relaxed(reg) & ~mask;
-       /* Clear affinity, mask IRQ. */
-       writel_relaxed(val, reg);
-
-       if (update_enable) {
-
-               writel_relaxed(enable_mask,
-                               gic_data_dist_base(&gic_data) + GIC_DIST_PENDING_CLEAR + enable_offset);
-               writel_relaxed(enable_mask,
-                               gic_data_dist_base(&gic_data) + GIC_DIST_ACTIVE_CLEAR + enable_offset);
-               writel_relaxed(enable_mask,
-                               gic_data_dist_base(&gic_data) + GIC_DIST_ENABLE_CLEAR + enable_offset);
-
-       }
-
-       raw_spin_unlock(&irq_controller_lock);
-
-       return IRQ_SET_MASK_OK;
-
-}
-
 static int _gic_set_affinity(struct irq_data *d,
-                            u32 cpu,
-                            bool update_enable)
+                            const struct cpumask *mask_val,
+                            bool do_clear)
 {
-       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+       void __iomem *reg  = gic_dist_base(d) + GIC_DIST_TARGET +
+                            (gic_irq(d) & ~3);
        unsigned int shift = (gic_irq(d) % 4) * 8;
-       u32 val;
-       u32 mask = 0;
-       u32 bit;
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       u32 val, mask, bit;
        u32 enable_mask, enable_offset;
 
        /*
@@ -541,17 +455,18 @@ static int _gic_set_affinity(struct irq_data *d,
        enable_offset = 4 * (gic_irq(d) / 32);
 
        raw_spin_lock(&irq_controller_lock);
-
        val = readl_relaxed(reg) & ~mask;
-       /* Set affinity, mask IRQ. */
-       writel_relaxed(val | bit, reg);
-
-       if (update_enable) {
-               writel_relaxed(enable_mask,
-                       gic_data_dist_base(&gic_data) + GIC_DIST_ENABLE_SET
-                                       + enable_offset);
+       if (do_clear == true) {
+               /* Clear affinity, mask IRQ. */
+               writel_relaxed(val, reg);
+               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data)
+                               + GIC_DIST_ENABLE_CLEAR + enable_offset);
+       } else {
+               /* Set affinity, unmask IRQ. */
+               writel_relaxed(val | bit, reg);
+               writel_relaxed(enable_mask, gic_data_dist_base(&gic_data)
+                               + GIC_DIST_ENABLE_SET + enable_offset);
        }
-
        raw_spin_unlock(&irq_controller_lock);
 
        return IRQ_SET_MASK_OK;
@@ -566,37 +481,27 @@ static int _gic_set_affinity(struct irq_data *d,
 static void gic_set_affinity_remote(void *info)
 {
        struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
-       _gic_set_affinity(rpc->d, rpc->cpu, rpc->update_enable);
-
+       _gic_set_affinity(rpc->d, rpc->mask_val, false);
 }
 static void gic_clr_affinity_remote(void *info)
 {
        struct gic_rpc_data *rpc = (struct gic_rpc_data *)info;
-       _gic_clear_affinity(rpc->d, rpc->oldcpu, rpc->update_enable);
-
+       _gic_set_affinity(rpc->d, rpc->mask_val, true);
 }
 
 static int gic_set_affinity(struct irq_data *d,
                            const struct cpumask *mask_val,
                            bool force)
 {
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        u32 pcpu = cpu_logical_map(smp_processor_id());
        unsigned int irqid = gic_irq(d);
-       struct cpumask *affinity_mask = (struct cpumask *)mask_val;
-       u32 mask;
-       u32 oldcpu = irq_cpuid[irqid];
-       struct gic_rpc_data *gic_rpc_ptr;
-       int rval;
-       bool new_same_core = false;
-       bool old_same_core = false;
-       bool update_enable = false;
-       u32 clear_needed = 0;
-       u32  set_needed = 0;
-       u32 add_cpu;
-       u32 del_cpu;
 
        BUG_ON(!irqs_disabled());
 
+       if (cpu >= nr_cpu_ids)
+               return -EINVAL;
+
        if (irqid >= MAX_GIC_INTERRUPTS)
                return -EINVAL;
 
@@ -604,131 +509,53 @@ static int gic_set_affinity(struct irq_data *d,
        if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM))
                return IRQ_SET_MASK_OK;
 
-
-       if (force)
-               add_cpu = cpumask_any(cpu_online_mask);
-       else
-               add_cpu = cpumask_any_and(affinity_mask, cpu_online_mask);
-
-       if (add_cpu >= nr_cpu_ids) {
-               pr_err("ERROR: no cpus left\n");
-               return -EINVAL;
-       }
-
-       del_cpu = oldcpu;
-
-       if (add_cpu == del_cpu)
+       /*
+        * If the new IRQ affinity is the same as current, then
+        * there's no need to update anything.
+        */
+       if (cpu_logical_map(cpu) == irq_cpuid[irqid])
                return IRQ_SET_MASK_OK;
 
-       new_same_core =
-                       ((add_cpu / CORES_PER_CLUSTER) == (pcpu / CORES_PER_CLUSTER)) ?
-                                       true : false;
-       old_same_core =
-                       ((del_cpu / CORES_PER_CLUSTER) == (pcpu / CORES_PER_CLUSTER)) ?
-                                       true : false;
-
-       update_enable = ((add_cpu / CORES_PER_CLUSTER) == (del_cpu / CORES_PER_CLUSTER)) ? false : true;
-
-       if (new_same_core) {
-
-               if (old_same_core) {
-                       clear_needed = AFFINITY_CLEAR_LOCAL;
-                       set_needed = AFFINITY_SET_LOCAL;
-               } else {
-                       set_needed = AFFINITY_SET_LOCAL;
-                       clear_needed = AFFINITY_CLEAR_OTHER_CLUSTER;
-               }
-
+       /*
+        * If the new physical cpu assignment falls within the same
+        * cluster as the cpu we're currently running on, set the IRQ
+        * affinity directly. Otherwise, use the RPC mechanism.
+        */
+       if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) ==
+               (pcpu / CORES_PER_CLUSTER)) {
+               _gic_set_affinity(d, mask_val, false);
        } else {
-
-               if (old_same_core) {
-                       set_needed = AFFINITY_SET_OTHER_CLUSTER;
-                       clear_needed = AFFINITY_CLEAR_LOCAL;
-               } else {
-                       set_needed = AFFINITY_SET_OTHER_CLUSTER;
-                       clear_needed = AFFINITY_CLEAR_OTHER_CLUSTER;
-               }
+               gic_rpc_data.func_mask |= SET_AFFINITY;
+               gic_rpc_data.cpu = cpu;
+               gic_rpc_data.d = d;
+               gic_rpc_data.mask_val = mask_val;
        }
 
-       mutex_lock(&affinity_lock);
-
-       /* Update Axxia IRQ affinity table with the new physical CPU number. */
-       irq_cpuid[irqid] = cpu_logical_map(add_cpu);
-
        /*
-        * We clear first to make sure the affinity mask always has a bit set,
-        * especially when the two cpus are in the same cluster.
+        * If the new physical cpu assignment is on a cluster that's
+        * different than the prior cluster, clear the IRQ affinity
+        * on the old cluster.
         */
-       if (irqid != IRQ_PMU) {
-
-               if (clear_needed == AFFINITY_CLEAR_LOCAL) {
-
-                       _gic_clear_affinity(d, del_cpu, update_enable);
-
-               } else if (clear_needed == AFFINITY_CLEAR_OTHER_CLUSTER) {
-
-                       mask = 0xf << (oldcpu / CORES_PER_CLUSTER);
-                       del_cpu = cpumask_any_and((struct cpumask *)&mask, cpu_online_mask);
-
-                       if (del_cpu < nr_cpu_ids) {
-
-                               gic_rpc_ptr = kmalloc(sizeof(struct gic_rpc_data), GFP_KERNEL);
-                               if (!gic_rpc_ptr) {
-                                       pr_err(
-                                                       "ERROR: failed to get 
memory for workqueue to set affinity false\n");
-                                       mutex_unlock(&affinity_lock);
-                                       return -EFAULT;
-                               }
-
-                               gic_rpc_ptr->func_mask = CLR_AFFINITY;
-                               gic_rpc_ptr->cpu = del_cpu;
-                               gic_rpc_ptr->oldcpu = oldcpu;
-                               gic_rpc_ptr->d = d;
-                               gic_rpc_ptr->update_enable = update_enable;
-                               rval = axxia_put_item(&axxia_circ_q, (void *) gic_rpc_ptr);
-                               if (rval) {
-                                       pr_err(
-                                                       "ERROR: failed to add 
CLR_AFFINITY request for cpu: %d\n",
-                                                       del_cpu);
-                                       kfree((void *) gic_rpc_ptr);
-                               }
-                               schedule_work_on(0, &axxia_gic_affinity_work);
-                       }
-               }
-       }
-
-
-       if (set_needed == AFFINITY_SET_LOCAL) {
-
-               _gic_set_affinity(d, add_cpu, update_enable);
-
-       } else if (set_needed == AFFINITY_SET_OTHER_CLUSTER) {
-
-               gic_rpc_ptr = kmalloc(sizeof(struct gic_rpc_data), GFP_KERNEL);
-               if (!gic_rpc_ptr) {
-                       pr_err(
-                                       "ERROR: failed to get memory for 
workqueue to set affinity false\n");
-                       mutex_unlock(&affinity_lock);
-                       return -EFAULT;
-               }
-
-               gic_rpc_ptr->func_mask = SET_AFFINITY;
-               gic_rpc_ptr->cpu = add_cpu;
-               gic_rpc_ptr->update_enable = update_enable;
-               gic_rpc_ptr->d = d;
-               rval = axxia_put_item(&axxia_circ_q, (void *) gic_rpc_ptr);
-               if (rval) {
-                       pr_err("ERROR: failed to add SET_AFFINITY request for 
cpu: %d\n",
-                                       add_cpu);
-                       kfree((void *) gic_rpc_ptr);
+       if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) !=
+               (irq_cpuid[irqid] / CORES_PER_CLUSTER))) {
+               /*
+                * If old cpu assignment falls within the same cluster as
+                * the cpu we're currently running on, clear the IRQ affinity
+                * directly. Otherwise, use RPC mechanism.
+                */
+               if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
+                       (pcpu / CORES_PER_CLUSTER)) {
+                       _gic_set_affinity(d, mask_val, true);
+               } else {
+                       gic_rpc_data.func_mask |= CLR_AFFINITY;
+                       gic_rpc_data.oldcpu = irq_cpuid[irqid];
+                       gic_rpc_data.d = d;
+                       gic_rpc_data.mask_val = mask_val;
                }
-               schedule_work_on(0, &axxia_gic_affinity_work);
-
        }
 
-
-       mutex_unlock(&affinity_lock);
-
+       /* Update Axxia IRQ affinity table with the new physical CPU number. */
+       irq_cpuid[irqid] = cpu_logical_map(cpu);
 
        return IRQ_SET_MASK_OK;
 }
@@ -998,7 +825,6 @@ static void gic_irq_sync_unlock(struct irq_data *d)
        int i, j, cpu;
        int nr_cluster_ids = ((nr_cpu_ids - 1) / CORES_PER_CLUSTER) + 1;
 
-
        if (gic_rpc_data.func_mask & IRQ_MASK) {
                smp_call_function_single(gic_rpc_data.cpu,
                                         gic_mask_remote,
@@ -1034,6 +860,18 @@ static void gic_irq_sync_unlock(struct irq_data *d)
                }
        }
 
+       if (gic_rpc_data.func_mask & SET_AFFINITY) {
+               smp_call_function_single(gic_rpc_data.cpu,
+                                        gic_set_affinity_remote,
+                                        &gic_rpc_data, 1);
+       }
+
+       if (gic_rpc_data.func_mask & CLR_AFFINITY) {
+               smp_call_function_single(gic_rpc_data.oldcpu,
+                                        gic_clr_affinity_remote,
+                                        &gic_rpc_data, 1);
+       }
+
 #ifdef CONFIG_CPU_PM
        if (gic_rpc_data.func_mask & GIC_NOTIFIER) {
                for (i = 0; i < nr_cluster_ids; i++) {
@@ -1063,7 +901,6 @@ static void gic_irq_sync_unlock(struct irq_data *d)
 
        /* Give the bus lock. */
        mutex_unlock(&irq_bus_lock);
-
 }
 
 static
@@ -1256,14 +1093,9 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
         * Disable all interrupts.  Leave the PPI and SGIs alone
         * as these enables are banked registers.
         */
-       for (i = 32; i < gic_irqs; i += 32) {
-               writel_relaxed(0xffffffff,
-                              base + GIC_DIST_ACTIVE_CLEAR + i * 4 / 32);
-               writel_relaxed(0xffffffff,
-                              base + GIC_DIST_PENDING_CLEAR + i * 4 / 32);
+       for (i = 32; i < gic_irqs; i += 32)
                writel_relaxed(0xffffffff,
                               base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
-       }
 
        /*
         * Set Axxia IPI interrupts for all CPUs in this cluster.
@@ -1297,7 +1129,8 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic)
        for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) {
                enablemask = 1 << (i % 32);
                enableoff = (i / 32) * 4;
-               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+               writel_relaxed(enablemask,
+                              base + GIC_DIST_ENABLE_SET + enableoff);
        }
 
        /*
@@ -1316,13 +1149,11 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
        void __iomem *base = gic_data_cpu_base(gic);
        int i;
 
-
        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, and also all SGI interrupts (we don't use
         * SGIs in the Axxia).
         */
-
        writel_relaxed(0xffffffff, dist_base + GIC_DIST_ENABLE_CLEAR);
 
        /*
@@ -1521,9 +1352,6 @@ void __init axxia_gic_init_bases(int irq_start,
        gic_dist_init(gic);
        gic_cpu_init(gic);
        gic_pm_init(gic);
-
-       axxia_initialize_queue(&axxia_circ_q);
-
 }
 
 void __cpuinit axxia_gic_secondary_init(void)
@@ -1534,6 +1362,11 @@ void __cpuinit axxia_gic_secondary_init(void)
        gic_cpu_init(&gic_data);
 }
 
+void __cpuinit axxia_hotplug_gic_secondary_init(void)
+{
+       gic_cpu_init(&gic_data);
+}
+
 #ifdef CONFIG_OF
 
 int __init axxia_gic_of_init(struct device_node *node,
@@ -1559,8 +1392,6 @@ int __init axxia_gic_of_init(struct device_node *node,
 
        axxia_gic_init_bases(-1, dist_base, cpu_base, node);
 
-
-
        return 0;
 }
 #endif
diff --git a/arch/arm/mach-axxia/axxia_circular_queue.c b/arch/arm/mach-axxia/axxia_circular_queue.c
deleted file mode 100644
index 971aead..0000000
--- a/arch/arm/mach-axxia/axxia_circular_queue.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * axxia_circular_queue.c
- *
- *  Created on: Sep 30, 2014
- *      Author: z8cpaul
- */
-
-
-#include <asm/exception.h>
-#include "axxia_circular_queue.h"
-
-
-void axxia_initialize_queue(struct circular_queue_t *queue)
-{
-       int i;
-
-       queue->valid_items = 0;
-       queue->first = 0;
-       queue->last = 0;
-
-       for (i = 0; i < MAX_ITEMS; i++)
-               queue->data[i] = NULL;
-
-       return;
-}
-
-bool axxia_is_empty(struct circular_queue_t *queue)
-{
-
-       if (queue->valid_items == 0)
-               return true;
-       else
-               return false;
-}
-
-int axxia_put_item(struct circular_queue_t *queue, void *item_value)
-
-{
-       if (queue->valid_items >= MAX_ITEMS) {
-               pr_err("ERROR: queue is full\n");
-               return -EINVAL;
-       } else {
-               queue->valid_items++;
-               queue->data[queue->last] = item_value;
-               queue->last = (queue->last + 1) % MAX_ITEMS;
-       }
-       return 0;
-}
-
-
-int axxia_get_item(struct circular_queue_t *queue, void **item_value)
-{
-
-       if (axxia_is_empty(queue)) {
-               return -1;
-       } else {
-               *item_value = queue->data[queue->first];
-               queue->first = (queue->first + 1) % MAX_ITEMS;
-               queue->valid_items--;
-       }
-       return 0;
-
-}
diff --git a/arch/arm/mach-axxia/axxia_circular_queue.h b/arch/arm/mach-axxia/axxia_circular_queue.h
deleted file mode 100644
index 0fe88a0..0000000
--- a/arch/arm/mach-axxia/axxia_circular_queue.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * axxia_circular_queue.h
- *
- *  Created on: Sep 30, 2014
- *      Author: z8cpaul
- */
-
-#ifndef AXXIA_CIRCULAR_QUEUE_H_
-#define AXXIA_CIRCULAR_QUEUE_H_
-
-#define MAX_ITEMS    1020
-
-struct circular_queue_t
-
-{
-       int first;
-       int last;
-       int valid_items;
-       void *data[MAX_ITEMS];
-};
-
-void axxia_initialize_queue(struct circular_queue_t *queue);
-
-bool axxia_is_empty(struct circular_queue_t *queue);
-
-int axxia_put_item(struct circular_queue_t *queue, void *item_value);
-
-int axxia_get_item(struct circular_queue_t *queue, void **item_value);
-
-#endif
diff --git a/arch/arm/mach-axxia/hotplug.c b/arch/arm/mach-axxia/hotplug.c
index d44fbb3..9e82bdc 100644
--- a/arch/arm/mach-axxia/hotplug.c
+++ b/arch/arm/mach-axxia/hotplug.c
@@ -11,140 +11,40 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
-#include <linux/of_address.h>
-#include <linux/delay.h>
 
-#include <mach/axxia-gic.h>
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
 #include <asm/cp15.h>
 #include "lsi_power_management.h"
-#include "axxia_circular_queue.h"
-extern struct circular_queue_t axxia_circ_q;
+
 
 extern volatile int pen_release;
 
-static inline void pm_cpu_logical_shutdown(u32 cpu)
+static inline void cpu_enter_lowpower_a15(void)
 {
-       u32 val;
-
-       asm volatile(
-       "       mrc     p15, 1, %0, c9, c0, 2\n"
-       : "=&r" (val)
-       : "Ir" (0x1)
-       : "cc");
+       unsigned int v;
 
        asm volatile(
        "       mrc     p15, 0, %0, c1, c0, 0\n"
        "       bic     %0, %0, %1\n"
        "       mcr     p15, 0, %0, c1, c0, 0\n"
-       : "=&r" (val)
+       : "=&r" (v)
        : "Ir" (CR_C)
        : "cc");
 
-       /* Clear and invalidate all date from L1 data cache */
        flush_cache_all();
 
-       /* Switch the processor over to AMP mode out of SMP */
-       asm volatile(
-                       "       mrc     p15, 0, %0, c1, c0, 1\n"
-                       "       bic     %0, %0, %1\n"
-                       "       mcr     p15, 0, %0, c1, c0, 1\n"
-                       : "=&r" (val)
-                       : "Ir" (0x40)
-                       : "cc");
-
-       isb();
-       dsb();
-
-       wfi();
-
-}
-
-static inline void pm_L2_logical_shutdown(u32 cpu)
-{
-       u32 val;
-
        asm volatile(
-       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       /*
+       * Turn off coherency
+       */
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
        "       bic     %0, %0, %1\n"
-       "       mcr     p15, 0, %0, c1, c0, 0\n"
-       : "=&r" (val)
-       : "Ir" (CR_C)
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+       : "=&r" (v)
+       : "Ir" (0x40)
        : "cc");
 
-
-       asm volatile(
-                       /*
-                        * Disable L2 prefetch
-                        */
-                       "       mrc     p15, 1, %0, c15, c0, 3\n"
-                       "       orr     %0, %0, %1\n"
-                       "       mcr     p15, 1, %0, c15, c0, 3\n"
-                       : "=&r" (val)
-                       : "Ir" (0x400)
-                       : "cc");
-
-       isb();
-       dsb();
-
-       /* Clear and invalidate all L1 and L2 data cache */
-       flush_cache_all();
-
-
-       /* Turn the DBG Double Lock quiet */
-       asm volatile(
-                       /*
-                        * Turn Off the DBGOSDLR.DLK bit
-                        */
-                       "       mrc     p14, 0, %0, c1, c3, 4\n"
-                       "       orr     %0, %0, %1\n"
-                       "       mcr     p14, 0, %0, c1, c3, 4\n"
-                       : "=&r" (val)
-                       : "Ir" (0x1)
-                       : "cc");
-
-       /* Switch the processor over to AMP mode out of SMP */
-       asm volatile(
-                       "       mrc     p15, 0, %0, c1, c0, 1\n"
-                       "       bic     %0, %0, %1\n"
-                       "       mcr     p15, 0, %0, c1, c0, 1\n"
-                       : "=&r" (val)
-                       : "Ir" (0x40)
-                       : "cc");
-
-       isb();
-       dsb();
-
-       wfi();
-}
-
-#ifdef CONFIG_HOTPLUG_CPU_LOW_POWER
-static inline void cpu_enter_lowpower_a15(void)
-{
-       unsigned int v;
-
-       asm volatile(
-                       "       mrc     p15, 0, %0, c1, c0, 0\n"
-                       "       bic     %0, %0, %1\n"
-                       "       mcr     p15, 0, %0, c1, c0, 0\n"
-                       : "=&r" (v)
-                       : "Ir" (CR_C)
-                       : "cc");
-
-       flush_cache_all();
-
-       asm volatile(
-                       /*
-                        * Turn off coherency
-                        */
-                       "       mrc     p15, 0, %0, c1, c0, 1\n"
-                       "       bic     %0, %0, %1\n"
-                       "       mcr     p15, 0, %0, c1, c0, 1\n"
-                       : "=&r" (v)
-                       : "Ir" (0x40)
-                       : "cc");
-
        isb();
        dsb();
 }
@@ -154,65 +54,21 @@ static inline void cpu_leave_lowpower(void)
        unsigned int v;
 
        asm volatile(
-                       "mrc    p15, 0, %0, c1, c0, 0\n"
-                       "       orr     %0, %0, %1\n"
-                       "       mcr     p15, 0, %0, c1, c0, 0\n"
-                       "       mrc     p15, 0, %0, c1, c0, 1\n"
-                       "       orr     %0, %0, %2\n"
-                       "       mcr     p15, 0, %0, c1, c0, 1\n"
-                       : "=&r" (v)
-                       : "Ir" (CR_C), "Ir" (0x40)
-                       : "cc");
+               "mrc    p15, 0, %0, c1, c0, 0\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       "       mrc     p15, 0, %0, c1, c0, 1\n"
+       "       orr     %0, %0, %2\n"
+       "       mcr     p15, 0, %0, c1, c0, 1\n"
+         : "=&r" (v)
+         : "Ir" (CR_C), "Ir" (0x40)
+         : "cc");
 }
 
-static void __ref platform_do_lowpower(unsigned int cpu, int *spurious)
-{
-       int phys_cpu, cluster;
-
-       /*
-        * there is no power-control hardware on this platform, so all
-        * we can do is put the core into WFI; this is safe as the calling
-        * code will have already disabled interrupts
-        */
-       for (;;) {
-               wfi();
-
-       /*
-       * Convert the "cpu" variable to be compatible with the
-       * ARM MPIDR register format (CLUSTERID and CPUID):
-       *
-       * Bits:   |11 10 9 8|7 6 5 4 3 2|1 0
-       *         | CLUSTER | Reserved  |CPU
-       */
-       phys_cpu = cpu_logical_map(cpu);
-       cluster = (phys_cpu / 4) << 8;
-       phys_cpu = cluster + (phys_cpu % 4);
-
-       if (pen_release == phys_cpu) {
-       /*
-        * OK, proper wakeup, we're done
-        */
-               break;
-       }
-
-       /*
-        * Getting here, means that we have come out of WFI without
-        * having been woken up - this shouldn't happen
-        *
-        * Just note it happening - when we're woken, we can report
-        * its occurrence.
-        */
-       (*spurious)++;
-       }
-}
-#endif
 
 int axxia_platform_cpu_kill(unsigned int cpu)
 {
-
-#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
        pm_cpu_shutdown(cpu);
-#endif
        return 1;
 }
 
@@ -224,65 +80,33 @@ int axxia_platform_cpu_kill(unsigned int cpu)
 
 void axxia_platform_cpu_die(unsigned int cpu)
 {
-#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
-       bool last_cpu;
 
-       int timeout;
-       timeout = 30;
+       pm_data pm_request;
+       int rVal = 0;
+       bool lastCpu;
 
-       /* make sure no migrations are happening */
-       while (!axxia_is_empty(&axxia_circ_q)) {
+       pm_request.cpu = cpu;
+       pm_request.cluster = 0;
 
-               if (timeout-- == 0)
-                       break;
 
-               mdelay(1);
-       }
-
-       if (timeout == 0)
-               pr_err("ERROR: tried to shut down and Q was still full\n");
-
-       last_cpu = pm_cpu_last_of_cluster(cpu);
-       if (last_cpu)
-               pm_L2_logical_shutdown(cpu);
+       lastCpu = pm_cpu_last_of_cluster(cpu);
+       if (lastCpu)
+               rVal = pm_cpul2_logical_die(&pm_request);
        else
-               pm_cpu_logical_shutdown(cpu);
+               rVal = pm_cpu_logical_die(&pm_request);
+       if (rVal)
+               pr_err("CPU %d failed to die\n", cpu);
 
        for (;;)
                wfi();
 
-
-#else /* CPU low power mode */
-
-       int spurious = 0;
-
-       /*
-        * we're ready for shutdown now, so do it
-        */
-       cpu_enter_lowpower_a15();
-       pm_in_progress[cpu] = true;
-
-       platform_do_lowpower(cpu, &spurious);
-
-       /*
-        * bring this CPU back into the world of cache
-        * coherency, and then restore interrupts
-        */
-       cpu_leave_lowpower();
-
-       if (spurious)
-               pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
-#endif
-
 }
 
 int platform_cpu_disable(unsigned int cpu)
 {
-
        /*
         * we don't allow CPU 0 to be shutdown (it is still too special
         * e.g. clock tick interrupts)
         */
-
        return cpu == 0 ? -EPERM : 0;
 }
diff --git a/arch/arm/mach-axxia/include/mach/axxia-gic.h b/arch/arm/mach-axxia/include/mach/axxia-gic.h
index b9e5574..af7fbdf 100644
--- a/arch/arm/mach-axxia/include/mach/axxia-gic.h
+++ b/arch/arm/mach-axxia/include/mach/axxia-gic.h
@@ -12,6 +12,5 @@ void axxia_gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
 void axxia_gic_secondary_init(void);
 int __init axxia_gic_of_init(struct device_node *node,
                             struct device_node *parent);
-void axxia_gic_dump_mask(char *tmp, const struct cpumask *mask);
-void axxia_gic_kill_cpu(u32 rcpu);
+
 #endif
diff --git a/arch/arm/mach-axxia/lsi_power_management.c b/arch/arm/mach-axxia/lsi_power_management.c
index 3ae4afc..9723054 100644
--- a/arch/arm/mach-axxia/lsi_power_management.c
+++ b/arch/arm/mach-axxia/lsi_power_management.c
@@ -18,14 +18,12 @@
 #include <linux/errno.h>
 #include <linux/smp.h>
 #include <linux/delay.h>
-#include <linux/of_address.h>
 #include <asm/exception.h>
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
 #include <asm/cp15.h>
 
 #include "axxia.h"
-#include <mach/axxia-gic.h>
 #include "lsi_power_management.h"
 
 #undef DEBUG_CPU_PM
@@ -35,7 +33,6 @@
 
 #define PM_WAIT_TIME (10000)
 #define MAX_CLUSTER  (4)
-#define IPI_IRQ_MASK (0xFFFF)
 
 #define CHECK_BIT(var, pos) ((var) & (1 << (pos)))
 
@@ -53,60 +50,25 @@ PORESET_CLUSTER1,
 PORESET_CLUSTER2,
 PORESET_CLUSTER3 };
 
-static const u32 ipi_register[MAX_IPI] = {
-               NCP_SYSCON_MASK_IPI0,
-               NCP_SYSCON_MASK_IPI1,
-               NCP_SYSCON_MASK_IPI2,
-               NCP_SYSCON_MASK_IPI3,
-               NCP_SYSCON_MASK_IPI4,
-               NCP_SYSCON_MASK_IPI5,
-               NCP_SYSCON_MASK_IPI6,
-               NCP_SYSCON_MASK_IPI7,
-               NCP_SYSCON_MASK_IPI8,
-               NCP_SYSCON_MASK_IPI9,
-               NCP_SYSCON_MASK_IPI10,
-               NCP_SYSCON_MASK_IPI11,
-               NCP_SYSCON_MASK_IPI12,
-               NCP_SYSCON_MASK_IPI13,
-               NCP_SYSCON_MASK_IPI14,
-               NCP_SYSCON_MASK_IPI15,
-               NCP_SYSCON_MASK_IPI16,
-               NCP_SYSCON_MASK_IPI17,
-               NCP_SYSCON_MASK_IPI18
-};
-
-enum pm_error_code {
-       PM_ERR_DICKENS_IOREMAP = 200,
-       PM_ERR_DICKENS_SNOOP_DOMAIN,
-       PM_ERR_FAILED_PWR_DWN_RAM,
-       PM_ERR_FAILED_STAGE_1,
-       PM_ERR_ACK1_FAIL,
-       PM_ERR_RAM_ACK_FAIL,
-       PM_ERR_FAIL_L2ACK,
-       PM_ERR_FAIL_L2HSRAM
-};
-static void __iomem *syscon;
-
-u32 pm_cpu_powered_down;
-
+static u32 pm_cpu_powered_down;
 
 /*======================= LOCAL FUNCTIONS ==============================*/
-static void pm_set_bits_syscon_register(u32 reg, u32 data);
-static void pm_clear_bits_syscon_register(u32 reg, u32 data);
-static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit);
-static bool pm_wait_for_bit_clear_with_timeout(u32 reg,
+static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data);
+static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data);
+static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit);
+static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg,
                u32 bit);
 static void pm_dickens_logical_shutdown(u32 cluster);
 static int pm_dickens_logical_powerup(u32 cluster);
 static int pm_cpu_physical_isolation_and_power_down(int cpu);
 static void pm_L2_isolation_and_power_down(int cluster);
+static void __pm_cpu_shutdown(void *data);
 static int pm_cpu_physical_connection_and_power_up(int cpu);
 static int pm_L2_physical_connection_and_power_up(u32 cluster);
 static int pm_L2_logical_powerup(u32 cluster, u32 cpu);
 
 static bool pm_first_cpu_of_cluster(u32 cpu)
 {
-#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
        u32 count = 0;
        switch (cpu) {
        case (0):
@@ -165,14 +127,11 @@ static bool pm_first_cpu_of_cluster(u32 cpu)
                                __LINE__);
                break;
        }
-#endif
        return false;
 }
 
 bool pm_cpu_last_of_cluster(u32 cpu)
 {
-#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
-
        u32 count = 0;
        switch (cpu) {
        case (0):
@@ -231,11 +190,10 @@ bool pm_cpu_last_of_cluster(u32 cpu)
                                __LINE__);
                break;
        }
-#endif
        return false;
 }
 
-static void pm_set_bits_syscon_register(u32 reg, u32 data)
+static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data)
 {
        u32 tmp;
 
@@ -244,7 +202,7 @@ static void pm_set_bits_syscon_register(u32 reg, u32 data)
        writel(tmp, syscon + reg);
 }
 
-static void pm_clear_bits_syscon_register(u32 reg, u32 data)
+static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data)
 {
        u32 tmp;
 
@@ -253,7 +211,7 @@ static void pm_clear_bits_syscon_register(u32 reg, u32 data)
        writel(tmp, syscon + reg);
 }
 
-static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit)
+static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit)
 {
 
        u32 tmp = 0;
@@ -272,7 +230,8 @@ static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit)
        return true;
 }
 
-static bool pm_wait_for_bit_clear_with_timeout(u32 reg, u32 bit)
+static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg,
+               u32 bit)
 {
        u32 cnt = 0;
        u32 tmp = 0;
@@ -366,7 +325,7 @@ static int pm_dickens_logical_powerup(u32 cluster)
        void __iomem *dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
        if (dickens == NULL) {
                pr_err("Failed to map dickens registers\n");
-               return -PM_ERR_DICKENS_IOREMAP;
+               return -EINVAL;
        }
 
        bit = (0x01 << cluster_to_node[cluster]);
@@ -388,7 +347,7 @@ static int pm_dickens_logical_powerup(u32 cluster)
 
                if (0 == retries) {
                        pr_err("DICKENS: Failed on the SNOOP DONAIN\n");
-                       rval = -PM_ERR_DICKENS_SNOOP_DOMAIN;
+                       rval = -EINVAL;
                        goto dickens_power_up;
                }
 
@@ -408,8 +367,8 @@ static int pm_dickens_logical_powerup(u32 cluster)
        } while ((0 < --retries) && !CHECK_BIT(status, bit_pos));
 
        if (0 == retries) {
-               pr_err("DICKENS: Failed on the SNOOP DONAIN CTL SET\n");
-               rval = -PM_ERR_DICKENS_SNOOP_DOMAIN;
+               pr_err("DICKENS: Failed on the SNOOP DONAIN\n");
+               rval = -EINVAL;
                goto dickens_power_up;
        }
 
@@ -419,155 +378,230 @@ dickens_power_up:
        return rval;
 }
 
-static int pm_enable_ipi_interrupts(u32 cpu)
-{
-
-       u32 i;
-       u32 cpumask = 1 << cpu;
-       u32 powered_on_cpu = (~(pm_cpu_powered_down) & IPI_IRQ_MASK);
-
-       /* Enable the CPU IPI */
-       pm_set_bits_syscon_register(ipi_register[cpu], powered_on_cpu);
-
-       for (i = 0; i < MAX_IPI; i++) {
-               if ((1 << i) & powered_on_cpu)
-                       pm_set_bits_syscon_register(ipi_register[i], cpumask);
-       }
-
-       return 0;
-
-}
-
-void pm_init_syscon(void)
-{
-       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
-}
-
-bool pm_cpu_active(u32 cpu)
-{
-
-       bool success = false;
-       u32 reg;
-
-       reg = readl(syscon + NCP_SYSCON_PWR_QACTIVE);
-       if (reg & (1 << cpu))
-               success = true;
-
-       return success;
-
-}
-
-void pm_cpu_shutdown(u32 cpu)
+static void __pm_cpu_shutdown(void *data)
 {
 
+       pm_data *pm_request = (pm_data *)data;
+       void __iomem *syscon;
        bool success;
-       u32 reqcpu = cpu_logical_map(cpu);
-       u32 cluster = reqcpu / CORES_PER_CLUSTER;
-       u32 cluster_mask = (0x01 << cluster);
+       u32 cluster_mask = (0x01 << pm_request->cluster);
        bool last_cpu;
        int rval = 0;
 
-       /* Check to see if the cpu is powered up */
-       if (pm_cpu_powered_down & (1 << reqcpu)) {
-               pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__);
-               return;
-       }
-
-       pm_init_syscon();
-
        /*
         * Is this the last cpu of a cluster then turn off the L2 cache
         * along with the CPU.
         */
-       last_cpu = pm_cpu_last_of_cluster(reqcpu);
+       last_cpu = pm_cpu_last_of_cluster(pm_request->cpu);
        if (last_cpu) {
 
-               /* Disable all the interrupts to the cluster gic */
-               pm_set_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
-
                /* Remove the cluster from the Dickens coherency domain */
-               pm_dickens_logical_shutdown(cluster);
+               pm_dickens_logical_shutdown(pm_request->cluster);
 
                /* Power down the cpu */
-               pm_cpu_physical_isolation_and_power_down(reqcpu);
+               pm_cpu_physical_isolation_and_power_down(pm_request->cpu);
+
+               syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+               if (WARN_ON(!syscon))
+                       return;
 
-               pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask);
-               success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_CACTIVE_CNT, cluster);
+#if 0
+               pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_TS, cluster_mask);
+               success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_TS, pm_request->cluster);
+               if (!success) {
+                       pr_err(
+                                       "Failed to keep other cluster TS going on cluster %d: %s-%d\n",
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
+               }
+
+               pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_ATB, cluster_mask);
+               success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_ATB, pm_request->cluster);
+               if (!success) {
+                       pr_err(
+                                       "Failed to keep other cluster ATB going on cluster %d: %s-%d\n",
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
+               }
+
+               pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_APB, cluster_mask);
+               success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_APB, pm_request->cluster);
+               if (!success) {
+                       pr_err(
+                                       "Failed to keep other cluster APB going on cluster %d: %s-%d\n",
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
+               }
+#endif
+               pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask);
+               success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_CNT, pm_request->cluster);
                if (!success) {
                        pr_err(
                                        "Failed to keep other cluster count going on cluster %d: %s-%d\n",
-                                       cluster, __FILE__, __LINE__);
-                       goto pm_shutdown_exit;
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
                }
 
                /* Turn off the ACE */
-               pm_set_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask);
+               pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask);
 
                /* Wait for ACE to complete power off */
-               success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NACEPWRDNACK, cluster);
+               success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NACEPWRDNACK, pm_request->cluster);
                if (!success) {
                        pr_err("Failed to power off ACE on cluster %d: %s-%d\n",
-                                       cluster, __FILE__, __LINE__);
-                       goto pm_shutdown_exit;
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
                }
 
                /* Isolate the cluster */
-               pm_set_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask);
+               pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask);
 
                /* Wait for WFI L2 to go to standby */
-               success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_STANDBYWFIL2, cluster);
+               success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFIL2, pm_request->cluster);
                if (!success) {
                        pr_err("Failed to enter L2 WFI on cluster %d: %s-%d\n",
-                                       cluster, __FILE__, __LINE__);
-                       goto pm_shutdown_exit;
+                                       pm_request->cluster, __FILE__, __LINE__);
+                       iounmap(syscon);
+                       return;
                }
 
+               iounmap(syscon);
+
                /* Power off the L2 */
-               pm_L2_isolation_and_power_down(cluster);
+               pm_L2_isolation_and_power_down(pm_request->cluster);
                if (rval == 0) {
-                       pr_info("CPU %d is powered down with cluster: %d\n", reqcpu, cluster);
-                       pm_cpu_powered_down |= (1 << reqcpu);
+                       pr_info("CPU %d is powered down with cluster: %d\n", pm_request->cpu, pm_request->cluster);
+                       pm_cpu_powered_down |= (1 << pm_request->cpu);
                } else
-                       pr_err("CPU %d failed to power down\n", reqcpu);
+                       pr_err("CPU %d failed to power down\n", pm_request->cpu);
 
 
        } else {
 
-               rval = pm_cpu_physical_isolation_and_power_down(reqcpu);
+               rval = pm_cpu_physical_isolation_and_power_down(pm_request->cpu);
                if (rval == 0)
-                       pm_cpu_powered_down |= (1 << reqcpu);
+                       pm_cpu_powered_down |= (1 << pm_request->cpu);
                else
-                       pr_err("CPU %d failed to power down\n", reqcpu);
+                       pr_err("CPU %d failed to power down\n", pm_request->cpu);
        }
 
-pm_shutdown_exit:
-       iounmap(syscon);
        return;
 }
 
+
+int pm_cpu_logical_die(pm_data *pm_request)
+{
+       void __iomem *syscon;
+       bool success;
+
+       smp_call_function_single(pm_request->cpu, pm_cpu_logical_shutdown, (void *)pm_request, 1);
+
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
+       /* Wait for the cpu to enter wfi */
+       success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu);
+       if (!success) {
+               pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n",
+                               pm_request->cpu, __FILE__, __LINE__);
+               iounmap(syscon);
+               return -EINVAL;
+       }
+
+       iounmap(syscon);
+       return 0;
+}
+
+int pm_cpul2_logical_die(pm_data *pm_request)
+{
+       void __iomem *syscon;
+       bool success;
+
+       smp_call_function_single(pm_request->cpu, pm_L2_logical_shutdown, (void *)pm_request, 1);
+
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
+       /* Wait for the cpu to enter wfi */
+       success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu);
+       if (!success) {
+               pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n",
+                               pm_request->cpu, __FILE__, __LINE__);
+               iounmap(syscon);
+               return -EINVAL;
+       }
+
+       iounmap(syscon);
+       return 0;
+}
+
+void pm_cpu_shutdown(u32 cpu)
+{
+
+       pm_data pm_request;
+
+       u32 pcpu = cpu_logical_map(smp_processor_id());
+       u32 rcpu = cpumask_any_and(cpu_present_mask, cpu_online_mask);
+       u32 reqcpu = cpu_logical_map(cpu);
+
+       /* Check to see if the cpu is powered up */
+       if (pm_cpu_powered_down & (1 << cpu)) {
+               pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__);
+               return;
+       }
+       /*
+        * Is this the last cpu to be powered off, then don't
+        * allow the power to be shut off.
+        */
+       if (cpu == 0) {
+               pr_err("Cannot turn off cpu 0 - %s:%d\n", __FILE__, __LINE__);
+               return;
+       }
+
+       /*
+        * Is this process on the requested cpu to power down
+        * then send it to another cpu for processing
+        */
+       pm_request.cpu = cpu;
+       pm_request.cluster = reqcpu / CORES_PER_CLUSTER;
+
+       if (pcpu == cpu)
+               smp_call_function_single(rcpu, __pm_cpu_shutdown, (void *)&pm_request, 0);
+       else
+               __pm_cpu_shutdown(&pm_request);
+
+}
+
 int pm_cpu_powerup(u32 cpu)
 {
 
        bool first_cpu;
        int rval = 0;
+       void __iomem *syscon = NULL;
        u32 cpu_mask = (0x01 << cpu);
 
        u32 reqcpu = cpu_logical_map(cpu);
        u32 cluster = reqcpu / CORES_PER_CLUSTER;
-       u32 cluster_mask = (0x01 << cluster);
-       u32 timeout;
 
-       pm_init_syscon();
+       /* Hold the CPU in reset */
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
 
        /*
         * The key value has to be written before the CPU RST can be written.
         */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_set_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
 
-       /* Hold the CPU in reset */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_set_bits_syscon_register(NCP_SYSCON_HOLD_CPU, cpu_mask);
+       iounmap(syscon);
 
        /*
         * Is this the first cpu of a cluster to come back on?
@@ -576,16 +610,17 @@ int pm_cpu_powerup(u32 cpu)
        first_cpu = pm_first_cpu_of_cluster(cpu);
        if (first_cpu) {
 
+
                rval = pm_L2_logical_powerup(cluster, cpu);
                if (rval) {
                        pr_err("CPU: Failed the logical L2 power up\n");
-                       goto pm_power_up;
+                       return rval;
                }
-               pm_clear_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
                cluster_power_up[cluster] = true;
 
        }
 
+
        /*
         * Power up the CPU
         */
@@ -595,44 +630,32 @@ int pm_cpu_powerup(u32 cpu)
                goto pm_power_up;
        }
 
-       timeout = 30;
-
-       /* wait max 10 ms until cpuX is on */
-       while (!pm_cpu_active(cpu)) {
-
-               if (timeout-- == 0)
-                       break;
-
-               mdelay(1);
-       }
+       udelay(16);
 
-       if (timeout == 0) {
-               rval =  -ETIMEDOUT;
-               goto pm_power_up;
-       }
+       /* Clear the CPU from reset and let it go */
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
 
        /*
         * The key value must be written before the CPU RST can be written.
         */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
 
        /*
         * The key value must be written before HOLD CPU can be written.
         */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_CPU, cpu_mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_CPU, cpu_mask);
 
        /*
         * Clear the powered down mask
         */
        pm_cpu_powered_down &= ~(1 << cpu);
 
-       /* Enable the CPU IPI */
-       pm_enable_ipi_interrupts(cpu);
 
 pm_power_up:
-
        iounmap(syscon);
        return rval;
 }
@@ -642,8 +665,46 @@ unsigned long pm_get_powered_down_cpu(void)
        return pm_cpu_powered_down;
 }
 
+void pm_cpu_logical_shutdown(void *data)
+{
+       u32 val;
 
-inline void pm_cpu_logical_powerup(void)
+       asm volatile(
+       "       mrc     p15, 1, %0, c9, c0, 2\n"
+       : "=&r" (val)
+       : "Ir" (0x1)
+       : "cc");
+
+       asm volatile(
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       : "=&r" (val)
+       : "Ir" (CR_C)
+       : "cc");
+
+       /* Clear and invalidate all date from L1 data cache */
+       flush_cache_all();
+
+       /* Switch the processor over to AMP mode out of SMP */
+       asm volatile(
+                       "       mrc     p15, 0, %0, c1, c0, 1\n"
+                       "       bic     %0, %0, %1\n"
+                       "       mcr     p15, 0, %0, c1, c0, 1\n"
+                       : "=&r" (val)
+                       : "Ir" (0x40)
+                       : "cc");
+
+       isb();
+       dsb();
+
+       wfi();
+
+       return;
+
+}
+
+void pm_cpu_logical_powerup(void)
 {
        unsigned int v;
 
@@ -671,104 +732,174 @@ inline void pm_cpu_logical_powerup(void)
 
 static int pm_cpu_physical_isolation_and_power_down(int cpu)
 {
-
+       void __iomem *syscon;
        int rval = 0;
 
        bool success;
        u32 mask = (0x01 << cpu);
 
-       /* Disable the CPU IPI */
-       pm_clear_bits_syscon_register(ipi_register[cpu], IPI_IRQ_MASK);
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
 
        /* Initiate power down of the CPU's HS Rams */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask);
 
        /* Wait until the RAM power down is complete */
-       success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, 
cpu);
+       success = pm_test_for_bit_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
        if (!success) {
-               rval = -PM_ERR_FAILED_PWR_DWN_RAM;
+               rval = -EINVAL;
                pr_err("CPU: Failed to power down CPU RAM\n");
                goto power_down_cleanup;
        }
 
        /* Activate the CPU's isolation clamps */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask);
 
        /* Initiate power down of the CPU logic */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, 
mask);
 
        udelay(10);
 
        /* Continue power down of the CPU logic */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, 
mask);
 
-       success = 
pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+       success = pm_test_for_bit_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
        if (!success) {
-               rval = -PM_ERR_FAILED_STAGE_1;
+               rval = -EINVAL;
                pr_err("CPU: Failed to power down stage 1 cpu\n");
                goto power_down_cleanup;
        }
 
 power_down_cleanup:
-
+       iounmap(syscon);
        return rval;
 }
 
 static int pm_cpu_physical_connection_and_power_up(int cpu)
 {
        int rval = 0;
-
+       void __iomem *syscon;
        bool success;
        u32 mask = (0x01 << cpu);
 
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
        /* Initiate power up of the CPU */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
 
        /* Wait until CPU logic power is compete */
-       success = 
pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+       success = pm_wait_for_bit_clear_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
        if (!success) {
-               rval = -PM_ERR_ACK1_FAIL;
+               rval = -EINVAL;
                pr_err("CPU: Failed to get ACK from power down stage 1\n");
                goto power_up_cleanup;
        }
 
        /* Continue stage 2 power up of the CPU*/
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
 
-       udelay(20);
+       udelay(10);
 
        /* Initiate power up of HS Rams */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask);
 
        /* Wait until the RAM power up is complete */
-       success = 
pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
+       success = pm_wait_for_bit_clear_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
        if (!success) {
-               rval = -PM_ERR_RAM_ACK_FAIL;
+               rval = -EINVAL;
                pr_err("CPU: Failed to get ACK of power power up\n");
                goto power_up_cleanup;
        }
 
        /* Release the CPU's isolation clamps */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask);
 
 power_up_cleanup:
-
+       iounmap(syscon);
 
        return rval;
 
 }
 /*========================================== L2 FUNCTIONS 
========================================*/
 
-static void pm_L2_isolation_and_power_down(int cluster)
+void pm_L2_logical_shutdown(void *data)
 {
+       u32 val;
+
+
+       asm volatile(
+       "       mrc     p15, 0, %0, c1, c0, 0\n"
+       "       bic     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c1, c0, 0\n"
+       : "=&r" (val)
+       : "Ir" (CR_C)
+       : "cc");
+
+
+       asm volatile(
+                       /*
+                        * Disable L2 prefetch
+                        */
+                       "       mrc     p15, 1, %0, c15, c0, 3\n"
+                       "       orr     %0, %0, %1\n"
+                       "       mcr     p15, 1, %0, c15, c0, 3\n"
+                       : "=&r" (val)
+                       : "Ir" (0x400)
+                       : "cc");
 
+       isb();
+       dsb();
+
+       /* Clear and invalidate all L1 and L2 data cache */
+       flush_cache_all();
+
+
+       /* Turn the DBG Double Lock quiet */
+       asm volatile(
+                       /*
+                        * Turn Off the DBGOSDLR.DLK bit
+                        */
+                       "       mrc     p14, 0, %0, c1, c3, 4\n"
+                       "       orr     %0, %0, %1\n"
+                       "       mcr     p14, 0, %0, c1, c3, 4\n"
+                       : "=&r" (val)
+                       : "Ir" (0x1)
+                       : "cc");
+
+       /* Switch the processor over to AMP mode out of SMP */
+       asm volatile(
+                       "       mrc     p15, 0, %0, c1, c0, 1\n"
+                       "       bic     %0, %0, %1\n"
+                       "       mcr     p15, 0, %0, c1, c0, 1\n"
+                       : "=&r" (val)
+                       : "Ir" (0x40)
+                       : "cc");
+
+       isb();
+       dsb();
+
+       wfi();
+       return;
+}
+
+static void pm_L2_isolation_and_power_down(int cluster)
+{
+       void __iomem *syscon;
        u32 mask = (0x1 << cluster);
 
+
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return;
+
        /* Enable the chip select for the cluster */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask);
 
        /* Disable the hsram */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, 
mask);
 
        switch (cluster) {
        case (0):
@@ -789,11 +920,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
                                NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 
 #endif
@@ -817,11 +948,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
                                NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -844,11 +975,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
                                NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -871,11 +1002,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
                                NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_clear_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -885,45 +1016,51 @@ static void pm_L2_isolation_and_power_down(int cluster)
        }
 
        /* Power down stage 2 */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, 
mask);
 
        /* Power down stage 1 */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, 
mask);
 
+
+       iounmap(syscon);
 }
 
 static int pm_L2_physical_connection_and_power_up(u32 cluster)
 {
-
+       void __iomem *syscon;
        bool success;
        u32 mask = (0x1 << cluster);
        int rval = 0;
 
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
        /* Power up stage 1 */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, 
mask);
 
        /* Wait for the stage 1 power up to complete */
-       success = 
pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster);
+       success = pm_wait_for_bit_clear_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster);
        if (!success) {
                pr_err("CPU: Failed to ack the L2 Stage 1 Power up\n");
-               rval = -PM_ERR_FAIL_L2ACK;
+               rval = -EINVAL;
                goto power_up_l2_cleanup;
        }
 
        /* Power on stage 2 */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, 
mask);
 
        /* Set the chip select */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask);
 
-       /* Power up the snoop ram */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+       /* Power up the snoop ram */
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
 
        /* Wait for the stage 1 power up to complete */
-       success = 
pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster);
+       success = pm_wait_for_bit_clear_with_timeout(syscon, 
NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster);
        if (!success) {
                pr_err("CPU: failed to get the HSRAM power up ACK\n");
-               rval = -PM_ERR_FAIL_L2HSRAM;
+               rval = -EINVAL;
                goto power_up_l2_cleanup;
        }
 
@@ -946,11 +1083,11 @@ static int pm_L2_physical_connection_and_power_up(u32 
cluster)
                                NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 
 #endif
@@ -974,11 +1111,11 @@ static int pm_L2_physical_connection_and_power_up(u32 
cluster)
                                NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -1001,11 +1138,11 @@ static int pm_L2_physical_connection_and_power_up(u32 
cluster)
                                NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -1028,11 +1165,11 @@ static int pm_L2_physical_connection_and_power_up(u32 
cluster)
                                NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, 
RAM_BANK3_MASK);
                udelay(20);
 #else
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
                udelay(20);
-               
pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, 
RAM_ALL_MASK);
+               pm_set_bits_syscon_register(syscon, 
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
                udelay(20);
 #endif
                break;
@@ -1042,78 +1179,86 @@ static int pm_L2_physical_connection_and_power_up(u32 
cluster)
        }
 
        /* Clear the chip select */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, 
mask);
 
        /* Release the isolation clamps */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, 
mask);
 
        /* Turn the ACE bridge power on*/
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, mask);
 
 power_up_l2_cleanup:
+
+       iounmap(syscon);
+
        return rval;
 }
 
 static int pm_L2_logical_powerup(u32 cluster, u32 cpu)
 {
 
+       void __iomem *syscon;
        u32 mask = (0x1 << cluster);
+       u32 cpu_mask = (0x1 << cpu);
        int rval = 0;
 
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
        /* put the cluster into a cpu hold */
-       pm_set_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS,
                        cluster_to_poreset[cluster]);
 
-       /* Allow the L2 to be reset */
-       pm_clear_bits_syscon_register(NCP_SYSCON_LRSTDISABLE, mask);
+       /*
+        * Write the key so the reset cpu register can be written to.
+        */
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
 
        /* Hold the chip debug cluster */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_set_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask);
 
        /* Hold the L2 cluster */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_set_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask);
+
+       iounmap(syscon);
 
        /* Cluster physical power up */
        rval = pm_L2_physical_connection_and_power_up(cluster);
-       if (rval)
-               goto exit_pm_L2_logical_powerup;
-
 
        udelay(16);
 
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return -EINVAL;
+
        /* take the cluster out of a cpu hold */
-       pm_clear_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS,
                        cluster_to_poreset[cluster]);
 
        udelay(64);
 
        /* Enable the system counter */
-       pm_set_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, mask);
 
        /* Release the L2 cluster */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask);
 
        /* Release the chip debug cluster */
-       pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
-       pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
+       pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask);
+
 
-       /* Power up the dickens */
        rval = pm_dickens_logical_powerup(cluster);
-       if (rval)
-               goto exit_pm_L2_logical_powerup;
 
        /* start L2 */
-       pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACINACTM, mask);
-
-       /* Disable the L2 reset */
-       pm_set_bits_syscon_register(NCP_SYSCON_LRSTDISABLE, mask);
+       pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACINACTM, mask);
 
-       udelay(64);
-
-exit_pm_L2_logical_powerup:
+       iounmap(syscon);
 
        return rval;
 
@@ -1125,6 +1270,12 @@ void pm_debug_read_pwr_registers(void)
 {
        u32 reg;
 
+       void __iomem *syscon;
+
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return;
+
        reg = readl(syscon + 0x1400);
        pr_err("NCP_SYSCON_PWR_CLKEN: 0x%x\n", reg);
        reg = readl(syscon + NCP_SYSCON_PWR_ACINACTM);
@@ -1293,6 +1444,7 @@ void pm_debug_read_pwr_registers(void)
 #endif
 
 
+       iounmap(syscon);
 }
 
 
@@ -1300,7 +1452,11 @@ void pm_dump_L2_registers(void)
 {
        u32 reg;
 
+       void __iomem *syscon;
 
+       syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+       if (WARN_ON(!syscon))
+               return;
        reg = readl(syscon + 0x1580);
        pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2: 0x%x\n", reg);
        reg = readl(syscon + 0x1584);
@@ -1326,7 +1482,8 @@ void pm_dump_L2_registers(void)
        reg = readl(syscon + 0x15ac);
        pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0: 0x%x\n", reg);
 
-
+       iounmap(syscon);
+}
 
 
 void pm_dump_dickens(void)
@@ -1357,6 +1514,8 @@ void pm_dump_dickens(void)
        pr_err("DKN_MN_DVM_DOMAIN_CTL: 0x%x\n", status);
 
 
+       iounmap(dickens);
+
 
 }
 
diff --git a/arch/arm/mach-axxia/lsi_power_management.h 
b/arch/arm/mach-axxia/lsi_power_management.h
index 4de6bd7..4cb6d1f 100644
--- a/arch/arm/mach-axxia/lsi_power_management.h
+++ b/arch/arm/mach-axxia/lsi_power_management.h
@@ -155,7 +155,6 @@
 
 #define MAX_NUM_CLUSTERS    (4)
 #define CORES_PER_CLUSTER   (4)
-#define MAX_IPI                                (19)
 
 typedef struct {
        u32 cpu;
@@ -167,19 +166,18 @@ void pm_cpu_shutdown(u32 cpu);
 int pm_cpu_powerup(u32 cpu);
 void pm_debug_read_pwr_registers(void);
 void pm_dump_L2_registers(void);
+void pm_cpu_logical_shutdown(void *data);
 int pm_cpu_logical_die(pm_data *pm_request);
 int pm_cpul2_logical_die(pm_data *pm_request);
 unsigned long pm_get_powered_down_cpu(void);
 bool pm_cpu_last_of_cluster(u32 cpu);
+void pm_L2_logical_shutdown(void *data);
 void pm_dump_dickens(void);
 void pm_init_cpu(u32 cpu);
 void pm_cpu_logical_powerup(void);
-bool pm_cpu_active(u32 cpu);
-void pm_init_syscon(void);
 
 extern bool pm_in_progress[];
 extern bool cluster_power_up[];
-extern u32 pm_cpu_powered_down;
 
 
 #endif /* LSI_POWER_MANAGEMENT_H_ */
diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c
index aad676d..d453cb9 100644
--- a/arch/arm/mach-axxia/platsmp.c
+++ b/arch/arm/mach-axxia/platsmp.c
@@ -27,8 +27,6 @@
 #include <mach/axxia-gic.h>
 
 extern void axxia_secondary_startup(void);
-extern void axxia_cpu_power_management_gic_control(u32 cpu, bool enable);
-extern void axxia_dist_power_management_gic_control(bool enable);
 
 #define SYSCON_PHYS_ADDR 0x002010030000ULL
 
@@ -91,11 +89,10 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void __cpuinit axxia_secondary_init(unsigned int cpu)
 {
-       int phys_cpu;
-       int phys_cluster;
+       int phys_cpu, cluster;
 
        phys_cpu = cpu_logical_map(cpu);
-       phys_cluster = phys_cpu / 4;
+       cluster = (phys_cpu / 4) << 8;
 
        /*
         * Only execute this when powering up a cpu for hotplug.
@@ -106,20 +103,10 @@ void __cpuinit axxia_secondary_init(unsigned int cpu)
 
                axxia_gic_secondary_init();
        } else {
-
-#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
-               pm_cpu_logical_powerup();
-               mdelay(16);
-#endif
-
                axxia_gic_secondary_init();
-
-#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
                pm_cpu_logical_powerup();
-               if (cluster_power_up[phys_cluster])
-                       cluster_power_up[phys_cluster] = false;
-               pm_in_progress[phys_cpu] = false;
-#endif
+               pm_in_progress[cpu] = false;
+               cluster_power_up[cluster] = false;
        }
 
        /*
@@ -155,13 +142,14 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, 
struct task_struct *idle)
        powered_down_cpu = pm_get_powered_down_cpu();
 
        if (powered_down_cpu & (1 << phys_cpu)) {
-               pm_in_progress[phys_cpu] = true;
+               pm_in_progress[cpu] = true;
 
                rVal = pm_cpu_powerup(phys_cpu);
                if (rVal) {
                        _raw_spin_unlock(&boot_lock);
                        return rVal;
                }
+
        }
 
        /*
@@ -182,6 +170,7 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, struct 
task_struct *idle)
         * Bits:   |11 10 9 8|7 6 5 4 3 2|1 0
         *         | CLUSTER | Reserved  |CPU
         */
+       phys_cpu = cpu_logical_map(cpu);
        cluster = (phys_cpu / 4) << 8;
        phys_cpu = cluster + (phys_cpu % 4);
 
@@ -191,12 +180,10 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, 
struct task_struct *idle)
        /* Send a wakeup IPI to get the idled cpu out of WFI state */
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
-
        /* Wait for so long, then give up if nothing happens ... */
        timeout = jiffies + (1 * HZ);
        while (time_before(jiffies, timeout)) {
                smp_rmb();
-
                if (pen_release == -1)
                        break;
 
@@ -304,7 +291,6 @@ static void __init axxia_smp_prepare_cpus(unsigned int 
max_cpus)
        }
 
        iounmap(syscon);
-
 }
 
 struct smp_operations axxia_smp_ops __initdata = {
-- 
1.7.9.5

-- 
_______________________________________________
linux-yocto mailing list
linux-yocto@yoctoproject.org
https://lists.yoctoproject.org/listinfo/linux-yocto

Reply via email to