On 14/12/18 2:41 PM, Anju T Sudhakar wrote:
Patch detects trace-imc events, does memory initializations for each online
cpu, and registers cpuhotplug call-backs.

Reviewed-by: Madhavan Srinivasan <ma...@linux.vnet.ibm.com>

Signed-off-by: Anju T Sudhakar <a...@linux.vnet.ibm.com>
---
  arch/powerpc/perf/imc-pmu.c               | 91 +++++++++++++++++++++++
  arch/powerpc/platforms/powernv/opal-imc.c |  3 +
  include/linux/cpuhotplug.h                |  1 +
  3 files changed, 95 insertions(+)

diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 5ca80545a849..1f09265c8fb0 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -43,6 +43,10 @@ static DEFINE_PER_CPU(u64 *, thread_imc_mem);
  static struct imc_pmu *thread_imc_pmu;
  static int thread_imc_mem_size;

+/* Trace IMC data structures */
+static DEFINE_PER_CPU(u64 *, trace_imc_mem);
+static int trace_imc_mem_size;
+
  static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
  {
        return container_of(event->pmu, struct imc_pmu, pmu);
@@ -1068,6 +1072,54 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
        imc_event_update(event);
  }

+/*
+ * Allocate a page of memory for each cpu, and load LDBAR with 0.
+ */
+static int trace_imc_mem_alloc(int cpu_id, int size)
+{
+       u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
+       int phys_id = cpu_to_node(cpu_id), rc = 0;
+
+       if (!local_mem) {
+               local_mem = page_address(alloc_pages_node(phys_id,
+                                       GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                                       __GFP_NOWARN, get_order(size)));
+               if (!local_mem)
+                       return -ENOMEM;
+               per_cpu(trace_imc_mem, cpu_id) = local_mem;
+
+               /* Initialise the counters for trace mode */
+               rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
+                                           get_hard_smp_processor_id(cpu_id));
+               if (rc) {
+                       pr_info("IMC:opal init failed for trace imc\n");
+                       return rc;
+               }
+       }
+
+       mtspr(SPRN_LDBAR, 0);
+       return 0;
+}
+
+static int ppc_trace_imc_cpu_online(unsigned int cpu)
+{
+       return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
+}
+
+static int ppc_trace_imc_cpu_offline(unsigned int cpu)
+{
+       mtspr(SPRN_LDBAR, 0);
+       return 0;
+}
+
+static int trace_imc_cpu_init(void)
+{
+       return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+                         "perf/powerpc/imc_trace:online",
+                         ppc_trace_imc_cpu_online,
+                         ppc_trace_imc_cpu_offline);
+}
+
  /* update_pmu_ops : Populate the appropriate operations for "pmu" */
  static int update_pmu_ops(struct imc_pmu *pmu)
  {
@@ -1189,6 +1241,17 @@ static void cleanup_all_thread_imc_memory(void)
        }
  }

+static void cleanup_all_trace_imc_memory(void)
+{
+       int i, order = get_order(trace_imc_mem_size);
+
+       for_each_online_cpu(i) {
+               if (per_cpu(trace_imc_mem, i))
+                       free_pages((u64)per_cpu(trace_imc_mem, i), order);
+
+       }
+}
+
  /* Function to free the attr_groups which are dynamically allocated */
  static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
  {
@@ -1230,6 +1293,11 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
                cleanup_all_thread_imc_memory();
        }
+
+       if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
+               cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
+               cleanup_all_trace_imc_memory();
+       }
  }

  /*
@@ -1312,6 +1380,21 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,

                thread_imc_pmu = pmu_ptr;
                break;
+       case IMC_DOMAIN_TRACE:
+               /* Update the pmu name */
+               pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+               if (!pmu_ptr->pmu.name)
+                       return -ENOMEM;
+
+               trace_imc_mem_size = pmu_ptr->counter_mem_size;
+               for_each_online_cpu(cpu) {
+                       res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
+                       if (res) {
+                               cleanup_all_trace_imc_memory();
+                               goto err;
+                       }
+               }
+               break;
        default:
                return -EINVAL;
        }
@@ -1384,6 +1467,14 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        goto err_free_mem;
                }

+               break;
+       case IMC_DOMAIN_TRACE:
+               ret = trace_imc_cpu_init();
+               if (ret) {
+                       cleanup_all_trace_imc_memory();
+                       goto err_free_mem;
+               }
+
                break;
        default:
                return  -EINVAL;        /* Unknown domain */
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 58a07948c76e..dedc9ae22662 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -284,6 +284,9 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
                case IMC_TYPE_THREAD:
                        domain = IMC_DOMAIN_THREAD;
                        break;
+               case IMC_TYPE_TRACE:
+                       domain = IMC_DOMAIN_TRACE;
+                       break;
                default:
                        pr_warn("IMC Unknown Device type \n");
                        domain = -1;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e0cd2baa8380..c471f2de878b 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -167,6 +167,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+       CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,

Reply via email to