The return value of alloc_pages_node() should be checked for NULL
before it is passed to page_address().

Cc: Anju T Sudhakar <a...@linux.vnet.ibm.com>
Cc: Madhavan Srinivasan <ma...@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/perf/imc-pmu.c | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index dea243185ea4..cb50a9e1fd2d 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
 {
        int nid, rc = 0, core_id = (cpu / threads_per_core);
        struct imc_mem_info *mem_info;
+       struct page *page;
 
        /*
         * alloc_pages_node() will allocate memory for core in the
@@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
        mem_info->id = core_id;
 
        /* We need only vbase for core counters */
-       mem_info->vbase = page_address(alloc_pages_node(nid,
-                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                         __GFP_NOWARN, get_order(size)));
-       if (!mem_info->vbase)
+       page = alloc_pages_node(nid,
+                               GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                               __GFP_NOWARN, get_order(size));
+       if (!page)
                return -ENOMEM;
+       mem_info->vbase = page_address(page);
 
        /* Init the mutex */
        core_imc_refc[core_id].id = core_id;
@@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
        int nid = cpu_to_node(cpu_id);
 
        if (!local_mem) {
+               struct page *page;
                /*
                 * This case could happen only once at start, since we dont
                 * free the memory in cpu offline path.
                 */
-               local_mem = page_address(alloc_pages_node(nid,
+               page = alloc_pages_node(nid,
                                  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                 __GFP_NOWARN, get_order(size)));
-               if (!local_mem)
+                                 __GFP_NOWARN, get_order(size));
+               if (!page)
                        return -ENOMEM;
+               local_mem = page_address(page);
 
                per_cpu(thread_imc_mem, cpu_id) = local_mem;
        }
@@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
        int core_id = (cpu_id / threads_per_core);
 
        if (!local_mem) {
-               local_mem = page_address(alloc_pages_node(phys_id,
-                                       GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                       __GFP_NOWARN, get_order(size)));
-               if (!local_mem)
+               struct page *page;
+
+               page = alloc_pages_node(phys_id,
+                               GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                               __GFP_NOWARN, get_order(size));
+               if (!page)
                        return -ENOMEM;
+               local_mem = page_address(page);
                per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
                /* Initialise the counters for trace mode */
-- 
2.20.1

Reply via email to