Re: [PATCH] cxl: fix NULL dereference in cxl_context_init() on PowerVM guests

2016-07-27 Thread Ian Munsie
Whoops!

Acked-by: Ian Munsie 


[PATCH] cxl: fix NULL dereference in cxl_context_init() on PowerVM guests

2016-07-27 Thread Andrew Donnellan
Commit f67a6722d650 ("cxl: Workaround PE=0 hardware limitation in Mellanox
CX4") added a "min_pe" field to struct cxl_service_layer_ops, to allow us
to work around a Mellanox CX-4 hardware limitation.

When allocating the PE number in cxl_context_init(), we read from
ctx->afu->adapter->native->sl_ops->min_pe to get the minimum PE number.
Unsurprisingly, in a PowerVM guest ctx->afu->adapter->native is NULL, and
guests don't have a cxl_service_layer_ops struct anywhere.

Move min_pe from struct cxl_service_layer_ops to struct cxl so it's
accessible in both native and PowerVM environments. For the Mellanox CX-4,
set the min_pe value in set_sl_ops().
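
For clarity, a minimal sketch of the access pattern after the move (an
illustration only, assuming struct cxl is zero-initialised at allocation so
PowerVM guests simply see min_pe == 0):

	/* Native (PowerNV) probe, CX-4 only: */
	adapter->min_pe = 1;	/* PE 0 unusable, see set_sl_ops() below */

	/* cxl_context_init(), native and guest alike -- no sl_ops dereference: */
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);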

Fixes: f67a6722d650 ("cxl: Workaround PE=0 hardware limitation in Mellanox CX4")
Reported-by: Frederic Barrat 
Signed-off-by: Andrew Donnellan 
---
 drivers/misc/cxl/context.c | 3 +--
 drivers/misc/cxl/cxl.h | 2 +-
 drivers/misc/cxl/pci.c | 3 ++-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index bdee9a0..c466ee2 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -90,8 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 */
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
-   i = idr_alloc(&ctx->afu->contexts_idr, ctx,
- ctx->afu->adapter->native->sl_ops->min_pe,
+   i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
  ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 19b132f..76f8b94 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -549,7 +549,6 @@ struct cxl_service_layer_ops {
u64 (*timebase_read)(struct cxl *adapter);
int capi_mode;
bool needs_reset_before_disable;
-   int min_pe;
 };
 
 struct cxl_native {
@@ -591,6 +590,7 @@ struct cxl {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
+   int min_pe;
u64 ps_size;
u16 psl_rev;
u16 base_image;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3c8d0d2..bbf5fc9 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1552,14 +1552,15 @@ static const struct cxl_service_layer_ops xsl_ops = {
.write_timebase_ctrl = write_timebase_ctrl_xsl,
.timebase_read = timebase_read_xsl,
.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
-   .min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
 };
 
 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
 {
if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+   /* Mellanox CX-4 */
dev_info(&adapter->dev, "Device uses an XSL\n");
adapter->native->sl_ops = &xsl_ops;
+   adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
} else {
dev_info(&adapter->dev, "Device uses a PSL\n");
adapter->native->sl_ops = &psl_ops;
-- 
Andrew Donnellan  OzLabs, ADL Canberra
andrew.donnel...@au1.ibm.com  IBM Australia Limited


[PATCH v3] powernv/pci: Add PHB register dump debugfs handle

2016-07-27 Thread Russell Currey
On EEH events the kernel will print a dump of relevant registers.
If EEH is unavailable (e.g. CONFIG_EEH is disabled, or a new platform
doesn't yet have EEH support), this information isn't readily available.

Add a new debugfs handler to trigger a PHB register dump, so that this
information can be made available on demand.
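
Once applied, the dump can be triggered on demand from userspace, e.g.
(assuming debugfs is mounted in the usual place, and taking PHB 0x1 as an
example global number):

	echo 1 > /sys/kernel/debug/powerpc/PCI0001/dump_diag_regs

The register dump then appears in the kernel log, just as it would on an
EEH event.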

Signed-off-by: Russell Currey 
---
V3 changes:
- use s64 instead of int for ret
- check val != 1ULL instead of 1
- return -ENODEV instead of -EFAULT
- check hose and phb in the same statement
- whitespace changes for DEFINE_SIMPLE_ATTRIBUTE
- rename handler to "dump_diag_regs"
- more debugfs handlers seemed to use _ instead of -
V2 changes:
- use a simple attribute instead of full fops thanks to mpe
- miscellanous fixes thanks to Gavin
- rename from "regdump" to "dump_regs"
---
 arch/powerpc/platforms/powernv/pci-ioda.c | 39 ++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 891fc4a..2808fda 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3018,6 +3018,38 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
}
 }
 
+#ifdef CONFIG_DEBUG_FS
+static int pnv_pci_diag_data_set(void *data, u64 val)
+{
+   struct pci_controller *hose;
+   struct pnv_phb *phb;
+   s64 ret;
+
+   if (val != 1ULL)
+   return -EINVAL;
+
+   hose = (struct pci_controller *)data;
+   if (!hose || !hose->private_data)
+   return -ENODEV;
+
+   phb = hose->private_data;
+
+   /* Retrieve the diag data from firmware */
+   ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
+   if (ret != OPAL_SUCCESS)
+   return -EIO;
+
+   /* Print the diag data to the kernel log */
+   pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
+   return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
+   pnv_pci_diag_data_set, "%llu\n");
+
+#endif /* CONFIG_DEBUG_FS */
+
 static void pnv_pci_ioda_create_dbgfs(void)
 {
 #ifdef CONFIG_DEBUG_FS
@@ -3033,9 +3065,14 @@ static void pnv_pci_ioda_create_dbgfs(void)
 
sprintf(name, "PCI%04x", hose->global_number);
phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
-   if (!phb->dbgfs)
+   if (!phb->dbgfs) {
pr_warning("%s: Error on creating debugfs on PHB#%x\n",
__func__, hose->global_number);
+   continue;
+   }
+
+   debugfs_create_file("dump_diag_regs", 0200, phb->dbgfs, hose,
+   &pnv_pci_diag_data_fops);
}
 #endif /* CONFIG_DEBUG_FS */
 }
-- 
2.9.0


Re: [PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier

2016-07-27 Thread Balbir Singh


On 28/07/16 00:18, Michael Ellerman wrote:
> Like we just did for hash, split the device tree scanning parts out and
> call them from mmu_early_init_devtree().
> 
> Signed-off-by: Michael Ellerman 
> ---
>  arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
>  arch/powerpc/mm/init_64.c| 4 +++-
>  arch/powerpc/mm/pgtable-radix.c  | 3 +--
>  3 files changed, 5 insertions(+), 3 deletions(-)
> 
> v3: Merged into this series.
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 358f1410dc0d..9ee00c2576d0 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -109,6 +109,7 @@ extern int mmu_io_psize;
>  /* MMU initialization */
>  void mmu_early_init_devtree(void);
>  void hash__early_init_devtree(void);
> +void radix__early_init_devtree(void);
>  extern void radix_init_native(void);
>  extern void hash__early_init_mmu(void);
>  extern void radix__early_init_mmu(void);
> diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
> index d02c6c9a..e0ab33d20a10 100644
> --- a/arch/powerpc/mm/init_64.c
> +++ b/arch/powerpc/mm/init_64.c
> @@ -427,7 +427,9 @@ void __init mmu_early_init_devtree(void)
>   if (disable_radix)
>   cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
>  
> - if (!radix_enabled())
> + if (radix_enabled())
> + radix__early_init_devtree();
> + else
>   hash__early_init_devtree();
>  }
>  #endif /* CONFIG_PPC_STD_MMU_64 */
> diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
> index 003ff48a11b6..f34ccdbe0fbd 100644
> --- a/arch/powerpc/mm/pgtable-radix.c
> +++ b/arch/powerpc/mm/pgtable-radix.c
> @@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
>   return 1;
>  }
>  
> -static void __init radix_init_page_sizes(void)
> +void __init radix__early_init_devtree(void)
>  {
>   int rc;
>  
> @@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
>   __pte_frag_nr = H_PTE_FRAG_NR;
>   __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
>  
> - radix_init_page_sizes();
>   if (!firmware_has_feature(FW_FEATURE_LPAR)) {
>   radix_init_native();
>   lpcr = mfspr(SPRN_LPCR);
>

If I am reading this correctly, radix_init_page_sizes() has become
radix__early_init_devtree(), whereas hash__early_init_devtree() initializes
both segment and page sizes? I would still like to keep

mmu_early_init_devtree()
-> radix__early_init_devtree()
-> radix__init_page_sizes()


Balbir Singh.

Re: [PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree()

2016-07-27 Thread Balbir Singh


On 28/07/16 00:17, Michael Ellerman wrote:
> Move the handling of the disable_radix command line argument into the
> newly created mmu_early_init_devtree().
> 
> It's an MMU option so it's preferable to have it in an mm related file,
> and it also means platforms that don't support radix don't have to carry
> the code.
> 
> Signed-off-by: Michael Ellerman 
> ---

Should patches 1 and 2 be squashed together? Ideally a bisection should
never land on patch 1 by itself.

Anyway,
Acked-by: Balbir Singh 

[PATCH v13 30/30] selftests/powerpc: Fix a build issue

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

Fixes the following build failure -

cp_abort.c:90:3: error: ‘for’ loop initial declarations are only
allowed in C99 or C11 mode
   for (int i = 0; i < NUM_LOOPS; i++) {
   ^
cp_abort.c:90:3: note: use option -std=c99, -std=gnu99, -std=c11 or
-std=gnu11 to compile your code
cp_abort.c:97:3: error: ‘for’ loop initial declarations are only
allowed in C99 or C11 mode
   for (int i = 0; i < NUM_LOOPS; i++) {

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/context_switch/cp_abort.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/powerpc/context_switch/cp_abort.c b/tools/testing/selftests/powerpc/context_switch/cp_abort.c
index 5a5b55a..1ce7dce 100644
--- a/tools/testing/selftests/powerpc/context_switch/cp_abort.c
+++ b/tools/testing/selftests/powerpc/context_switch/cp_abort.c
@@ -67,7 +67,7 @@ int test_cp_abort(void)
/* 128 bytes for a full cache line */
char buf[128] __cacheline_aligned;
cpu_set_t cpuset;
-   int fd1[2], fd2[2], pid;
+   int fd1[2], fd2[2], pid, i;
char c;
 
/* only run this test on a P9 or later */
@@ -87,14 +87,14 @@ int test_cp_abort(void)
FAIL_IF(pid < 0);
 
if (!pid) {
-   for (int i = 0; i < NUM_LOOPS; i++) {
+   for (i = 0; i < NUM_LOOPS; i++) {
FAIL_IF((write(fd1[WRITE_FD], &c, 1)) != 1);
FAIL_IF((read(fd2[READ_FD], &c, 1)) != 1);
/* A paste succeeds if CR0 EQ bit is set */
FAIL_IF(paste(buf) & 0x2000);
}
} else {
-   for (int i = 0; i < NUM_LOOPS; i++) {
+   for (i = 0; i < NUM_LOOPS; i++) {
FAIL_IF((read(fd1[READ_FD], &c, 1)) != 1);
copy(buf);
FAIL_IF((write(fd2[WRITE_FD], &c, 1) != 1));
-- 
1.8.3.1


[PATCH v13 29/30] selftests/powerpc: Add .gitignore file for ptrace executables

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a .gitignore file for all the executables in
the ptrace test directory, so they no longer show up as untracked
files in git status output.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/.gitignore | 11 +++
 1 file changed, 11 insertions(+)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/.gitignore

diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
new file mode 100644
index 000..bdf3566
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -0,0 +1,11 @@
+ptrace-ebb
+ptrace-gpr
+ptrace-tm-gpr
+ptrace-tm-spd-gpr
+ptrace-tar
+ptrace-tm-tar
+ptrace-tm-spd-tar
+ptrace-vsx
+ptrace-tm-vsx
+ptrace-tm-spd-vsx
+ptrace-tm-spr
-- 
1.8.3.1


[PATCH v13 28/30] selftests/powerpc: Add ptrace tests for TM SPR registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for TM SPR registers. This
also adds ptrace interface based helper functions related to TM
SPR register access.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spr.c   | 186 +
 tools/testing/selftests/powerpc/ptrace/ptrace.h|  35 
 3 files changed, 223 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 797840a..f34670e 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,7 +1,8 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
 ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
-ptrace-tm-spd-vsx
+ptrace-tm-spd-vsx ptrace-tm-spr
 
+include ../../lib.mk
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
new file mode 100644
index 000..a4f361e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -0,0 +1,186 @@
+/*
+ * Ptrace test TM SPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+
+/* Tracee and tracer shared data */
+struct shared {
+   int flag;
+   struct tm_spr_regs regs;
+};
+unsigned long tfhar;
+
+int shm_id;
+volatile struct shared *cptr, *pptr;
+
+int shm_id1;
+volatile int *cptr1, *pptr1;
+
+#define TM_SCHED   0xde018c01
+#define TM_KVM_SCHED   0xe001ac01
+
+int validate_tm_spr(struct tm_spr_regs *regs)
+{
+   if (regs->tm_tfhar != tfhar )
+   return TEST_FAIL;
+
+   if ((regs->tm_texasr != TM_SCHED) && (regs->tm_texasr != TM_KVM_SCHED))
+   return TEST_FAIL;
+
+   if ((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0))
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+void tm_spr(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (struct shared *)shmat(shm_id, NULL, 0);
+   cptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+trans:
+   cptr1[0] = 0;
+   asm __volatile__(
+   "1: ;"
+   /* TM failover handler should follow TBEGIN */
+   "mflr 31;"
+   "bl 4f;"/* $ = TFHAR - 12 */
+   "4: ;"
+   "mflr %[tfhar];"
+   "mtlr 31;"
+
+   TBEGIN
+   "beq 2f;"
+
+   TSUSPEND
+   "li 8, 1;"
+   "sth 8, 0(%[cptr1]);"
+   TRESUME
+   "b .;"
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [tfhar] "=r" (tfhar), [res] "=r" (result),
+   [texasr] "=r" (texasr), [cptr1] "=r" (cptr1)
+   : [sprn_texasr] "i"  (SPRN_TEXASR)
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r8", "r9", "r10", "r11", "r31"
+   );
+
+   /* There are 2 32bit instructions before tbegin. */
+   tfhar += 12; 
+
+   if (result) {
+   if (!cptr->flag)
+   goto trans;
+
+   ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs);
+   shmdt((void *)cptr);
+   shmdt((void *)cptr1);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   shmdt((void *)cptr1);
+   exit(1);
+}
+
+int trace_tm_spr(pid_t child)
+{
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_tfhar,
+   pptr->regs.tm_texasr, pptr->regs.tm_tfiar);
+
+   pptr->flag = 1;
+   ret = stop_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+int ptrace_tm_spr(void)
+{
+   pid_t pid;
+   int ret, status;
+
+   SKIP_IF(!((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_HTM));
+   shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT);
+   shm_id1 = s

[PATCH v13 27/30] selftests/powerpc: Add ptrace tests for VSX, VMX registers in suspended TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for VSX, VMX registers
inside suspended TM context.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c   | 222 +
 2 files changed, 224 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 1b07649..797840a 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,6 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
+ptrace-tm-spd-vsx
 
 
 all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
new file mode 100644
index 000..dbb0cc1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -0,0 +1,222 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+volatile int *cptr, *pptr;
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+   loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_new(void)
+{
+   loadvsx(fp_load_new, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+   loadvsx(fp_load_ckpt, 0);
+}
+
+__attribute__((used)) void wait_parent(void)
+{
+   cptr[2] = 1;
+   while (!cptr[1]);
+}
+
+void tm_spd_vsx(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[2] = 0;
+   asm __volatile__(
+   "bl load_vsx_ckpt;"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "bl load_vsx_new;"
+   TSUSPEND
+   "bl load_vsx;"
+   "bl wait_parent;"
+   TRESUME
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+   [sprn_texasr] "i"  (SPRN_TEXASR)
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r8", "r9", "r10", "r11"
+   );
+
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+   shmdt((void *)cptr);
+
+   storevsx(fp_store, 0);
+   ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   exit(1);
+}
+
+int trace_tm_spd_vsx(pid_t child)
+{
+   unsigned long vsx[VSX_MAX];
+   unsigned long vmx[VMX_MAX + 2][2];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load_ckpt);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx_ckpt(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load_ckpt);
+   if (ret)
+   return TEST_FAIL;
+
+   memset(vsx, 0, sizeof(vsx));
+   memset(vmx, 0, sizeof(vmx));
+
+   load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+   ret = write_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = write_vmx_ckpt(child, vmx);

[PATCH v13 26/30] selftests/powerpc: Add ptrace tests for VSX, VMX registers in TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for VSX, VMX registers
inside TM context. This also adds ptrace interface based helper
functions related to checkpointed VSX, VMX register access.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-vsx.c   | 209 +
 2 files changed, 211 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index e3d9ceb..1b07649 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,6 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx
+
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
new file mode 100644
index 000..d63f45d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -0,0 +1,209 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+volatile unsigned long *cptr, *pptr;
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+   loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+   loadvsx(fp_load_ckpt, 0);
+}
+
+void tm_vsx(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[1] = 0;
+   asm __volatile__(
+   "bl load_vsx_ckpt;"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "bl load_vsx;"
+   TSUSPEND
+   "li 7, 1;"
+   "stw 7, 0(%[cptr1]);"
+   TRESUME
+   "b .;"
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+   [sprn_texasr] "i"  (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r7", "r8", "r9", "r10", "r11"
+   );
+
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+
+   shmdt((void *)cptr);
+   storevsx(fp_store, 0);
+   ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   exit(1);
+}
+
+int trace_tm_vsx(pid_t child)
+{
+   unsigned long vsx[VSX_MAX];
+   unsigned long vmx[VMX_MAX + 2][2];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load_ckpt);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx_ckpt(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load_ckpt);
+   if (ret)
+   return TEST_FAIL;
+
+   memset(vsx, 0, sizeof(vsx));
+   memset(vmx, 0, sizeof(vmx));
+
+   load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+   ret = write_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = write_vmx_ckpt(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   pptr[0] = 1;
+   ret = stop_trace(child);
+

[PATCH v13 25/30] selftests/powerpc: Add ptrace tests for VSX, VMX registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for VSX, VMX registers.
This also adds ptrace interface based helper functions related
to VSX, VMX register access, plus some assembly helper functions
related to VSX and VMX registers.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   2 +-
 .../testing/selftests/powerpc/ptrace/ptrace-vsx.c  | 143 +++
 .../testing/selftests/powerpc/ptrace/ptrace-vsx.h  | 121 ++
 tools/testing/selftests/powerpc/ptrace/ptrace.S| 265 +
 tools/testing/selftests/powerpc/ptrace/ptrace.h| 119 +
 5 files changed, 649 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 2916759..e3d9ceb 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,5 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
new file mode 100644
index 000..be771e9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
@@ -0,0 +1,143 @@
+/*
+ * Ptrace test for VMX/VSX registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile int *cptr, *pptr;
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+
+void vsx(void)
+{
+   int ret;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+   loadvsx(fp_load, 0);
+   cptr[1] = 1;
+
+   while (!cptr[0]);
+   shmdt((void *) cptr);
+
+   storevsx(fp_store, 0);
+   ret = compare_vsx_vmx(fp_store, fp_load_new);
+   if (ret)
+   exit(1);
+   exit(0);
+}
+
+int trace_vsx(pid_t child)
+{
+   unsigned long vsx[VSX_MAX];
+   unsigned long vmx[VMX_MAX + 2][2];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   memset(vsx, 0, sizeof(vsx));
+   memset(vmx, 0, sizeof(vmx));
+   load_vsx_vmx(fp_load_new, vsx, vmx);
+
+   ret = write_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = write_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = stop_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+int ptrace_vsx(void)
+{
+   pid_t pid;
+   int ret, status, i;
+
+   shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+
+   for (i = 0; i < VEC_MAX; i++)
+   fp_load[i] = i + rand();
+
+   for (i = 0; i < VEC_MAX; i++)
+   fp_load_new[i] = i + 2 * rand();
+
+   pid = fork();
+   if (pid < 0) {
+   perror("fork() failed");
+   return TEST_FAIL;
+   }
+
+   if (pid == 0)
+   vsx();
+
+   if (pid) {
+   pptr = (int *)shmat(shm_id, NULL, 0);
+   while (!pptr[1]);
+
+   ret = trace_vsx(pid);
+   if (ret) {
+   kill(pid, SIGTERM);
+   shmdt((void *)pptr);
+   shmctl(shm_id, IPC_RMID, NULL);
+   return TEST_FAIL;
+   }
+
+   pptr[0] = 1;
+   shmdt((void *)pptr);
+
+   ret = wait(&status);
+   shmctl(shm_id, IPC_RMID, NULL);
+   if (ret != pid) {
+   printf("Child's exit status not captured\n");
+   return TEST_FAIL;
+   }
+
+   if (WIFEXITED(status)) {
+   if (WEXITSTATUS(status))
+   return TEST_FAIL;
+   }
+   }
+   return TEST

[PATCH v13 24/30] selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR in suspended TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for TAR, PPR, DSCR
registers inside suspended TM context.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   2 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spd-tar.c   | 195 +
 2 files changed, 196 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 77d7a13..2916759 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,5 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
new file mode 100644
index 000..da4c477
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -0,0 +1,195 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+volatile int *cptr, *pptr;
+
+__attribute__((used)) void wait_parent(void)
+{
+   cptr[2] = 1;
+   while (!cptr[1]);
+}
+
+void tm_spd_tar(void)
+{
+   unsigned long result, texasr;
+   unsigned long regs[3];
+   int ret;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[2] = 0;
+   asm __volatile__(
+   "li 4, %[tar_1];"
+   "mtspr %[sprn_tar],  4;"/* TAR_1 */
+   "li 4, %[dscr_1];"
+   "mtspr %[sprn_dscr], 4;"/* DSCR_1 */
+   "or 31,31,31;"  /* PPR_1*/
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "li 4, %[tar_2];"
+   "mtspr %[sprn_tar],  4;"/* TAR_2 */
+   "li 4, %[dscr_2];"
+   "mtspr %[sprn_dscr], 4;"/* DSCR_2 */
+   "or 1,1,1;" /* PPR_2 */
+
+   TSUSPEND
+   "li 4, %[tar_3];"
+   "mtspr %[sprn_tar],  4;"/* TAR_3 */
+   "li 4, %[dscr_3];"
+   "mtspr %[sprn_dscr], 4;"/* DSCR_3 */
+   "or 6,6,6;" /* PPR_3 */
+   "bl wait_parent;"
+   TRESUME
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   /* Transaction abort handler */
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [val] "r" (cptr[1]), [sprn_dscr]"i"(SPRN_DSCR),
+   [sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR),
+   [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
+   [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
+   [tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
+   : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+   );
+
+   /* TM failed, analyse */
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+
+   regs[0] = mfspr(SPRN_TAR);
+   regs[1] = mfspr(SPRN_PPR);
+   regs[2] = mfspr(SPRN_DSCR);
+
+   shmdt(&cptr);
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   user_read, regs[0], regs[1], regs[2]);
+
+   ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt(&cptr);
+   exit(1);
+}
+
+int trace_tm_spd_tar(pid_t child)
+{
+   unsigned long regs[3];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tar_registers(child, regs);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   ptrace_read_running, regs[0], regs[1], regs[2]);
+
+   ret = validate_tar_registers(regs, TAR_3, PPR_3, DSCR_3);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tm_checkpointed_state(child, regs);
+   if (ret)
+   return TEST_FAIL;
+
+   prin

[PATCH v13 23/30] selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR in TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for TAR, PPR, DSCR
registers inside TM context. This also adds ptrace
interface based helper functions related to checkpointed
TAR, PPR, DSCR register access.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   2 +-
 .../selftests/powerpc/ptrace/ptrace-tm-tar.c   | 182 +
 2 files changed, 183 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index c794057..77d7a13 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,5 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar
+ptrace-tar ptrace-tm-tar
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
new file mode 100644
index 000..1fd006c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -0,0 +1,182 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+volatile unsigned long *cptr, *pptr;
+
+
+void tm_tar(void)
+{
+   unsigned long result, texasr;
+   unsigned long regs[3];
+   int ret;
+
+   cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[1] = 0;
+   asm __volatile__(
+   "li 4, %[tar_1];"
+   "mtspr %[sprn_tar],  4;"/* TAR_1 */
+   "li 4, %[dscr_1];"
+   "mtspr %[sprn_dscr], 4;"/* DSCR_1 */
+   "or 31,31,31;"  /* PPR_1*/
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "li 4, %[tar_2];"
+   "mtspr %[sprn_tar],  4;"/* TAR_2 */
+   "li 4, %[dscr_2];"
+   "mtspr %[sprn_dscr], 4;"/* DSCR_2 */
+   "or 1,1,1;" /* PPR_2 */
+   TSUSPEND
+   "li 0, 1;"
+   "stw 0, 0(%[cptr1]);"
+   TRESUME
+   "b .;"
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   /* Transaction abort handler */
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR),
+   [sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
+   [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
+   [dscr_2]"i"(DSCR_2), [cptr1] "r" (&cptr[1])
+   : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+   );
+
+   /* TM failed, analyse */
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+
+   regs[0] = mfspr(SPRN_TAR);
+   regs[1] = mfspr(SPRN_PPR);
+   regs[2] = mfspr(SPRN_DSCR);
+
+   shmdt(&cptr);
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   user_read, regs[0], regs[1], regs[2]);
+
+   ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt(&cptr);
+   exit(1);
+}
+
+int trace_tm_tar(pid_t child)
+{
+   unsigned long regs[3];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tar_registers(child, regs);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   ptrace_read_running, regs[0], regs[1], regs[2]);
+
+   ret = validate_tar_registers(regs, TAR_2, PPR_2, DSCR_2);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tm_checkpointed_state(child, regs);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   ptrace_read_ckpt, regs[0], regs[1], regs[2]);
+
+   ret = validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4);

[PATCH v13 22/30] selftests/powerpc: Add ptrace tests for TAR, PPR, DSCR registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for TAR, PPR, DSCR
registers. This also adds ptrace interface based helper
functions related to TAR, PPR, DSCR register access.
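
As a rough illustration only (not necessarily how ptrace.h implements it),
such a helper boils down to a PTRACE_GETREGSET call against the TAR regset
exposed by the kernel-side patches earlier in this series; NT_PPC_TAR below
is assumed to be provided by those patches via linux/elf.h:

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/uio.h>
	#include <linux/elf.h>		/* NT_PPC_TAR (assumed available) */

	/* Sketch: read the running TAR value of a ptrace-stopped child */
	static long read_tar(pid_t child, unsigned long *tar)
	{
		struct iovec iov = { .iov_base = tar, .iov_len = sizeof(*tar) };

		return ptrace(PTRACE_GETREGSET, child, (void *)NT_PPC_TAR, &iov);
	}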

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../testing/selftests/powerpc/ptrace/ptrace-tar.c  | 159 ++
 .../testing/selftests/powerpc/ptrace/ptrace-tar.h  |  50 ++
 tools/testing/selftests/powerpc/ptrace/ptrace.h| 181 +
 4 files changed, 392 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tar.h

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index d0f000c..c794057 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,4 +1,5 @@
-TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr
+TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+ptrace-tar
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
new file mode 100644
index 000..e7d5938
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
@@ -0,0 +1,159 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-tar.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile int *cptr;
+volatile int *pptr;
+
+void tar(void)
+{
+   unsigned long reg[3];
+   int ret;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+   printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+   user_write, TAR_1, PPR_1, DSCR_1);
+
+   mtspr(SPRN_TAR, TAR_1);
+   mtspr(SPRN_PPR, PPR_1);
+   mtspr(SPRN_DSCR, DSCR_1);
+
+   cptr[2] = 1;
+
+   /* Wait on parent */
+   while (!cptr[0]);
+
+   reg[0] = mfspr(SPRN_TAR);
+   reg[1] = mfspr(SPRN_PPR);
+   reg[2] = mfspr(SPRN_DSCR);
+
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   user_read, reg[0], reg[1], reg[2]);
+
+   /* Unblock the parent now */
+   cptr[1] = 1;
+   shmdt((int *)cptr);
+
+   ret = validate_tar_registers(reg, TAR_2, PPR_2, DSCR_2);
+   if (ret)
+   exit(1);
+   exit(0);
+}
+
+int trace_tar(pid_t child)
+{
+   unsigned long reg[3];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tar_registers(child, reg);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+   ptrace_read_running, reg[0], reg[1], reg[2]);
+
+   ret = validate_tar_registers(reg, TAR_1, PPR_1, DSCR_1);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = stop_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+int trace_tar_write(pid_t child)
+{
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = write_tar_registers(child, TAR_2, PPR_2, DSCR_2);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+   ptrace_write_running, TAR_2, PPR_2, DSCR_2);
+
+   ret = stop_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+int ptrace_tar(void)
+{
+   pid_t pid;
+   int ret, status;
+
+   shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+   pid = fork();
+   if (pid < 0) {
+   perror("fork() failed");
+   return TEST_FAIL;
+   }
+
+   if (pid == 0)
+   tar();
+
+   if (pid) {
+   pptr = (int *)shmat(shm_id, NULL, 0);
+   pptr[0] = 0;
+   pptr[1] = 0;
+
+   while (!pptr[2]);
+   ret = trace_tar(pid);
+   if (ret)
+   return ret;
+
+   ret = trace_tar_write(pid);
+   if (ret)
+   return ret;
+
+   /* Unblock the child now */
+   pptr[0] = 1;
+
+   /* Wait on child */
+   while (!pptr[1]);
+
+   shmdt((int *)pptr);
+
+   ret = wait(&status);
+   shmctl(shm_id, IPC_RMID, NULL);
+   if (ret != pid) {
+   printf("Child's exit status not captur

[PATCH v13 21/30] selftests/powerpc: Add ptrace tests for GPR/FPR registers in suspended TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for GPR/FPR registers
inside suspended TM context.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   2 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c   | 327 +
 2 files changed, 328 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 170683a..d0f000c 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr
+TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
new file mode 100644
index 000..b144e87
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -0,0 +1,327 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile int *cptr, *pptr;
+
+void store_gpr(unsigned long *addr);
+void store_fpr(float *addr);
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+float d = FPR_4;
+
+__attribute__((used)) void wait_parent(void)
+{
+   cptr[2] = 1;
+   while (!cptr[1]);
+}
+
+void tm_spd_gpr(void)
+{
+   unsigned long gpr_buf[18];
+   unsigned long result, texasr;
+   float fpr_buf[32];
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[2] = 0;
+   asm __volatile__(
+
+   "li 14, %[gpr_1];"
+   "li 15, %[gpr_1];"
+   "li 16, %[gpr_1];"
+   "li 17, %[gpr_1];"
+   "li 18, %[gpr_1];"
+   "li 19, %[gpr_1];"
+   "li 20, %[gpr_1];"
+   "li 21, %[gpr_1];"
+   "li 22, %[gpr_1];"
+   "li 23, %[gpr_1];"
+   "li 24, %[gpr_1];"
+   "li 25, %[gpr_1];"
+   "li 26, %[gpr_1];"
+   "li 27, %[gpr_1];"
+   "li 28, %[gpr_1];"
+   "li 29, %[gpr_1];"
+   "li 30, %[gpr_1];"
+   "li 31, %[gpr_1];"
+
+   "lfs 0, 0(%[flt_1]);"
+   "lfs 1, 0(%[flt_1]);"
+   "lfs 2, 0(%[flt_1]);"
+   "lfs 3, 0(%[flt_1]);"
+   "lfs 4, 0(%[flt_1]);"
+   "lfs 5, 0(%[flt_1]);"
+   "lfs 6, 0(%[flt_1]);"
+   "lfs 7, 0(%[flt_1]);"
+   "lfs 8, 0(%[flt_1]);"
+   "lfs 9, 0(%[flt_1]);"
+   "lfs 10, 0(%[flt_1]);"
+   "lfs 11, 0(%[flt_1]);"
+   "lfs 12, 0(%[flt_1]);"
+   "lfs 13, 0(%[flt_1]);"
+   "lfs 14, 0(%[flt_1]);"
+   "lfs 15, 0(%[flt_1]);"
+   "lfs 16, 0(%[flt_1]);"
+   "lfs 17, 0(%[flt_1]);"
+   "lfs 18, 0(%[flt_1]);"
+   "lfs 19, 0(%[flt_1]);"
+   "lfs 20, 0(%[flt_1]);"
+   "lfs 21, 0(%[flt_1]);"
+   "lfs 22, 0(%[flt_1]);"
+   "lfs 23, 0(%[flt_1]);"
+   "lfs 24, 0(%[flt_1]);"
+   "lfs 25, 0(%[flt_1]);"
+   "lfs 26, 0(%[flt_1]);"
+   "lfs 27, 0(%[flt_1]);"
+   "lfs 28, 0(%[flt_1]);"
+   "lfs 29, 0(%[flt_1]);"
+   "lfs 30, 0(%[flt_1]);"
+   "lfs 31, 0(%[flt_1]);"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "li 14, %[gpr_2];"
+   "li 15, %[gpr_2];"
+   "li 16, %[gpr_2];"
+   "li 17, %[gpr_2];"
+   "li 18, %[gpr_2];"
+   "li 19, %[gpr_2];"
+   "li 20, %[gpr_2];"
+   "li 21, %[gpr_2];"
+   "li 22, %[gpr_2];"
+   "li 23, %[gpr_2];"
+   "li 24, %[gpr_2];"
+   "li 25, %[gpr_2];"
+   "li 26, %[gpr_2];"
+   "li 27, %[gpr_2];"
+   "li 28, %[gpr_2];"
+   "li 29, %[gpr_2];"
+   "li 30, %[gpr_2];"
+   "li 31, %[gpr_2];"
+
+   TSUSPEND
+
+   "li 14, %[gpr_4];"
+   "li 15, %[gpr_4];"
+   "li 16, %[gpr_4];"
+   "li 17, %[gpr_4];"
+   "li 18, %[gpr_4];"
+   "li 19, %[gpr_4];"
+   "li 20, %[gpr_4];"

[PATCH v13 20/30] selftests/powerpc: Add ptrace tests for GPR/FPR registers in TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for GPR/FPR registers
inside TM context. This adds ptrace interface based helper
functions related to checkpointed GPR/FPR access.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-gpr.c   | 299 +
 2 files changed, 301 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 31e8e33..170683a 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,8 +1,9 @@
-TEST_PROGS := ptrace-ebb ptrace-gpr
+TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
 $(TEST_PROGS): ../harness.c ptrace.S ../utils.c ptrace.h
ptrace-ebb: ../pmu/event.c ../pmu/lib.c ../pmu/ebb/ebb_handler.S ../pmu/ebb/busy_loop.S
+
 clean:
rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
new file mode 100644
index 000..51eb71e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -0,0 +1,299 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile unsigned long *cptr, *pptr;
+
+void store_gpr(unsigned long *addr);
+void store_fpr(float *addr);
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void tm_gpr(void)
+{
+   unsigned long gpr_buf[18];
+   unsigned long result, texasr;
+   float fpr_buf[32];
+
+   printf("Starting the child\n");
+   cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[1] = 0;
+   asm __volatile__(
+
+   "li 14, %[gpr_1];"
+   "li 15, %[gpr_1];"
+   "li 16, %[gpr_1];"
+   "li 17, %[gpr_1];"
+   "li 18, %[gpr_1];"
+   "li 19, %[gpr_1];"
+   "li 20, %[gpr_1];"
+   "li 21, %[gpr_1];"
+   "li 22, %[gpr_1];"
+   "li 23, %[gpr_1];"
+   "li 24, %[gpr_1];"
+   "li 25, %[gpr_1];"
+   "li 26, %[gpr_1];"
+   "li 27, %[gpr_1];"
+   "li 28, %[gpr_1];"
+   "li 29, %[gpr_1];"
+   "li 30, %[gpr_1];"
+   "li 31, %[gpr_1];"
+
+   "lfs 0, 0(%[flt_1]);"
+   "lfs 1, 0(%[flt_1]);"
+   "lfs 2, 0(%[flt_1]);"
+   "lfs 3, 0(%[flt_1]);"
+   "lfs 4, 0(%[flt_1]);"
+   "lfs 5, 0(%[flt_1]);"
+   "lfs 6, 0(%[flt_1]);"
+   "lfs 7, 0(%[flt_1]);"
+   "lfs 8, 0(%[flt_1]);"
+   "lfs 9, 0(%[flt_1]);"
+   "lfs 10, 0(%[flt_1]);"
+   "lfs 11, 0(%[flt_1]);"
+   "lfs 12, 0(%[flt_1]);"
+   "lfs 13, 0(%[flt_1]);"
+   "lfs 14, 0(%[flt_1]);"
+   "lfs 15, 0(%[flt_1]);"
+   "lfs 16, 0(%[flt_1]);"
+   "lfs 17, 0(%[flt_1]);"
+   "lfs 18, 0(%[flt_1]);"
+   "lfs 19, 0(%[flt_1]);"
+   "lfs 20, 0(%[flt_1]);"
+   "lfs 21, 0(%[flt_1]);"
+   "lfs 22, 0(%[flt_1]);"
+   "lfs 23, 0(%[flt_1]);"
+   "lfs 24, 0(%[flt_1]);"
+   "lfs 25, 0(%[flt_1]);"
+   "lfs 26, 0(%[flt_1]);"
+   "lfs 27, 0(%[flt_1]);"
+   "lfs 28, 0(%[flt_1]);"
+   "lfs 29, 0(%[flt_1]);"
+   "lfs 30, 0(%[flt_1]);"
+   "lfs 31, 0(%[flt_1]);"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "li 14, %[gpr_2];"
+   "li 15, %[gpr_2];"
+   "li 16, %[gpr_2];"
+   "li 17, %[gpr_2];"
+   "li 18, %[gpr_2];"
+   "li 19, %[gpr_2];"
+   "li 20, %[gpr_2];"
+   "li 21, %[gpr_2];"
+   "li 22, %[gpr_2];"
+   "li 23, %[gpr_2];"
+   "li 24, %[gpr_2];"
+   "li 25, %[gpr_2];"
+   "li 26, %[gpr_2];"
+   "li 27, %[gpr_2];"
+   "li 28, %[gpr_2];"
+   "li 29, %[gpr_2];"
+   "li 30, %[gpr_2];"
+   "li 31, %[gpr_2];"
+
+
+   "lfs 0, 0(%[flt_2]);"
+   "lfs 1, 0(%[flt_2]);"
+   "lfs 2, 0(%[flt_2]);"
+

[PATCH v13 19/30] selftests/powerpc: Add ptrace tests for GPR/FPR registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for GPR/FPR registers.
This adds ptrace interface based helper functions related to
GPR/FPR access and some assembly helper functions related to
GPR/FPR registers.
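
For context, a minimal sketch of one way a tracer can read the tracee's GPRs
on powerpc, via the standard NT_PRSTATUS regset (the helpers added in
ptrace.h may differ in detail; this is only an assumed illustration):

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/uio.h>
	#include <elf.h>		/* NT_PRSTATUS */
	#include <asm/ptrace.h>		/* struct pt_regs */

	/* Sketch: fetch all GPRs (plus nip, msr, ...) of a ptrace-stopped child */
	static long read_gprs(pid_t child, struct pt_regs *regs)
	{
		struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

		return ptrace(PTRACE_GETREGSET, child, (void *)NT_PRSTATUS, &iov);
	}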

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../testing/selftests/powerpc/ptrace/ptrace-gpr.c  | 199 
 .../testing/selftests/powerpc/ptrace/ptrace-gpr.h  |  74 
 tools/testing/selftests/powerpc/ptrace/ptrace.S| 131 +
 tools/testing/selftests/powerpc/ptrace/ptrace.h| 208 +
 5 files changed, 614 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace.S

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index f6948f3..31e8e33 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,4 +1,5 @@
-TEST_PROGS := ptrace-ebb
+TEST_PROGS := ptrace-ebb ptrace-gpr
+
 all: $(TEST_PROGS)
 CFLAGS += -m64
 $(TEST_PROGS): ../harness.c ptrace.S ../utils.c ptrace.h
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
new file mode 100644
index 000..86bd6a4d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -0,0 +1,199 @@
+/*
+ * Ptrace test for GPR/FPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile int *cptr, *pptr;
+
+void store_gpr(unsigned long *addr);
+void store_fpr(float *addr);
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void gpr(void)
+{
+   unsigned long gpr_buf[18];
+   float fpr_buf[32];
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+   asm __volatile__(
+   "li 14, %[gpr_1];"
+   "li 15, %[gpr_1];"
+   "li 16, %[gpr_1];"
+   "li 17, %[gpr_1];"
+   "li 18, %[gpr_1];"
+   "li 19, %[gpr_1];"
+   "li 20, %[gpr_1];"
+   "li 21, %[gpr_1];"
+   "li 22, %[gpr_1];"
+   "li 23, %[gpr_1];"
+   "li 24, %[gpr_1];"
+   "li 25, %[gpr_1];"
+   "li 26, %[gpr_1];"
+   "li 27, %[gpr_1];"
+   "li 28, %[gpr_1];"
+   "li 29, %[gpr_1];"
+   "li 30, %[gpr_1];"
+   "li 31, %[gpr_1];"
+
+   "lfs 0, 0(%[flt_1]);"
+   "lfs 1, 0(%[flt_1]);"
+   "lfs 2, 0(%[flt_1]);"
+   "lfs 3, 0(%[flt_1]);"
+   "lfs 4, 0(%[flt_1]);"
+   "lfs 5, 0(%[flt_1]);"
+   "lfs 6, 0(%[flt_1]);"
+   "lfs 7, 0(%[flt_1]);"
+   "lfs 8, 0(%[flt_1]);"
+   "lfs 9, 0(%[flt_1]);"
+   "lfs 10, 0(%[flt_1]);"
+   "lfs 11, 0(%[flt_1]);"
+   "lfs 12, 0(%[flt_1]);"
+   "lfs 13, 0(%[flt_1]);"
+   "lfs 14, 0(%[flt_1]);"
+   "lfs 15, 0(%[flt_1]);"
+   "lfs 16, 0(%[flt_1]);"
+   "lfs 17, 0(%[flt_1]);"
+   "lfs 18, 0(%[flt_1]);"
+   "lfs 19, 0(%[flt_1]);"
+   "lfs 20, 0(%[flt_1]);"
+   "lfs 21, 0(%[flt_1]);"
+   "lfs 22, 0(%[flt_1]);"
+   "lfs 23, 0(%[flt_1]);"
+   "lfs 24, 0(%[flt_1]);"
+   "lfs 25, 0(%[flt_1]);"
+   "lfs 26, 0(%[flt_1]);"
+   "lfs 27, 0(%[flt_1]);"
+   "lfs 28, 0(%[flt_1]);"
+   "lfs 29, 0(%[flt_1]);"
+   "lfs 30, 0(%[flt_1]);"
+   "lfs 31, 0(%[flt_1]);"
+
+   :
+   : [gpr_1]"i"(GPR_1), [flt_1] "r" (&a)
+   : "memory", "r6", "r7", "r8", "r9", "r10",
+   "r11", "r12", "r13", "r14", "r15", "r16", "r17",
+   "r18", "r19", "r20", "r21", "r22", "r23", "r24",
+   "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+   );
+
+   cptr[1] = 1;
+
+   while (!cptr[0]);
+
+   shmdt((void *)cptr);
+   store_gpr(gpr_buf);
+   store_fpr(fpr_buf);
+
+   if (validate_gpr(gpr_buf, GPR_3))
+   exit(1);
+
+   if (validate_fpr_float(fpr_buf, c))
+   exit(1);
+
+   exit(0);
+}
+
+int trace_gpr(pid_t child)
+{
+   unsigned long gpr[18];
+   unsigned long fpr[32];
+   int

[PATCH v13 18/30] selftests/powerpc: Add ptrace tests for EBB

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for EBB/PMU specific
registers. This also adds some generic ptrace interface
based helper functions to be used by other patches later
in the series.
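
The generic helpers referred to here (start_trace(), stop_trace() and
friends in ptrace.h, whose bodies are not visible in this excerpt) are
essentially thin wrappers around attach/detach. Roughly, as a sketch only:

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>

	/* Sketch: attach to the child and wait until it is ptrace-stopped */
	static int start_trace(pid_t child)
	{
		int status;

		if (ptrace(PTRACE_ATTACH, child, NULL, NULL))
			return TEST_FAIL;
		if (waitpid(child, &status, 0) != child || !WIFSTOPPED(status))
			return TEST_FAIL;
		return TEST_PASS;
	}

	/* Sketch: detach and let the child run again */
	static int stop_trace(pid_t child)
	{
		return ptrace(PTRACE_DETACH, child, NULL, NULL) ? TEST_FAIL : TEST_PASS;
	}

(TEST_PASS/TEST_FAIL are the usual selftest harness return codes.)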

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/Makefile   |   3 +-
 tools/testing/selftests/powerpc/ptrace/Makefile|   7 +
 .../testing/selftests/powerpc/ptrace/ptrace-ebb.c  | 185 
 .../testing/selftests/powerpc/ptrace/ptrace-ebb.h  | 102 +
 tools/testing/selftests/powerpc/ptrace/ptrace.h| 240 +
 5 files changed, 536 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/Makefile
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-ebb.c
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-ebb.h
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace.h

diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 4ca83fe..b7cb9f7 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -24,7 +24,8 @@ SUB_DIRS = benchmarks \
   syscalls \
   tm   \
   vphn \
-  math
+  math \
+  ptrace
 
 endif
 
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
new file mode 100644
index 000..f6948f3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -0,0 +1,7 @@
+TEST_PROGS := ptrace-ebb
+all: $(TEST_PROGS)
+CFLAGS += -m64
+$(TEST_PROGS): ../harness.c ptrace.S ../utils.c ptrace.h
+ptrace-ebb: ../pmu/event.c ../pmu/lib.c ../pmu/ebb/ebb_handler.S 
../pmu/ebb/busy_loop.S
+clean:
+   rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-ebb.c 
b/tools/testing/selftests/powerpc/ptrace/ptrace-ebb.c
new file mode 100644
index 000..fa0b541
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-ebb.c
@@ -0,0 +1,185 @@
+/*
+ * Ptrace interface test for EBB
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "../pmu/ebb/ebb.h"
+#include "ptrace.h"
+#include "ptrace-ebb.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+volatile int *cptr, *pptr;
+
+void ebb(void)
+{
+   struct event event;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+   event_init_named(&event, 0x1001e, "cycles");
+   event.attr.config |= (1ull << 63);
+   event.attr.exclusive = 1;
+   event.attr.pinned = 1;
+   event.attr.exclude_kernel = 1;
+   event.attr.exclude_hv = 1;
+   event.attr.exclude_idle = 1;
+
+   if (event_open(&event)) {
+   perror("event_open() failed");
+   exit(1);
+   }
+
+   setup_ebb_handler(standard_ebb_callee);
+   mtspr(SPRN_BESCR, 0x8001ull);
+
+   mb();
+
+   if (ebb_event_enable(&event)) {
+   perror("ebb_event_handler() failed");
+   exit(1);
+   }
+
+   mtspr(SPRN_PMC1, pmc_sample_period(SAMPLE_PERIOD));
+   core_busy_loop();
+   cptr[0] = 1;
+   while (1);
+
+   exit(0);
+}
+
+int validate_ebb(struct ebb_regs *regs)
+{
+   #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+   struct opd *opd = (struct opd *) ebb_handler;
+   #endif
+
+   printf("EBBRR: %lx\n", regs->ebbrr);
+   #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+   printf("EBBHR: %lx; expected: %lx\n",
+   regs->ebbhr, (unsigned long)opd->entry);
+   #else
+   printf("EBBHR: %lx; expected: %lx\n",
+   regs->ebbhr, (unsigned long)ebb_handler);
+   #endif
+   printf("BESCR: %lx\n", regs->bescr);
+
+   #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+   if (regs->ebbhr != opd->entry)
+   return TEST_FAIL;
+   #else
+   if (regs->ebbhr != (unsigned long) ebb_handler)
+   return TEST_FAIL;
+   #endif
+
+   return TEST_PASS;
+}
+
+int validate_pmu(struct pmu_regs *regs)
+{
+   printf("SIAR:  %lx\n", regs->siar);
+   printf("SDAR:  %lx\n", regs->sdar);
+   printf("SIER:  %lx; expected: %lx\n",
+   regs->sier, (unsigned long)SIER_EXP);
+   printf("MMCR2: %lx; expected: %lx\n",
+   regs->mmcr2, (unsigned long)MMCR2_EXP);
+   printf("MMCR0: %lx; expected: %lx\n",
+   regs->mmcr0, (unsigned long)MMCR0_EXP);
+
+   /* Validate SIER */
+   if (regs->sier != SIER_EXP)
+   return TEST_FAIL;
+
+   /* Validate MMCR2 */
+   if (regs

[PATCH v13 17/30] selftests/powerpc: Use the new SPRN_DSCR_PRIV definition

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

Now that the new DSCR register definitions (SPRN_DSCR_PRIV and
SPRN_DSCR) are defined outside this directory, use them instead.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/dscr/dscr.h | 10 --
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h 
b/tools/testing/selftests/powerpc/dscr/dscr.h
index a36af1b..18ea223b 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -28,8 +28,6 @@
 
 #include "utils.h"
 
-#define SPRN_DSCR  0x11/* Privilege state SPR */
-#define SPRN_DSCR_USR  0x03/* Problem state SPR */
 #define THREADS100 /* Max threads */
 #define COUNT  100 /* Max iterations */
 #define DSCR_MAX   16  /* Max DSCR value */
@@ -48,14 +46,14 @@ inline unsigned long get_dscr(void)
 {
unsigned long ret;
 
-   asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR));
+   asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR_PRIV));
 
return ret;
 }
 
 inline void set_dscr(unsigned long val)
 {
-   asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+   asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_PRIV));
 }
 
 /* Problem state DSCR access */
@@ -63,14 +61,14 @@ inline unsigned long get_dscr_usr(void)
 {
unsigned long ret;
 
-   asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR_USR));
+   asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR));
 
return ret;
 }
 
 inline void set_dscr_usr(unsigned long val)
 {
-   asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_USR));
+   asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
 }
 
 /* Default DSCR access */
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 16/30] selftests/powerpc: Add more SPR numbers, TM & VMX instructions to 'reg.h'

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds SPR numbers for the TAR, PPR and DSCR special
purpose registers. It also adds TM, VSX and VMX related
instructions which will be used by later patches in the
series.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/reg.h | 42 ---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/powerpc/reg.h 
b/tools/testing/selftests/powerpc/reg.h
index 65bfdee..5183349 100644
--- a/tools/testing/selftests/powerpc/reg.h
+++ b/tools/testing/selftests/powerpc/reg.h
@@ -18,6 +18,19 @@
 
 #define mb()   asm volatile("sync" : : : "memory");
 
+/* Vector Instructions */
+#define VSX_XX1(xs, ra, rb)(((xs) & 0x1f) << 21 | ((ra) << 16) |  \
+((rb) << 11) | (((xs) >> 5)))
+#define STXVD2X(xs, ra, rb).long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
+
+/* TM instructions */
+#define TBEGIN ".long 0x7C00051D;"
+#define TABORT ".long 0x7C00071D;"
+#define TEND   ".long 0x7C00055D;"
+#define TSUSPEND   ".long 0x7C0005DD;"
+#define TRESUME".long 0x7C2005DD;"
+
 #define SPRN_MMCR2 769
 #define SPRN_MMCRA 770
 #define SPRN_MMCR0 779
@@ -46,10 +59,33 @@
 #define SPRN_SDAR  781
 #define SPRN_SIER  768
 
-#define SPRN_TEXASR 0x82
+#define SPRN_TEXASR 0x82/* Transaction Exception and Status Register */
 #define SPRN_TFIAR  0x81/* Transaction Failure Inst Addr*/
 #define SPRN_TFHAR  0x80/* Transaction Failure Handler Addr */
-#define TEXASR_FS   0x0800
-#define SPRN_TAR0x32f
+#define SPRN_TAR0x32f  /* Target Address Register */
+
+#define SPRN_DSCR_PRIV 0x11/* Privilege State DSCR */
+#define SPRN_DSCR  0x03/* Data Stream Control Register */
+#define SPRN_PPR   896 /* Program Priority Register */
+
+/* TEXASR register bits */
+#define TEXASR_FC  0xFE00
+#define TEXASR_FP  0x0100
+#define TEXASR_DA  0x0080
+#define TEXASR_NO  0x0040
+#define TEXASR_FO  0x0020
+#define TEXASR_SIC 0x0010
+#define TEXASR_NTC 0x0008
+#define TEXASR_TC  0x0004
+#define TEXASR_TIC 0x0002
+#define TEXASR_IC  0x0001
+#define TEXASR_IFC 0x8000
+#define TEXASR_ABT 0x0001
+#define TEXASR_SPD 0x8000
+#define TEXASR_HV  0x2000
+#define TEXASR_PR  0x1000
+#define TEXASR_FS  0x0800
+#define TEXASR_TE  0x0400
+#define TEXASR_ROT 0x0200
 
 #endif /* _SELFTESTS_POWERPC_REG_H */
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 15/30] powerpc/ptrace: Enable support for Performance Monitor registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the Performance
Monitor registers related ELF core note NT_PPC_PMU through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding one new register set, REGSET_PMR, in powerpc corresponding
to the ELF core note section added. It also implements the get,
set and active functions for this new register set.
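
As an illustration, a tracer could read this regset roughly as in the
minimal sketch below: struct ppc_pmu_regs and read_pmu_regs are made-up
names, the NT_PPC_PMU note number is assumed to be visible to userspace
(via <elf.h> or a local definition), and the tracee is assumed to be
ptrace-stopped.

#include <elf.h>		/* NT_PPC_PMU, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Field order mirrors the regset: siar, sdar, sier, mmcr2, mmcr0 */
struct ppc_pmu_regs {
	uint64_t siar;
	uint64_t sdar;
	uint64_t sier;
	uint64_t mmcr2;
	uint64_t mmcr0;
};

static int read_pmu_regs(pid_t pid, struct ppc_pmu_regs *regs)
{
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

	/* The ELF note type goes in the "addr" argument of ptrace() */
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_PMU, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_PMU)");
		return -1;
	}
	printf("SIAR %llx SDAR %llx SIER %llx MMCR2 %llx MMCR0 %llx\n",
	       (unsigned long long)regs->siar, (unsigned long long)regs->sdar,
	       (unsigned long long)regs->sier, (unsigned long long)regs->mmcr2,
	       (unsigned long long)regs->mmcr0);
	return 0;
}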

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/uapi/asm/elf.h |  3 +-
 arch/powerpc/kernel/ptrace.c| 75 +
 2 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/uapi/asm/elf.h 
b/arch/powerpc/include/uapi/asm/elf.h
index 8c4d71a..3a9e44c 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -94,7 +94,8 @@
 #define ELF_NVMX   34  /* includes all vector registers */
 #define ELF_NVSX   32  /* includes all VSX registers */
 #define ELF_NTMSPRREG  3   /* include tfhar, tfiar, texasr */
-#define ELF_NEBB   3   /* includes ebbrr, ebbhr, bescr */
+#define ELF_NEBB   3   /* includes ebbrr, ebbhr, bescr */
+#define ELF_NPMU   5   /* includes siar, sdar, sier, mmcr2, mmcr0 */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 5cbabdb..1d8998b 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1824,6 +1824,75 @@ static int ebb_set(struct task_struct *target,
 
return ret;
 }
+static int pmu_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   return regset->n;
+}
+
+static int pmu_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   /* Build tests */
+   BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+   BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+   BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+   BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.siar, 0,
+   5 * sizeof(unsigned long));
+}
+
+static int pmu_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret = 0;
+
+   /* Build tests */
+   BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+   BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+   BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+   BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.siar, 0,
+   sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.sdar, sizeof(unsigned long),
+   2 * sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.sier, 2 * sizeof(unsigned long),
+   3 * sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.mmcr2, 3 * sizeof(unsigned long),
+   4 * sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.mmcr0, 4 * sizeof(unsigned long),
+   5 * sizeof(unsigned long));
+   return ret;
+}
 #endif
 /*
  * These are our native regset flavors.
@@ -1857,6 +1926,7 @@ enum powerpc_regset {
 #ifdef CONFIG_PPC_BOOK3S_64
REGSET_TAR, /* TAR register */
REGSET_EBB, /* EBB registers */
+   REGSET_PMR, /* Performance Monitor Registers */
 #endif
 };
 
@@ -1957,6 +2027,11 @@ static const struct user_regset native_regsets[] = {
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .get = ebb_get, .set = ebb_set
},
+   [REGSET_PMR] = {
+   .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
+   .size = sizeof(u64), .alig

[PATCH v13 14/30] powerpc/ptrace: Enable support for EBB registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the EBB state
registers related ELF core note NT_PPC_EBB through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding one new register set, REGSET_EBB, in powerpc corresponding
to the ELF core note section added. It also implements the get,
set and active functions for this new register set.
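
A minimal userspace sketch of how the active/get semantics surface to a
tracer is shown below; ppc_ebb_regs and read_ebb_regs are illustrative
names, and the NT_PPC_EBB note number is assumed to be available to
userspace.

#include <elf.h>		/* NT_PPC_EBB, assumed to be visible here */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Order mirrors ebb_get(): ebbrr, ebbhr, bescr */
struct ppc_ebb_regs {
	uint64_t ebbrr;
	uint64_t ebbhr;
	uint64_t bescr;
};

static int read_ebb_regs(pid_t pid, struct ppc_ebb_regs *ebb)
{
	struct iovec iov = { .iov_base = ebb, .iov_len = sizeof(*ebb) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_EBB, &iov) == 0)
		return 0;

	if (errno == ENODATA)		/* tracee has not used EBB yet */
		fprintf(stderr, "no EBB state in the tracee\n");
	else if (errno == ENODEV)	/* CPU lacks the required features */
		fprintf(stderr, "EBB not supported on this CPU\n");
	else
		perror("PTRACE_GETREGSET(NT_PPC_EBB)");
	return -1;
}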

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/uapi/asm/elf.h |  1 +
 arch/powerpc/kernel/ptrace.c| 75 +
 2 files changed, 76 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/elf.h 
b/arch/powerpc/include/uapi/asm/elf.h
index e703c64..8c4d71a 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -94,6 +94,7 @@
 #define ELF_NVMX   34  /* includes all vector registers */
 #define ELF_NVSX   32  /* includes all VSX registers */
 #define ELF_NTMSPRREG  3   /* include tfhar, tfiar, texasr */
+#define ELF_NEBB   3   /* includes ebbrr, ebbhr, bescr */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c710060..5cbabdb 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1760,6 +1760,70 @@ static int tar_set(struct task_struct *target,
&target->thread.tar, 0, sizeof(u64));
return ret;
 }
+
+static int ebb_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   if (target->thread.used_ebb)
+   return regset->n;
+
+   return 0;
+}
+
+static int ebb_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   /* Build tests */
+   BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+   BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   if (!target->thread.used_ebb)
+   return -ENODATA;
+
+   return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
+}
+
+static int ebb_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret = 0;
+
+   /* Build tests */
+   BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+   BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+   if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+   return -ENODEV;
+
+   if (target->thread.used_ebb)
+   return -ENODATA;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.ebbrr, 0, sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.ebbhr, sizeof(unsigned long),
+   2 * sizeof(unsigned long));
+
+   if (!ret)
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.bescr,
+   2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
+
+   return ret;
+}
 #endif
 /*
  * These are our native regset flavors.
@@ -1792,6 +1856,7 @@ enum powerpc_regset {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
REGSET_TAR, /* TAR register */
+   REGSET_EBB, /* EBB registers */
 #endif
 };
 
@@ -1887,6 +1952,11 @@ static const struct user_regset native_regsets[] = {
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
+   [REGSET_EBB] = {
+   .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+   .size = sizeof(u64), .align = sizeof(u64),
+   .active = ebb_active, .get = ebb_get, .set = ebb_set
+   },
 #endif
 };
 
@@ -2173,6 +2243,11 @@ static const struct user_regset compat_regsets[] = {
.size = sizeof(u64), .align = sizeof(u64),
.get = tar_get, .set = tar_set
},
+   [REGSET_EBB] = {
+   .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+   .size = sizeof(u64), .align = sizeof(u64),
+   .active = ebb_active, .get = ebb_get, .set = ebb_set
+   },
 #endif
 };
 
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 13/30] powerpc/ptrace: Enable support for NT_PPC_TAR, NT_PPC_PPR, NT_PPC_DSCR

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the running TAR,
PPR and DSCR registers through their ELF core notes NT_PPC_TAR,
NT_PPC_PPR and NT_PPC_DSCR via the PTRACE_GETREGSET and
PTRACE_SETREGSET calls. This is achieved by adding three new
register sets REGSET_TAR, REGSET_PPR and REGSET_DSCR in powerpc
corresponding to the ELF core note sections added in this regard.
It implements the get and set functions for these new register
sets.
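
For example, a tracer could read and rewrite the tracee's DSCR through
the new regset roughly as sketched below; retune_dscr is an illustrative
name and the NT_PPC_DSCR note number is assumed to be visible to
userspace.

#include <elf.h>		/* NT_PPC_DSCR, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Read the tracee's running DSCR (one u64) and write back a new value */
static int retune_dscr(pid_t pid, uint64_t new_dscr)
{
	uint64_t dscr;
	struct iovec iov = { .iov_base = &dscr, .iov_len = sizeof(dscr) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_DSCR, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_DSCR)");
		return -1;
	}
	printf("old DSCR %llx -> new DSCR %llx\n",
	       (unsigned long long)dscr, (unsigned long long)new_dscr);

	dscr = new_dscr;
	if (ptrace(PTRACE_SETREGSET, pid, (void *)NT_PPC_DSCR, &iov) == -1) {
		perror("PTRACE_SETREGSET(NT_PPC_DSCR)");
		return -1;
	}
	return 0;
}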

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 117 +++
 1 file changed, 117 insertions(+)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 2ce5f86..c710060 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1689,6 +1689,78 @@ static int tm_dscr_set(struct task_struct *target,
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+#ifdef CONFIG_PPC64
+static int ppr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.ppr, 0, sizeof(u64));
+   return ret;
+}
+
+static int ppr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.ppr, 0, sizeof(u64));
+   return ret;
+}
+
+static int dscr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.dscr, 0, sizeof(u64));
+   return ret;
+}
+static int dscr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.dscr, 0, sizeof(u64));
+   return ret;
+}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+static int tar_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tar, 0, sizeof(u64));
+   return ret;
+}
+static int tar_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tar, 0, sizeof(u64));
+   return ret;
+}
+#endif
 /*
  * These are our native regset flavors.
  */
@@ -1714,6 +1786,13 @@ enum powerpc_regset {
REGSET_TM_CPPR, /* TM checkpointed PPR register */
REGSET_TM_CDSCR,/* TM checkpointed DSCR register */
 #endif
+#ifdef CONFIG_PPC64
+   REGSET_PPR, /* PPR register */
+   REGSET_DSCR,/* DSCR register */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+   REGSET_TAR, /* TAR register */
+#endif
 };
 
 static const struct user_regset native_regsets[] = {
@@ -1790,6 +1869,25 @@ static const struct user_regset native_regsets[] = {
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
},
 #endif
+#ifdef CONFIG_PPC64
+   [REGSET_PPR] = {
+   .core_note_type = NT_PPC_PPR, .n = 1,
+   .size = sizeof(u64), .align = sizeof(u64),
+   .get = ppr_get, .set = ppr_set
+   },
+   [REGSET_DSCR] = {
+   .core_note_type = NT_PPC_DSCR, .n = 1,
+   .size = sizeof(u64), .align = sizeof(u64),
+   .get = dscr_get, .set = dscr_set
+   },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+   [REGSET_TAR] = {
+   .core_note_type = NT_PPC_TAR, .n = 1,
+   .size = sizeof(u64), .align = sizeof(u64),
+   .get = tar_get, .set = tar_set
+   },
+#endif
 };
 
 static const struct user_regset_view user_ppc_native_view = {
@@ -2057,6 +2155,25 @@ static const struct user_regset compat_regsets[] = {
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_s

[PATCH v13 12/30] powerpc/ptrace: Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for all three TM
checkpointed SPR states through their ELF core notes NT_PPC_TM_CTAR,
NT_PPC_TM_CPPR and NT_PPC_TM_CDSCR via the PTRACE_GETREGSET and
PTRACE_SETREGSET calls. This is achieved by adding three new
register sets REGSET_TM_CTAR, REGSET_TM_CPPR and REGSET_TM_CDSCR
in powerpc corresponding to the ELF core note sections added. It
implements the get, set and active functions for all these new
register sets.
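
A rough sketch of how a tracer would consume one of these regsets, and
the errors it should expect outside a transaction, follows; read_ckpt_tar
is an illustrative name and the NT_PPC_TM_CTAR note number is assumed to
be visible to userspace.

#include <elf.h>		/* NT_PPC_TM_CTAR, assumed to be visible here */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Valid only while the tracee is stopped inside an active transaction */
static int read_ckpt_tar(pid_t pid, uint64_t *ctar)
{
	struct iovec iov = { .iov_base = ctar, .iov_len = sizeof(*ctar) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_TM_CTAR, &iov) == 0)
		return 0;

	if (errno == ENODATA)
		fprintf(stderr, "tracee has no active transaction\n");
	else if (errno == ENODEV)
		fprintf(stderr, "no TM support on this CPU/kernel\n");
	else
		perror("PTRACE_GETREGSET(NT_PPC_TM_CTAR)");
	return -1;
}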

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 178 +++
 1 file changed, 178 insertions(+)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 24a6296..2ce5f86 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1542,6 +1542,151 @@ static int tm_spr_set(struct task_struct *target,
 2 * sizeof(u64), 3 * sizeof(u64));
return ret;
 }
+
+static int tm_tar_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (MSR_TM_ACTIVE(target->thread.regs->msr))
+   return regset->n;
+
+   return 0;
+}
+
+static int tm_tar_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_tar, 0, sizeof(u64));
+   return ret;
+}
+
+static int tm_tar_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_tar, 0, sizeof(u64));
+   return ret;
+}
+
+static int tm_ppr_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (MSR_TM_ACTIVE(target->thread.regs->msr))
+   return regset->n;
+
+   return 0;
+}
+
+
+static int tm_ppr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_ppr, 0, sizeof(u64));
+   return ret;
+}
+
+static int tm_ppr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_ppr, 0, sizeof(u64));
+   return ret;
+}
+
+static int tm_dscr_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (MSR_TM_ACTIVE(target->thread.regs->msr))
+   return regset->n;
+
+   return 0;
+}
+
+static int tm_dscr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_dscr, 0, sizeof(u64));
+   return ret;
+}
+
+static int tm_dscr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, con

[PATCH v13 11/30] powerpc/ptrace: Enable support for TM SPR state

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the TM SPR state
related ELF core note NT_PPC_TM_SPR through the PTRACE_GETREGSET
and PTRACE_SETREGSET calls. This is achieved by adding a register
set REGSET_TM_SPR in powerpc corresponding to the ELF core note
section added. It implements the get, set and active functions for
this new register set.
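
A minimal sketch of reading this regset from a tracer follows;
ppc_tm_sprs and read_tm_sprs are illustrative names and the
NT_PPC_TM_SPR note number is assumed to be available to userspace.

#include <elf.h>		/* NT_PPC_TM_SPR, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Matches the documented layout: tm_tfhar, tm_texasr, tm_tfiar */
struct ppc_tm_sprs {
	uint64_t tfhar;
	uint64_t texasr;
	uint64_t tfiar;
};

static int read_tm_sprs(pid_t pid, struct ppc_tm_sprs *sprs)
{
	struct iovec iov = { .iov_base = sprs, .iov_len = sizeof(*sprs) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_TM_SPR, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_TM_SPR)");
		return -1;
	}
	printf("TFHAR %llx TEXASR %llx TFIAR %llx\n",
	       (unsigned long long)sprs->tfhar,
	       (unsigned long long)sprs->texasr,
	       (unsigned long long)sprs->tfiar);
	return 0;
}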

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/uapi/asm/elf.h |   1 +
 arch/powerpc/kernel/ptrace.c| 143 +++-
 2 files changed, 143 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/uapi/asm/elf.h 
b/arch/powerpc/include/uapi/asm/elf.h
index 1549172..e703c64 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -93,6 +93,7 @@
 #define ELF_NFPREG 33  /* includes fpscr */
 #define ELF_NVMX   34  /* includes all vector registers */
 #define ELF_NVSX   32  /* includes all VSX registers */
+#define ELF_NTMSPRREG  3   /* include tfhar, tfiar, texasr */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c8bf7e9..24a6296 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -66,6 +66,7 @@ struct pt_regs_offset {
 
 #define TVSO(f)(offsetof(struct thread_vr_state, f))
 #define TFSO(f)(offsetof(struct thread_fp_state, f))
+#define TSO(f) (offsetof(struct thread_struct, f))
 
 static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
@@ -1412,7 +1413,136 @@ static int tm_cvsx_set(struct task_struct *target,
 
return ret;
 }
-#endif
+
+/**
+ * tm_spr_active - get active number of registers in TM SPR
+ * @target:The target task.
+ * @regset:The user regset structure.
+ *
+ * This function checks the active number of available
+ * regisers in the transactional memory SPR category.
+ */
+static int tm_spr_active(struct task_struct *target,
+const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   return regset->n;
+}
+
+/**
+ * tm_spr_get - get the TM related SPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy from.
+ * @ubuf:  User buffer to copy into.
+ *
+ * This function gets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+static int tm_spr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   /* Build tests */
+   BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+   BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+   BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   /* Flush the states */
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+
+   /* TFHAR register */
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_tfhar, 0, sizeof(u64));
+
+   /* TEXASR register */
+   if (!ret)
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_texasr, sizeof(u64),
+   2 * sizeof(u64));
+
+   /* TFIAR register */
+   if (!ret)
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.tm_tfiar,
+   2 * sizeof(u64), 3 * sizeof(u64));
+   return ret;
+}
+
+/**
+ * tm_spr_set - set the TM related SPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy into.
+ * @ubuf:  User buffer to copy from.
+ *
+ * This function sets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+static int tm_spr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+   int r

[PATCH v13 10/30] powerpc/ptrace: Enable support for NT_PPC_CVSX

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the TM
checkpointed VSX register set ELF core note NT_PPC_CVSX through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding a register set REGSET_TM_CVSX in powerpc corresponding to
the ELF core note section added. It implements the get, set and
active functions for this new register set.
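
A rough sketch of writing the checkpointed VSX doublewords from a tracer
follows; write_ckpt_vsx is an illustrative name, the NT_PPC_CVSX note
number is assumed to be visible to userspace, and the call only succeeds
while the tracee has an active transaction.

#include <elf.h>		/* NT_PPC_CVSX, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Overwrite the 32 checkpointed VSX low doublewords (u64 vsx[32]) */
static int write_ckpt_vsx(pid_t pid, const uint64_t new_vsx[32])
{
	uint64_t buf[32];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	memcpy(buf, new_vsx, sizeof(buf));
	/* Fails with ENODATA when no transaction is active in the tracee */
	if (ptrace(PTRACE_SETREGSET, pid, (void *)NT_PPC_CVSX, &iov) == -1) {
		perror("PTRACE_SETREGSET(NT_PPC_CVSX)");
		return -1;
	}
	return 0;
}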

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/uapi/asm/elf.h |   1 +
 arch/powerpc/kernel/ptrace.c| 129 
 2 files changed, 130 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/elf.h 
b/arch/powerpc/include/uapi/asm/elf.h
index ecb4e84..1549172 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -92,6 +92,7 @@
 #define ELF_NGREG  48  /* includes nip, msr, lr, etc. */
 #define ELF_NFPREG 33  /* includes fpscr */
 #define ELF_NVMX   34  /* includes all vector registers */
+#define ELF_NVSX   32  /* includes all VSX registers */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 836a4b4..c8bf7e9 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -65,6 +65,7 @@ struct pt_regs_offset {
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 
 #define TVSO(f)(offsetof(struct thread_vr_state, f))
+#define TFSO(f)(offsetof(struct thread_fp_state, f))
 
 static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
@@ -1294,6 +1295,123 @@ static int tm_cvmx_set(struct task_struct *target,
 
return ret;
 }
+
+/**
+ * tm_cvsx_active - get active number of registers in CVSX
+ * @target:The target task.
+ * @regset:The user regset structure.
+ *
+ * This function checks for the active number of available
+ * regisers in transaction checkpointed VSX category.
+ */
+static int tm_cvsx_active(struct task_struct *target,
+   const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return 0;
+
+   flush_vsx_to_thread(target);
+   return target->thread.used_vsr ? regset->n : 0;
+}
+
+/**
+ * tm_cvsx_get - get CVSX registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy from.
+ * @ubuf:  User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed VSX registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed VSX registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ *};
+ */
+static int tm_cvsx_get(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   void *kbuf, void __user *ubuf)
+{
+   u64 buf[32];
+   int ret, i;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   /* Flush the state */
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+   flush_vsx_to_thread(target);
+
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+
+   return ret;
+}
+
+/**
+ * tm_cvsx_set - set CFPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy into.
+ * @ubuf:  User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed VSX registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * VSX register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * FPR registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ *};
+ */
+static int tm_cvsx_set(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   const void *kbuf, const void __user *ubuf)
+{
+   u64 buf[32];
+   int ret, i;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+  

[PATCH v13 09/30] powerpc/ptrace: Enable support for NT_PPC_CVMX

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the TM
checkpointed VMX register set ELF core note NT_PPC_CVMX through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding a register set REGSET_TM_CVMX in powerpc corresponding to
the ELF core note section added. It implements the get, set and
active functions for this new register set.
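
A minimal tracer-side sketch of reading this regset, including the
vrsave low-word convention, follows; ppc_cvmx and read_ckpt_vmx are
illustrative names and the NT_PPC_CVMX note number is assumed to be
available to userspace.

#include <elf.h>		/* NT_PPC_CVMX, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Same shape as the documented layout: vr[32], vscr, vrsave (16B each) */
struct ppc_cvmx {
	uint8_t vr[32][16];
	uint8_t vscr[16];
	uint8_t vrsave[16];
};

static int read_ckpt_vmx(pid_t pid, struct ppc_cvmx *vmx)
{
	struct iovec iov = { .iov_base = vmx, .iov_len = sizeof(*vmx) };
	uint32_t vrsave;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_CVMX, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_CVMX)");
		return -1;
	}

	/* The kernel copies the 32-bit vrsave value into the start of
	 * this 16-byte slot; the rest of the slot is zeroed. */
	memcpy(&vrsave, vmx->vrsave, sizeof(vrsave));
	printf("checkpointed VRSAVE %x\n", vrsave);
	return 0;
}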

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/uapi/asm/elf.h |   1 +
 arch/powerpc/kernel/ptrace.c| 158 
 2 files changed, 159 insertions(+)

diff --git a/arch/powerpc/include/uapi/asm/elf.h 
b/arch/powerpc/include/uapi/asm/elf.h
index c2d21d1..ecb4e84 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -91,6 +91,7 @@
 
 #define ELF_NGREG  48  /* includes nip, msr, lr, etc. */
 #define ELF_NFPREG 33  /* includes fpscr */
+#define ELF_NVMX   34  /* includes all vector registers */
 
 typedef unsigned long elf_greg_t64;
 typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cd10022..836a4b4 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -64,6 +64,8 @@ struct pt_regs_offset {
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 
+#define TVSO(f)(offsetof(struct thread_vr_state, f))
+
 static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
@@ -1147,6 +1149,151 @@ static int tm_cfpr_set(struct task_struct *target,
target->thread.fp_state.fpscr = buf[32];
return 0;
 }
+
+/**
+ * tm_cvmx_active - get active number of registers in CVMX
+ * @target:The target task.
+ * @regset:The user regset structure.
+ *
+ * This function checks for the active number of available
+ * regisers in checkpointed VMX category.
+ */
+static int tm_cvmx_active(struct task_struct *target,
+   const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return 0;
+
+   return regset->n;
+}
+
+/**
+ * tm_cvmx_get - get CMVX registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy from.
+ * @ubuf:  User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed VMX registers.
+ *
+ * When the transaction is active 'vr_state' and 'vr_save' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128   vr[32];
+ * vector128   vscr;
+ * vector128   vrsave;
+ *};
+ */
+static int tm_cvmx_get(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   /* Flush the state */
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+   &target->thread.vr_state, 0,
+   33 * sizeof(vector128));
+   if (!ret) {
+   /*
+* Copy out only the low-order word of vrsave.
+*/
+   union {
+   elf_vrreg_t reg;
+   u32 word;
+   } vrsave;
+   memset(&vrsave, 0, sizeof(vrsave));
+   vrsave.word = target->thread.vrsave;
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+   33 * sizeof(vector128), -1);
+   }
+
+   return ret;
+}
+
+/**
+ * tm_cvmx_set - set CMVX registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy into.
+ * @ubuf:  User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed VMX registers.
+ *
+ * When the transaction is active 'vr_state' and 'vr_save' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ 

[PATCH v13 08/30] powerpc/ptrace: Enable support for NT_PPC_CFPR

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the TM
checkpointed FPR register set ELF core note NT_PPC_CFPR through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding a register set REGSET_TM_CFPR in powerpc corresponding to
the ELF core note section added. It implements the get, set and
active functions for this new register set.
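
A minimal sketch of fetching this regset from a tracer follows; ppc_cfpr
and read_ckpt_fprs are illustrative names, the NT_PPC_CFPR note number is
assumed to be visible to userspace, and the tracee is assumed to be
stopped inside an active transaction.

#include <elf.h>		/* NT_PPC_CFPR, assumed to be visible here */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Matches the documented layout: u64 fpr[32] followed by u64 fpscr */
struct ppc_cfpr {
	uint64_t fpr[32];
	uint64_t fpscr;
};

static int read_ckpt_fprs(pid_t pid, struct ppc_cfpr *cfpr)
{
	struct iovec iov = { .iov_base = cfpr, .iov_len = sizeof(*cfpr) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_CFPR, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_CFPR)");
		return -1;
	}
	printf("checkpointed FPR0 %llx FPSCR %llx\n",
	       (unsigned long long)cfpr->fpr[0],
	       (unsigned long long)cfpr->fpscr);
	return 0;
}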

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 126 +++
 1 file changed, 126 insertions(+)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index d42c79a..cd10022 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1032,6 +1032,121 @@ static int tm_cgpr_set(struct task_struct *target,
 
return ret;
 }
+
+/**
+ * tm_cfpr_active - get active number of registers in CFPR
+ * @target:The target task.
+ * @regset:The user regset structure.
+ *
+ * This function checks for the active number of available
+ * regisers in transaction checkpointed FPR category.
+ */
+static int tm_cfpr_active(struct task_struct *target,
+   const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return 0;
+
+   return regset->n;
+}
+
+/**
+ * tm_cfpr_get - get CFPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy from.
+ * @ubuf:  User buffer to copy into.
+ *
+ * This function gets in transaction checkpointed FPR registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed FPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ *};
+ */
+static int tm_cfpr_get(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   void *kbuf, void __user *ubuf)
+{
+   u64 buf[33];
+   int i;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+
+   /* copy to local buffer then write that out */
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.TS_FPR(i);
+   buf[32] = target->thread.fp_state.fpscr;
+   return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+}
+
+/**
+ * tm_cfpr_set - set CFPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy into.
+ * @ubuf:  User buffer to copy from.
+ *
+ * This function sets in transaction checkpointed FPR registers.
+ *
+ * When the transaction is active 'fp_state' holds the checkpointed
+ * FPR register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * FPR registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ *};
+ */
+static int tm_cfpr_set(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   const void *kbuf, const void __user *ubuf)
+{
+   u64 buf[33];
+   int i;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+
+   /* copy to local buffer then write that out */
+   i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+   if (i)
+   return i;
+   for (i = 0; i < 32 ; i++)
+   target->thread.TS_FPR(i) = buf[i];
+   target->thread.fp_state.fpscr = buf[32];
+   return 0;
+}
 #endif
 
 /*
@@ -1051,6 +1166,7 @@ enum powerpc_regset {
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
REGSET_TM_CGPR, /* TM checkpointed GPR registers */
+   REGSET_TM_CFPR, /* TM checkpointed FPR registers */
 #endif
 };
 
@@ -1092,6 +1208,11 @@ static const struct user_regset native_regsets[] = {
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active, .get = tm_cgpr_get, .s

[PATCH v13 07/30] powerpc/ptrace: Enable support for NT_PPC_CGPR

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables support for ptrace requests for the TM
checkpointed GPR register set ELF core note NT_PPC_CGPR through the
PTRACE_GETREGSET and PTRACE_SETREGSET calls. This is achieved by
adding a register set REGSET_TM_CGPR in powerpc corresponding to
the ELF core note section added. It implements the get, set and
active functions for this new register set.
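
A rough tracer-side sketch follows; it treats the note body as an array
of longs indexed with the PT_* slot numbers from <asm/ptrace.h>.
read_ckpt_gprs is an illustrative name, the NT_PPC_CGPR note number is
assumed to be visible to userspace, and <sys/ptrace.h> is included
before <asm/ptrace.h> to avoid clashing definitions.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <stdio.h>
#include <elf.h>		/* NT_PPC_CGPR, assumed to be visible here */
#include <asm/ptrace.h>		/* PT_NIP, PT_MSR, PT_R1 slot indices */

/* The note body is an image of struct pt_regs; read it as longs */
static int read_ckpt_gprs(pid_t pid)
{
	unsigned long ckpt[48];		/* ELF_NGREG slots */
	struct iovec iov = { .iov_base = ckpt, .iov_len = sizeof(ckpt) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_CGPR, &iov) == -1) {
		perror("PTRACE_GETREGSET(NT_PPC_CGPR)");
		return -1;
	}
	printf("checkpointed NIP %lx MSR %lx R1 %lx\n",
	       ckpt[PT_NIP], ckpt[PT_MSR], ckpt[PT_R1]);
	return 0;
}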

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 222 +++
 1 file changed, 222 insertions(+)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 2cdb4e7..d42c79a 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -181,6 +181,26 @@ static int set_user_msr(struct task_struct *task, unsigned 
long msr)
return 0;
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+   return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+   task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+   task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+   return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+   task->thread.ckpt_regs.trap = trap & 0xfff0;
+   return 0;
+}
+#endif
+
 #ifdef CONFIG_PPC64
 static int get_user_dscr(struct task_struct *task, unsigned long *data)
 {
@@ -847,6 +867,172 @@ static int evr_set(struct task_struct *target, const 
struct user_regset *regset,
 }
 #endif /* CONFIG_SPE */
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/**
+ * tm_cgpr_active - get active number of registers in CGPR
+ * @target:The target task.
+ * @regset:The user regset structure.
+ *
+ * This function checks for the active number of available
+ * regisers in transaction checkpointed GPR category.
+ */
+static int tm_cgpr_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return 0;
+
+   return regset->n;
+}
+
+/**
+ * tm_cgpr_get - get CGPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy from.
+ * @ubuf:  User buffer to copy into.
+ *
+ * This function gets transaction checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds all the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function gets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+static int tm_cgpr_get(struct task_struct *target,
+   const struct user_regset *regset,
+   unsigned int pos, unsigned int count,
+   void *kbuf, void __user *ubuf)
+{
+   int ret;
+
+   if (!cpu_has_feature(CPU_FTR_TM))
+   return -ENODEV;
+
+   if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+   return -ENODATA;
+
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, offsetof(struct pt_regs, msr));
+   if (!ret) {
+   unsigned long msr = get_user_ckpt_msr(target);
+
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
+ offsetof(struct pt_regs, msr),
+ offsetof(struct pt_regs, msr) +
+ sizeof(msr));
+   }
+
+   BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+offsetof(struct pt_regs, msr) + sizeof(long));
+
+   if (!ret)
+   ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ offsetof(struct pt_regs, orig_gpr3),
+ sizeof(struct pt_regs));
+   if (!ret)
+   ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+  sizeof(struct pt_regs), -1);
+
+   return ret;
+}
+
+/*
+ * tm_cgpr_set - set the CGPR registers
+ * @target:The target task.
+ * @regset:The user regset structure.
+ * @pos:   The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf:  Kernel buffer to copy into.
+ * @ubuf:  User buffer to copy from.
+ *
+ * This

[PATCH v13 06/30] powerpc/ptrace: Adapt gpr32_get, gpr32_set functions for transaction

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch splits the gpr32_get and gpr32_set functions into common
helpers so that the in-transaction ptrace requests implemented later
in the series can reuse them.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 64 +++-
 1 file changed, 51 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index ebf3b0e..2cdb4e7 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -907,24 +907,35 @@ static const struct user_regset_view user_ppc_native_view 
= {
 #ifdef CONFIG_PPC64
 #include 
 
-static int gpr32_get(struct task_struct *target,
+static int gpr32_get_common(struct task_struct *target,
 const struct user_regset *regset,
 unsigned int pos, unsigned int count,
-void *kbuf, void __user *ubuf)
+   void *kbuf, void __user *ubuf, bool tm_active)
 {
const unsigned long *regs = &target->thread.regs->gpr[0];
+   const unsigned long *ckpt_regs;
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
 
-   if (target->thread.regs == NULL)
-   return -EIO;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   ckpt_regs = &target->thread.ckpt_regs.gpr[0];
+#endif
+   if (tm_active) {
+   regs = ckpt_regs;
+   } else {
+   if (target->thread.regs == NULL)
+   return -EIO;
 
-   if (!FULL_REGS(target->thread.regs)) {
-   /* We have a partial register set.  Fill 14-31 with bogus 
values */
-   for (i = 14; i < 32; i++)
-   target->thread.regs->gpr[i] = NV_REG_POISON; 
+   if (!FULL_REGS(target->thread.regs)) {
+   /*
+* We have a partial register set.
+* Fill 14-31 with bogus values.
+*/
+   for (i = 14; i < 32; i++)
+   target->thread.regs->gpr[i] = NV_REG_POISON;
+   }
}
 
pos /= sizeof(reg);
@@ -964,20 +975,31 @@ static int gpr32_get(struct task_struct *target,
PT_REGS_COUNT * sizeof(reg), -1);
 }
 
-static int gpr32_set(struct task_struct *target,
+static int gpr32_set_common(struct task_struct *target,
 const struct user_regset *regset,
 unsigned int pos, unsigned int count,
-const void *kbuf, const void __user *ubuf)
+const void *kbuf, const void __user *ubuf, bool tm_active)
 {
unsigned long *regs = &target->thread.regs->gpr[0];
+   unsigned long *ckpt_regs;
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
 
-   if (target->thread.regs == NULL)
-   return -EIO;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   ckpt_regs = &target->thread.ckpt_regs.gpr[0];
+#endif
 
-   CHECK_FULL_REGS(target->thread.regs);
+   if (tm_active) {
+   regs = ckpt_regs;
+   } else {
+   regs = &target->thread.regs->gpr[0];
+
+   if (target->thread.regs == NULL)
+   return -EIO;
+
+   CHECK_FULL_REGS(target->thread.regs);
+   }
 
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -1037,6 +1059,22 @@ static int gpr32_set(struct task_struct *target,
 (PT_TRAP + 1) * sizeof(reg), -1);
 }
 
+static int gpr32_get(struct task_struct *target,
+const struct user_regset *regset,
+unsigned int pos, unsigned int count,
+void *kbuf, void __user *ubuf)
+{
+   return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
+static int gpr32_set(struct task_struct *target,
+const struct user_regset *regset,
+unsigned int pos, unsigned int count,
+const void *kbuf, const void __user *ubuf)
+{
+   return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
+}
+
 /*
  * These are the regset flavors matching the CONFIG_PPC32 native set.
  */
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 05/30] powerpc/ptrace: Enable in transaction NT_PPC_VSX ptrace requests

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables in-transaction NT_PPC_VSX ptrace requests. The
function vsr_get, which gets the running value of all VSX registers,
and the function vsr_set, which sets the running value of all VSX
registers, work on the running set of VSX registers, whose location
will be different if a transaction is active. This patch makes these
functions adapt to the case where a transaction is active.
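
A minimal sketch of what this means for a tracer follows: with the
tracee stopped inside an active transaction, NT_PPC_VSX now reports the
running values while the checkpointed note added elsewhere in this
series (NT_PPC_CVSX) reports the fallback values. compare_vsx_views is
an illustrative name and both note numbers are assumed to be visible to
userspace.

#include <elf.h>		/* NT_PPC_VSX and NT_PPC_CVSX, assumed visible */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Dump the running and checkpointed VSX low doublewords side by side */
static void compare_vsx_views(pid_t pid)
{
	uint64_t running[32], ckpt[32];
	struct iovec run_iov = { .iov_base = running, .iov_len = sizeof(running) };
	struct iovec ck_iov = { .iov_base = ckpt, .iov_len = sizeof(ckpt) };
	int i;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_VSX, &run_iov) == -1 ||
	    ptrace(PTRACE_GETREGSET, pid, (void *)NT_PPC_CVSX, &ck_iov) == -1) {
		perror("PTRACE_GETREGSET");
		return;
	}

	for (i = 0; i < 32; i++)
		printf("VSR%-2d running %016llx checkpointed %016llx\n", i,
		       (unsigned long long)running[i],
		       (unsigned long long)ckpt[i]);
}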

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 64 
 1 file changed, 64 insertions(+)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3baa57e..ebf3b0e 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -681,6 +681,21 @@ static int vsr_active(struct task_struct *target,
return target->thread.used_vsr ? regset->n : 0;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
 static int vsr_get(struct task_struct *target, const struct user_regset 
*regset,
   unsigned int pos, unsigned int count,
   void *kbuf, void __user *ubuf)
@@ -688,16 +703,47 @@ static int vsr_get(struct task_struct *target, const 
struct user_regset *regset,
u64 buf[32];
int ret, i;
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+#endif
flush_vsx_to_thread(target);
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.
+   transact_fp.fpr[i][TS_VSRLOWOFFSET];
+   } else {
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.
+   fp_state.fpr[i][TS_VSRLOWOFFSET];
+   }
+#else
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
  buf, 0, 32 * sizeof(double));
 
return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of all
+ * the FPR registers, needs to know whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
 static int vsr_set(struct task_struct *target, const struct user_regset 
*regset,
   unsigned int pos, unsigned int count,
   const void *kbuf, const void __user *ubuf)
@@ -705,12 +751,30 @@ static int vsr_set(struct task_struct *target, const 
struct user_regset *regset,
u64 buf[32];
int ret,i;
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   flush_fp_to_thread(target);
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+#endif
flush_vsx_to_thread(target);
 
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 buf, 0, 32 * sizeof(double));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+   for (i = 0; i < 32 ; i++)
+   target->thread.transact_fp.
+   fpr[i][TS_VSRLOWOFFSET] = buf[i];
+   } else {
+   for (i = 0; i < 32 ; i++)
+   target->thread.fp_state.
+   fpr[i][TS_VSRLOWOFFSET] = buf[i];
+   }
+#else
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+#endif
 
 
return ret;
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 04/30] powerpc/ptrace: Enable in transaction NT_PPC_VMX ptrace requests

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables in-transaction NT_PPC_VMX ptrace requests. The
function vr_get, which gets the running value of all VMX registers,
and the function vr_set, which sets the running value of all VMX
registers, work on the running set of VMX registers, whose location
will be different if a transaction is active. This patch makes these
functions adapt to the case where a transaction is active.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 90 ++--
 1 file changed, 87 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 82db082..3baa57e 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -518,10 +518,28 @@ static int vr_active(struct task_struct *target,
return target->thread.used_vr ? regset->n : 0;
 }
 
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * gets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128   vr[32];
+ * vector128   vscr;
+ * vector128   vrsave;
+ * };
+ */
 static int vr_get(struct task_struct *target, const struct user_regset *regset,
  unsigned int pos, unsigned int count,
  void *kbuf, void __user *ubuf)
 {
+   struct thread_vr_state *addr;
int ret;
 
flush_altivec_to_thread(target);
@@ -529,8 +547,19 @@ static int vr_get(struct task_struct *target, const struct 
user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 offsetof(struct thread_vr_state, vr[32]));
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+   flush_fp_to_thread(target);
+   flush_tmregs_to_thread(target);
+   addr = &target->thread.transact_vr;
+   } else {
+   addr = &target->thread.vr_state;
+   }
+#else
+   addr = &target->thread.vr_state;
+#endif
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
+ addr, 0,
  33 * sizeof(vector128));
if (!ret) {
/*
@@ -541,7 +570,16 @@ static int vr_get(struct task_struct *target, const struct 
user_regset *regset,
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   if (MSR_TM_ACTIVE(target->thread.regs->msr))
+   vrsave.word = target->thread.transact_vrsave;
+   else
+   vrsave.word = target->thread.vrsave;
+#else
vrsave.word = target->thread.vrsave;
+#endif
+
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
  33 * sizeof(vector128), -1);
}
@@ -549,10 +587,28 @@ static int vr_get(struct task_struct *target, const 
struct user_regset *regset,
return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_vr' holds the current running
+ * value of all the VMX registers and 'vr_state' holds the last checkpointed
+ * value of all the VMX registers for the current transaction to fall back
+ * on in case it aborts. When transaction is not active 'vr_state' holds
+ * the current running state of all the VMX registers. So this function which
+ * sets the current running values of all the VMX registers, needs to know
+ * whether any transaction is active or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128   vr[32];
+ * vector128   vscr;
+ * vector128   vrsave;
+ * };
+ */
 static int vr_set(struct task_struct *target, const struct user_regset *regset,
  unsigned int pos, unsigned int count,
  const void *kbuf, const void __user *ubuf)
 {
+   struct thread_vr_state *addr;
int ret;
 
flush_altivec_to_thread(target);
@@ -560,8 +616,19 @@ static int vr_set(struct task_struct *target, const struct 
user_regset *regset,
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
 offsetof(struct thread_vr_state, vr[32]));
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+   if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+   flush_fp_to_thread(target);
+   flush_tmregs_to_thread(target);
+   addr = &targe

[PATCH v13 03/30] powerpc/ptrace: Enable in transaction NT_PRFPREG ptrace requests

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch enables in-transaction NT_PRFPREG ptrace requests.
The function fpr_get, which gets the running value of all FPR
registers, and the function fpr_set, which sets the running
value of all FPR registers, work on the running set of FPR
registers, whose location will be different if a transaction is
active. This patch makes these functions adapt to situations
when the transaction is active.

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/kernel/ptrace.c | 93 ++--
 1 file changed, 89 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 060b140..82db082 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -358,6 +358,29 @@ static int gpr_set(struct task_struct *target, const 
struct user_regset *regset,
return ret;
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which returns the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determine the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_get(struct task_struct *target, const struct user_regset 
*regset,
   unsigned int pos, unsigned int count,
   void *kbuf, void __user *ubuf)
@@ -368,14 +391,31 @@ static int fpr_get(struct task_struct *target, const 
struct user_regset *regset,
 #endif
flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+   /* copy to local buffer then write that out */
+   if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
+   flush_altivec_to_thread(target);
+   flush_tmregs_to_thread(target);
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.TS_TRANS_FPR(i);
+   buf[32] = target->thread.transact_fp.fpscr;
+   } else {
+   for (i = 0; i < 32 ; i++)
+   buf[i] = target->thread.TS_FPR(i);
+   buf[32] = target->thread.fp_state.fpscr;
+   }
+   return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
+
+#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+#endif
 
-#else
+#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
 offsetof(struct thread_fp_state, fpr[32]));
 
@@ -384,6 +424,29 @@ static int fpr_get(struct task_struct *target, const 
struct user_regset *regset,
 #endif
 }
 
+/*
+ * When the transaction is active, 'transact_fp' holds the current running
+ * value of all FPR registers and 'fp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction. When transaction
+ * is not active 'fp_state' holds the current running state of all the FPR
+ * registers. So this function which sets the current running values of
+ * all the FPR registers, needs to know whether any transaction is active
+ * or not.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
+ * which determine the final code in this function. All the combinations of
+ * these two config options are possible except the one below as transactional
+ * memory config pulls in CONFIG_VSX automatically.
+ *
+ * !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
+ */
 static int fpr_set(struct task_struct *target, const struct user_regset 
*regset,
   unsigned int pos, unsigned int count,
   const void *kbuf, const void __user *ubuf)
@@ -394,7 +457,27 @@ static int fpr_set(struct task_struct *target, const 
struct user_regset *regset,
 #endif
flush_fp_to_thread(target);
 
-#ifdef CONFIG_VSX
+#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTI

[PATCH v13 02/30] powerpc/process: Add the function flush_tmregs_to_thread

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch creates a function flush_tmregs_to_thread which
will then be used by subsequent patches in this series. The
function checks for self-tracing ptrace interface attempts
while in the TM context and logs an appropriate warning message.
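
For reference, a minimal sketch of the intended call pattern (mirroring
the later patches in this series): a TM-aware regset accessor flushes the
live FP/VMX/TM state into the thread_struct before reading or writing
checkpointed registers.

	#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		flush_fp_to_thread(target);
		flush_altivec_to_thread(target);
		flush_tmregs_to_thread(target);	/* warns once if target == current */
	#endif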

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 arch/powerpc/include/asm/switch_to.h |  8 
 arch/powerpc/kernel/process.c| 20 
 2 files changed, 28 insertions(+)

diff --git a/arch/powerpc/include/asm/switch_to.h 
b/arch/powerpc/include/asm/switch_to.h
index 17c8380..0a74ebe 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -75,6 +75,14 @@ static inline void disable_kernel_spe(void)
 static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+extern void flush_tmregs_to_thread(struct task_struct *);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 static inline void clear_task_ebb(struct task_struct *t)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0b93893..e9f7f52 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1051,6 +1051,26 @@ static inline void restore_sprs(struct thread_struct 
*old_thread,
 #endif
 }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+   /*
+* Process self tracing is not yet supported through
+* ptrace interface. Ptrace generic code should have
+* prevented this from happening in the first place.
+* Warn once here with the message, if somehow it
+* is attempted.
+*/
+   WARN_ONCE(tsk == current,
+   "Not expecting ptrace on self: TM regs may be incorrect\n");
+
+   /*
+* If task is not current, it should have been flushed
+* already to its thread_struct during __switch_to().
+*/
+}
+#endif
+
 struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
 {
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 01/30] elf: Add powerpc specific core note sections

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds thirteen ELF core note sections for the powerpc
architecture for various registers and register sets which
need to be accessed through the ptrace interface and then gdb.
These additions include special purpose registers like TAR,
PPR, DSCR, TM running and checkpointed state for various
register sets, the EBB related register set, the performance
monitor register set, etc. Addition of these new ELF core note
sections extends the existing ELF ABI on the powerpc arch without
affecting it in any manner.
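
As a hedged illustration (not part of this patch) of how a later patch in
the series might expose one of these notes through the generic regset
machinery, using hypothetical tar_get()/tar_set() helpers and a
hypothetical REGSET_TAR enum value:

	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},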

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 include/uapi/linux/elf.h | 13 +
 1 file changed, 13 insertions(+)

diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index cb4a72f..1be3c5f 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -381,6 +381,19 @@ typedef struct elf64_shdr {
 #define NT_PPC_VMX 0x100   /* PowerPC Altivec/VMX registers */
 #define NT_PPC_SPE 0x101   /* PowerPC SPE/EVR registers */
 #define NT_PPC_VSX 0x102   /* PowerPC VSX registers */
+#define NT_PPC_TAR 0x103   /* Target Address Register */
+#define NT_PPC_PPR 0x104   /* Program Priority Register */
+#define NT_PPC_DSCR0x105   /* Data Stream Control Register */
+#define NT_PPC_EBB 0x106   /* Event Based Branch Registers */
+#define NT_PPC_PMU 0x107   /* Performance Monitor Registers */
+#define NT_PPC_TM_CGPR 0x108   /* TM checkpointed GPR Registers */
+#define NT_PPC_TM_CFPR 0x109   /* TM checkpointed FPR Registers */
+#define NT_PPC_TM_CVMX 0x10a   /* TM checkpointed VMX Registers */
+#define NT_PPC_TM_CVSX 0x10b   /* TM checkpointed VSX Registers */
+#define NT_PPC_TM_SPR  0x10c   /* TM Special Purpose Registers */
+#define NT_PPC_TM_CTAR 0x10d   /* TM checkpointed Target Address 
Register */
+#define NT_PPC_TM_CPPR 0x10e   /* TM checkpointed Program Priority 
Register */
+#define NT_PPC_TM_CDSCR0x10f   /* TM checkpointed Data Stream 
Control Register */
 #define NT_386_TLS 0x200   /* i386 TLS slots (struct user_desc) */
 #define NT_386_IOPERM  0x201   /* x86 io permission bitmap (1=deny) */
 #define NT_X86_XSTATE  0x202   /* x86 extended state using xsave */
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v13 00/30] Add new powerpc specific ELF core notes

2016-07-27 Thread wei . guo . simon
From: Simon Guo 

This patch series adds thirteen new ELF core note sections which can 
be used with the existing ptrace requests PTRACE_GETREGSET/PTRACE_SETREGSET 
for accessing various transactional memory and other miscellaneous debug 
register sets on the powerpc platform. 
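
As a rough userspace sketch (assuming a libc elf.h that does not yet carry
the new constant, so it is defined locally; the layout follows the
NT_PPC_TM_CFPR description in this series), a debugger could fetch the TM
checkpointed FPR set like this:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <stdint.h>

	#ifndef NT_PPC_TM_CFPR
	#define NT_PPC_TM_CFPR	0x109		/* added by patch 01 of this series */
	#endif

	struct tm_cfpr { uint64_t fpr[32]; uint64_t fpscr; };

	static long read_ckpt_fprs(pid_t pid, struct tm_cfpr *regs)
	{
		struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

		/* the note type is passed as the 'addr' argument */
		return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CFPR, &iov);
	}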

Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 

Test Result (All tests pass on both BE and LE) 
-- 
ptrace-ebb  PASS 
ptrace-gpr  PASS 
ptrace-tm-gpr   PASS 
ptrace-tm-spd-gpr   PASS 
ptrace-tar  PASS 
ptrace-tm-tar   PASS 
ptrace-tm-spd-tar   PASS 
ptrace-vsx  PASS 
ptrace-tm-vsx   PASS 
ptrace-tm-spd-vsx   PASS 
ptrace-tm-spr   PASS 

Previous versions: 
== 
RFC: https://lkml.org/lkml/2014/4/1/292
V1:  https://lkml.org/lkml/2014/4/2/43
V2:  https://lkml.org/lkml/2014/5/5/88
V3:  https://lkml.org/lkml/2014/5/23/486
V4:  https://lkml.org/lkml/2014/11/11/6
V5:  https://lkml.org/lkml/2014/11/25/134
V6:  https://lkml.org/lkml/2014/12/2/98
V7:  https://lkml.org/lkml/2015/1/14/19
V8:  https://lkml.org/lkml/2015/5/19/700
V9:  https://lkml.org/lkml/2015/10/8/522
V10: https://lkml.org/lkml/2016/2/16/219
V11: https://lkml.org/lkml/2016/7/16/231
V12: https://lkml.org/lkml/2016/7/27/134

Changes in V13: 
--- 
- Remove Cc lines from changelog
- Add more Signed-off-by lines of Simon Guo

Changes in V12: 
--- 
- Revert the change which was trying to incorporate the following patch:
  [PATCH 3/5] powerpc: tm: Always use fp_state and vr_state to store live 
registers
- Release share memory resource in all self test cases
- Optimize tfhar usage in ptrace-tm-spr.c

Changes in V11: 
--- 
- Rework based on following patch:
  [PATCH 3/5] powerpc: tm: Always use fp_state and vr_state to store live 
registers
- Split EBB/PMU register ptrace implementation.
- Clean some coding style warning
- Added more shared memory based sync between parent and child during TM tests
- Reworded some of the commit messages and cleaned them up
- selftests/powerpc/ebb/reg.h has already moved as selftests/powerpc/reg.h
  Dropped the previous patch doing the same thing
- Combined the definitions of SPRN_DSCR from dscr/ test cases
- Fixed dscr/ test cases for new SPRN_DSCR_PRIV definition available

Changes in V10: 
--- 
- Rebased against the latest mainline 
- Fixed couple of build failures in the test cases related to aux vector 

Changes in V9: 
-- 
- Fixed static build check failure after tm_orig_msr got dropped 
- Fixed asm volatile construct for used registers set 
- Fixed EBB, VSX, VMX tests for LE 
- Fixed TAR test which was failing because of system calls 
- Added checks for PPC_FEATURE2_HTM aux feature in the tests 
- Fixed copyright statements 

Changes in V8: 
-- 
- Split the misc register set into individual ELF core notes 
- Implemented support for VSX register set (on and off TM) 
- Implemented support for EBB register set 
- Implemented review comments on previous versions 
- Some code re-arrangements, re-writes and documentation 
- Added comprehensive list of test cases into selftests 

Changes in V7: 
-- 
- Fixed a config directive in the MISC code 
- Merged the two gitignore patches into a single one 

Changes in V6: 
-- 
- Added two git ignore patches for powerpc selftests 
- Re-formatted all in-code function definitions in kernel-doc format 

Changes in V5: 
-- 
- Changed flush_tmregs_to_thread, so not to take into account self tracing 
- Dropped the 3rd patch in the series which had merged two functions 
- Fixed one build problem for the misc debug register patch 
- Accommodated almost all the review comments from Suka on the 6th patch 
- Minor changes to the self test program 
- Changed commit messages for some of the patches 

Changes in V4: 
-- 
- Added one test program into the powerpc selftest bucket in this regard 
- Split the 2nd patch in the previous series into four different patches 
- Accommodated most of the review comments on the previous patch series 
- Added a patch to merge functions __switch_to_tm and tm_reclaim_task 

Changes in V3: 
-- 
- Added two new error paths in every TM related get/set function when regset 
support is not present on the system (ENODEV) or when the process does not 
have any transaction active (ENODATA) in the context 
- Installed the active hooks for all the newly added regset core note types 

Changes in V2: 
-- 
- Removed all the power specific ptrace requests corresponding to new NT_PPC_* 
elf core note types. Now all the register sets can be accessed from ptrace 
through PTRACE_GETREGSET/PTRACE_SETREGSET using the individual NT_PPC_* core 
note type instead 
- Fixed couple of attribute values for REGSET_TM_CGPR register set 
- Renamed flush_tmreg_to_thread as flush_tmregs_to_thread 
- Fixed 32 bit checkpointed GPR support 
- Changed commit messages accordingly 
-

Anshuman Khandual

Re: [PATCH v2] powernv/pci: Add PHB register dump debugfs handle

2016-07-27 Thread Michael Ellerman
Gavin Shan  writes:

> On Wed, Jul 27, 2016 at 04:14:04PM +1000, Russell Currey wrote:
>>diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c 
>>b/arch/powerpc/platforms/powernv/pci-ioda.c
>>index 891fc4a..2b9f114 100644
>>--- a/arch/powerpc/platforms/powernv/pci-ioda.c
>>+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
>>@@ -3018,6 +3018,42 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe 
>>*pe)
>>  }
>> }
>>
>>+#ifdef CONFIG_DEBUG_FS
>>+static int pnv_pci_diag_data_set(void *data, u64 val)
>>+{
>>+ struct pci_controller *hose;
>>+ struct pnv_phb *phb;
>>+ int ret;
>
> s/int/int64_t

Actually please stick to the kernel types, so s64.

>>+ /* Retrieve the diag data from firmware */
>
> Unnecessary comments as the code is obvious.

Only if you know the OPAL API, for the rest of us that comment is fine IMHO.

>>+ ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
>>+   PNV_PCI_DIAG_BUF_SIZE);
>>+ if (ret != OPAL_SUCCESS)
>>+ return -EIO;
>>+
>>+ /* Print the diag data to the kernel log */

And that comment is very helpful, because the expectation is that the output
would go to the debugfs file.

>>@@ -3033,9 +3069,14 @@ static void pnv_pci_ioda_create_dbgfs(void)
>>
>>  sprintf(name, "PCI%04x", hose->global_number);
>>  phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
>>- if (!phb->dbgfs)
>>+ if (!phb->dbgfs) {
>>  pr_warning("%s: Error on creating debugfs on PHB#%x\n",
>>  __func__, hose->global_number);
>>+ continue;
>>+ }
>>+
>>+ debugfs_create_file("dump_regs", 0200, phb->dbgfs, hose,
>>+ &pnv_pci_diag_data_fops);
>
> I still think "diag-data" is more indicative. It's also consistent with
> the handler's name (pnv_pci_diag_data_set())?

"dump_regs" is better because it's clearly a verb phrase. Which makes it clear
that manipulating the file causes something to happen.

"diag-data" could be a verb phrase, if you read "diag" as "diagnose", in which
case it would mean "diagnose the data". But that's not what the file does, it
doesn't diagnose anything, it just dumps some registers.

Most likely people will read "diag" as "diagnostic", in which case the name
means "diagnostic data" - which is a noun phrase. That indicates the file *is*
something, ie. an object, so when I read it I would expect to see the object,
and when I write to the file I would expect that to mutate the object.

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: linux-next: build failure after merge of the kbuild tree

2016-07-27 Thread Michael Ellerman
Stephen Rothwell  writes:

> Hi Michal,
>
> After merging the kbuild tree, today's linux-next build (powerpc
> ppc64_defconfig) failed like this:
>
...
> arch/powerpc/mm/hash_utils_64.c:929:10: note: in expansion of macro 
> 'IS_ENABLED'
>   else if IS_ENABLED(CONFIG_PPC_NATIVE)
>   ^
> cc1: all warnings being treated as errors
>
> Caused by commit
>
>   7353644fa9df ("powerpc/mm: Fix build break when PPC_NATIVE=n")
>
> from the powerpc tree interacting with commit
>
>   5e8754fd80b0 ("kconfig.h: allow to use IS_{ENABLE,REACHABLE} in macro 
> expansion")
>
> from the kbuild tree.

**expletive deleted**

I was wary of IS_ENABLED(), I should have trusted my instincts.

> I have applied the following fix patch (that should be applied to the
> powerpc tree):

Applied, thanks.

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v2] Make system_reset_pSeries relocatable

2016-07-27 Thread Balbir Singh
Currently the power management bits are broken w.r.t. relocation.
There are direct branches from system_reset_pSeries to
power7_wakeup_*. The correct way to do it is to do what
the SLB miss handler does, which is to jump to a small stub within
the first 64k of the relocated address space and then jump to the
actual location.

The code has been lightly tested (not the KVM bits); I would highly
appreciate a review of the code. I suspect there might be some
easy-to-find bugs :)

Cc: b...@kernel.crashing.org
Cc: m...@ellerman.id.au
Cc: pau...@samba.org
Cc: npig...@gmail.com

Signed-off-by: Balbir Singh 
---
v2:
Fix broken compile for other ppc architectures,
added #ifdef CONFIG_PPC_P7_NAP

 arch/powerpc/kernel/exceptions-64s.S | 84 +++-
 1 file changed, 53 insertions(+), 31 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64s.S 
b/arch/powerpc/kernel/exceptions-64s.S
index 8bcc1b4..d5e0e96 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -118,39 +118,21 @@ BEGIN_FTR_SECTION
cmpwi   cr4,r5,1
mtspr   SPRN_HSPRG0,r13
 
-   lbz r0,PACA_THREAD_IDLE_STATE(r13)
-   cmpwi   cr2,r0,PNV_THREAD_NAP
-   bgt cr2,8f  /* Either sleep or Winkle */
-
-   /* Waking up from nap should not cause hypervisor state loss */
-   bgt cr3,.
-
-   /* Waking up from nap */
-   li  r0,PNV_THREAD_RUNNING
-   stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-   li  r0,KVM_HWTHREAD_IN_KERNEL
-   stb r0,HSTATE_HWTHREAD_STATE(r13)
-   /* Order setting hwthread_state vs. testing hwthread_req */
-   sync
-   lbz r0,HSTATE_HWTHREAD_REQ(r13)
-   cmpwi   r0,0
-   beq 1f
-   b   kvm_start_guest
-1:
+#ifndef CONFIG_RELOCATABLE
+   b   power7_wakeup_common
+#else
+   /*
+* We can't just use a direct branch to power7_wakeup_common
+* because the distance from here to there depends on where
+* the kernel ends up being put.
+*/
+   mfctr   r11
+   ld  r10, PACAKBASE(r13)
+   LOAD_HANDLER(r10, power7_wakeup_common)
+   mtctr   r10
+   bctr
 #endif
 
-   /* Return SRR1 from power7_nap() */
-   mfspr   r3,SPRN_SRR1
-   beq cr3,2f
-   b   power7_wakeup_noloss
-2: b   power7_wakeup_loss
-
-   /* Fast Sleep wakeup on PowerNV */
-8: GET_PACA(r13)
-   b   power7_wakeup_tb_loss
-
 9:
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif /* CONFIG_PPC_P7_NAP */
@@ -1448,6 +1430,46 @@ power4_fixup_nap:
blr
 #endif
 
+#ifdef CONFIG_PPC_P7_NAP
+   .align 7
+_GLOBAL(power7_wakeup_common)
+#ifdef CONFIG_RELOCATABLE
+   mtctr   r11
+#endif
+   lbz r0,PACA_THREAD_IDLE_STATE(r13)
+   cmpwi   cr2,r0,PNV_THREAD_NAP
+   bgt cr2,8f  /* Either sleep or Winkle */
+
+   /* Waking up from nap should not cause hypervisor state loss */
+   bgt cr3,.
+
+   /* Waking up from nap */
+   li  r0,PNV_THREAD_RUNNING
+   stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+   li  r0,KVM_HWTHREAD_IN_KERNEL
+   stb r0,HSTATE_HWTHREAD_STATE(r13)
+   /* Order setting hwthread_state vs. testing hwthread_req */
+   sync
+   lbz r0,HSTATE_HWTHREAD_REQ(r13)
+   cmpwi   r0,0
+   beq 1f
+   b   kvm_start_guest
+1:
+#endif
+
+   /* Return SRR1 from power7_nap() */
+   mfspr   r3,SPRN_SRR1
+   beq cr3,2f
+   b   power7_wakeup_noloss
+2: b   power7_wakeup_loss
+
+   /* Fast Sleep wakeup on PowerNV */
+8: GET_PACA(r13)
+   b   power7_wakeup_tb_loss
+#endif
+
 /*
  * Hash table stuff
  */
-- 
2.5.5

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

linux-next: build failure after merge of the kbuild tree

2016-07-27 Thread Stephen Rothwell
Hi Michal,

After merging the kbuild tree, today's linux-next build (powerpc
ppc64_defconfig) failed like this:

In file included from :0:0:
arch/powerpc/mm/hash_utils_64.c: In function 'hash__early_init_mmu':
include/linux/kconfig.h:19:65: error: expected '(' before numeric constant
 #define or(arg1_or_junk, y)  __take_second_arg(arg1_or_junk 1, y)
 ^
include/linux/kconfig.h:7:48: note: in definition of macro '__take_second_arg'
 #define __take_second_arg(__ignored, val, ...) val
^
include/linux/kconfig.h:18:23: note: in expansion of macro 'or'
 #define ___or(x, y)   or(__ARG_PLACEHOLDER_##x, y)
   ^
include/linux/kconfig.h:17:22: note: in expansion of macro '___or'
 #define __or(x, y)   ___or(x, y)   
  ^
include/linux/kconfig.h:65:28: note: in expansion of macro '__or'
 #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
^
arch/powerpc/mm/hash_utils_64.c:929:10: note: in expansion of macro 'IS_ENABLED'
  else if IS_ENABLED(CONFIG_PPC_NATIVE)
  ^
include/linux/kconfig.h:19:65: error: statement with no effect 
[-Werror=unused-value]
 #define or(arg1_or_junk, y)  __take_second_arg(arg1_or_junk 1, y)
 ^
include/linux/kconfig.h:7:48: note: in definition of macro '__take_second_arg'
 #define __take_second_arg(__ignored, val, ...) val
^
include/linux/kconfig.h:18:23: note: in expansion of macro 'or'
 #define ___or(x, y)   or(__ARG_PLACEHOLDER_##x, y)
   ^
include/linux/kconfig.h:17:22: note: in expansion of macro '___or'
 #define __or(x, y)   ___or(x, y)   
  ^
include/linux/kconfig.h:65:28: note: in expansion of macro '__or'
 #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
^
arch/powerpc/mm/hash_utils_64.c:929:10: note: in expansion of macro 'IS_ENABLED'
  else if IS_ENABLED(CONFIG_PPC_NATIVE)
  ^
cc1: all warnings being treated as errors

Caused by commit

  7353644fa9df ("powerpc/mm: Fix build break when PPC_NATIVE=n")

from the powerpc tree interacting with commit

  5e8754fd80b0 ("kconfig.h: allow to use IS_{ENABLE,REACHABLE} in macro 
expansion")

from the kbuild tree.

I have applied the following fix patch (that should be applied to the
powerpc tree):

From: Stephen Rothwell 
Date: Thu, 28 Jul 2016 12:03:25 +1000
Subject: [PATCH] powerpc/mm: parenthesise the if condition

The breakage here used to be hidden by the macro expansion.

Fixes: 7353644fa9df ("powerpc/mm: Fix build break when PPC_NATIVE=n")
Signed-off-by: Stephen Rothwell 
---
 arch/powerpc/mm/hash_utils_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ff11c1bb182..b78b5d211278 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -926,7 +926,7 @@ void __init hash__early_init_mmu(void)
ps3_early_mm_init();
else if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_pseries();
-   else if IS_ENABLED(CONFIG_PPC_NATIVE)
+   else if (IS_ENABLED(CONFIG_PPC_NATIVE))
hpte_init_native();
 
if (!mmu_hash_ops.hpte_insert)
-- 
2.8.1

-- 
Cheers,
Stephen Rothwell
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH v2] powernv/pci: Add PHB register dump debugfs handle

2016-07-27 Thread Gavin Shan
On Wed, Jul 27, 2016 at 04:14:04PM +1000, Russell Currey wrote:
>On EEH events the kernel will print a dump of relevant registers.
>If EEH is unavailable (i.e. CONFIG_EEH is disabled, a new platform
>doesn't have EEH support, etc) this information isn't readily available.
>
>Add a new debugfs handler to trigger a PHB register dump, so that this
>information can be made available on demand.
>
>Signed-off-by: Russell Currey 

Reviewed-by: Gavin Shan 

>---
>V2 changes:
>   - use a simple attribute instead of full fops thanks to mpe
>   - miscellanous fixes thanks to Gavin
>   - rename from "regdump" to "dump_regs"
>---
> arch/powerpc/platforms/powernv/pci-ioda.c | 43 ++-
> 1 file changed, 42 insertions(+), 1 deletion(-)
>
>diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c 
>b/arch/powerpc/platforms/powernv/pci-ioda.c
>index 891fc4a..2b9f114 100644
>--- a/arch/powerpc/platforms/powernv/pci-ioda.c
>+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
>@@ -3018,6 +3018,42 @@ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe 
>*pe)
>   }
> }
>
>+#ifdef CONFIG_DEBUG_FS
>+static int pnv_pci_diag_data_set(void *data, u64 val)
>+{
>+  struct pci_controller *hose;
>+  struct pnv_phb *phb;
>+  int ret;

s/int/int64_t

>+
>+  if (val != 1)
>+  return -EINVAL;

if (val != 1ULL)

>+
>+  hose = (struct pci_controller *)data;
>+  if (!hose)
>+  return -EFAULT;
>+
>+  phb = hose->private_data;
>+  if (!phb)
>+  return -EFAULT;

hose = (struct pci_controller *)data;
if (!hose || !hose->private_data)
return -ENODEV;

phb = hose->private_data;

>+
>+  /* Retrieve the diag data from firmware */

Unnecessary comments as the code is obvious.

>+  ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
>+PNV_PCI_DIAG_BUF_SIZE);
>+  if (ret != OPAL_SUCCESS)
>+  return -EIO;
>+
>+  /* Print the diag data to the kernel log */

Same as above.

>+  pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
>+  return 0;
>+}
>+
>+DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops,
>+  NULL,
>+  pnv_pci_diag_data_set,
>+  "%llu\n");
>+

Can be squeezed to 2 lines:

DEFINE_SIMPLE_ATTRIBUTE(pnv_pci_diag_data_fops, NULL,
pnv_pci_diag_data_set, "%llu\n");

>+#endif /* CONFIG_DEBUG_FS */
>+
> static void pnv_pci_ioda_create_dbgfs(void)
> {
> #ifdef CONFIG_DEBUG_FS
>@@ -3033,9 +3069,14 @@ static void pnv_pci_ioda_create_dbgfs(void)
>
>   sprintf(name, "PCI%04x", hose->global_number);
>   phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
>-  if (!phb->dbgfs)
>+  if (!phb->dbgfs) {
>   pr_warning("%s: Error on creating debugfs on PHB#%x\n",
>   __func__, hose->global_number);
>+  continue;
>+  }
>+
>+  debugfs_create_file("dump_regs", 0200, phb->dbgfs, hose,
>+  &pnv_pci_diag_data_fops);

I still think "diag-data" is more indicative. It's also consistent with
the handler's name (pnv_pci_diag_data_set())?

>   }
> #endif /* CONFIG_DEBUG_FS */
> }

Thanks,
Gavin

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH 12/15] cxl: Workaround PE=0 hardware limitation in Mellanox CX4

2016-07-27 Thread Andrew Donnellan

On 14/07/16 07:17, Ian Munsie wrote:

mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
-   i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
+   i = idr_alloc(&ctx->afu->contexts_idr, ctx,
+ ctx->afu->adapter->native->sl_ops->min_pe,


As it turns out, dereferencing ctx->afu->adapter->native doesn't exactly 
work on PowerVM...


Working on a fix.


Andrew

--
Andrew Donnellan  OzLabs, ADL Canberra
andrew.donnel...@au1.ibm.com  IBM Australia Limited

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers

2016-07-27 Thread Benjamin Herrenschmidt
On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote:
> --- a/arch/powerpc/mm/hash_utils_64.c
> +++ b/arch/powerpc/mm/hash_utils_64.c
> @@ -530,7 +530,7 @@ static bool might_have_hea(void)
>  * we will never see an HEA ethernet device.
>  */
>  #ifdef CONFIG_IBMEBUS
> -   return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
> +   return !__cpu_has_feature(CPU_FTR_ARCH_207S) &&
> !firmware_has_feature(FW_FEATURE_SPLPAR);
>  #else

All these could go if that function was split. The part that reads the
DT stays in early_init_mmu_devtree (bastically up to "found:" and then
the bit at the end that scans the huge pages).

The rest, which just assigns the various mmu_*_psize can go into
eary_init_mmu(). That means the only conversion needed is the one
below:

> return false;
> @@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void)
>  * Not in the device-tree, let's fallback on known size
>  * list for 16M capable GP & GR
>  */
> -   if (mmu_has_feature(MMU_FTR_16M_PAGE))
> +   if (__mmu_has_feature(MMU_FTR_16M_PAGE))
> memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
>    sizeof(mmu_psize_defaults_gp));
>  found:

And the rest can remain.

> @@ -591,7 +591,7 @@ found:
> mmu_vmalloc_psize = MMU_PAGE_64K;
> if (mmu_linear_psize == MMU_PAGE_4K)
> mmu_linear_psize = MMU_PAGE_64K;
> -   if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
> +   if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
> /*
>  * When running on pSeries using 64k pages
> for ioremap
>  * would stop us accessing the HEA ethernet.
> So if we
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers

2016-07-27 Thread Benjamin Herrenschmidt
On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote:
> 
> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h
> b/arch/powerpc/include/asm/book3s/64/mmu.h
> index 70c995870297..6deda6ecc4f7 100644
> --- a/arch/powerpc/include/asm/book3s/64/mmu.h
> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h
> @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void);
>  extern void radix__early_init_mmu_secondary(void);
>  static inline void early_init_mmu_secondary(void)
>  {
> -   if (radix_enabled())
> +   if (__radix_enabled())
> return radix__early_init_mmu_secondary();
> return hash__early_init_mmu_secondary();
>  }

This one can go, no ?

Cheers,
Ben.

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [v3] UCC_GETH/UCC_FAST: Use IS_ERR_VALUE_U32 API to avoid IS_ERR_VALUE abuses.

2016-07-27 Thread arvind Yadav

I also agree with Arnd Bergmann. We should use a static inline function
instead of a macro to handle the error check.

On Tuesday 26 July 2016 05:09 PM, Arnd Bergmann wrote:

On Saturday, July 23, 2016 11:35:51 PM CEST Arvind Yadav wrote:

diff --git a/include/linux/err.h b/include/linux/err.h
index 1e35588..a42f942 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,6 +19,7 @@
  #ifndef __ASSEMBLY__
  
  #define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

+#define IS_ERR_VALUE_U32(x) unlikely((unsigned int)(x) >= (unsigned 
int)-MAX_ERRNO)
  
  static inline void * __must_check ERR_PTR(long error)

  {

This doesn't really look like something we want to have as a generic
interface. The IS_ERR_VALUE() API is rather awkward already, and your
use seems specific to the cpm_muram_alloc() function.

How about something like

int cpm_muram_error(unsigned long addr)
{
if (addr >= (unsigned long)-MAX_ERRNO)
return addr;
else
return 0;
}

and then use that to check the value returned by the allocation
that is still an 'unsigned long', before assigning it to a 'u32'.
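
To make the suggestion concrete, a rough usage sketch (cpm_muram_error()
is Arnd's proposed helper, not an existing API, and the u32 field name is
purely illustrative):

	unsigned long offset;
	int err;

	offset = cpm_muram_alloc(size, align);
	err = cpm_muram_error(offset);
	if (err)
		return err;			/* propagates -ENOMEM etc. */

	priv->rx_fifo_offset = (u32)offset;	/* now known not to be an error value */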

Arnd


___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH] pasemi: Fix coherent_dma_mask for dma engine

2016-07-27 Thread Darren Stevens
Commit 817820b0226a ("powerpc/iommu: Support "hybrid" iommu/direct DMA
ops for coherent_mask < dma_mask") adds a check of coherent_dma_mask
for dma allocations.
Unfortunately the current PASemi code does not set this value for the DMA
engine, so it ends up with the default value of 0x; the result
is that on a PASemi system with >2Gb RAM and the iommu enabled, the
onboard ethernet stops working due to an inability to allocate memory.
Add an initialisation to pci_dma_dev_setup_pasemi().
  
Signed-off-by: Darren Stevens 
  
---
diff --git a/arch/powerpc/platforms/pasemi/iommu.c 
b/arch/powerpc/platforms/pasemi/iommu.c
index c929644..81b334a 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -187,6 +187,11 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.archdata.dma_ops = &dma_direct_ops;
+   /*
+* Set the coherent DMA mask to prevent the iommu
+* being used unnecessarily
+*/
+   dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
return;
}
 #endif
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [V2,1/2] tty/hvc: Use IRQF_SHARED for OPAL hvc consoles

2016-07-27 Thread Michael Ellerman
On Mon, 2016-11-07 at 03:38:57 UTC, Sam Mendoza-Jonas wrote:
> Commit 2def86a7200c
> ("hvc: Convert to using interrupts instead of opal events")
> enabled the use of interrupts in the hvc_driver for OPAL platforms.
> However on machines with more than one hvc console, any console after
> the first will fail to register an interrupt handler in
> notifier_add_irq() since all consoles share the same IRQ number but do
> not set the IRQF_SHARED flag:
> 
> [   51.179907] genirq: Flags mismatch irq 31.  (hvc_console) vs.
>  (hvc_console)
> [   51.180010] hvc_open: request_irq failed with rc -16.
> 
> This error propagates up to hvc_open() and the console is closed, but
> OPAL will still generate interrupts that are not handled, leading to
> rcu_sched stall warnings.
> 
> Set IRQF_SHARED when calling request_irq, allowing additional consoles
> to start properly. This is only set for consoles handled by
> hvc_opal_probe(), leaving other types unaffected.
> 
> Signed-off-by: Samuel Mendoza-Jonas 
> Acked-by: Michael Ellerman 

Applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/bbc3dfe8805de86874b1a1b142

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PowerPC] Kernel OOPS while compiling LTP test suite on linus mainline

2016-07-27 Thread Michal Hocko
[CC linux-mm]

On Wed 27-07-16 16:45:35, Abdul Haleem wrote:
> Hi,
> 
> Kernel OOPS messages were seen while compiling linux test project (LTP) 
> source on 4.7.0-rc5 mainline.
> 
> Kernel config : pseries_le_defconfig
> Machine Type  : PowerVM LPAR
> Machine hardware : LPAR uses 16 vCPUs, and 29G memory
> 
> trace messages:
> *15:34:57* [  862.548866] Unable to handle kernel paging request for data at 
> address 0x
> *15:34:57* [  862.548904] Faulting instruction address: 0xc0260900
> *15:34:57* [  862.548911] Oops: Kernel access of bad area, sig: 11 [#1]
> *15:34:57* [  862.548917] SMP NR_CPUS=2048 NUMA pSeries
> *15:34:57* [  862.548924] Modules linked in: rtc_generic(E) pseries_rng(E) 
> autofs4(E)
> *15:34:57* [  862.548938] CPU: 0 PID: 129 Comm: kswapd2 Tainted: G
> E   4.7.0-rc5-autotest #1
> *15:34:57* [  862.548946] task: c007766a2600 ti: c00776764000 
> task.ti: c00776764000
> *15:34:57* [  862.548953] NIP: c0260900 LR: c026452c CTR: 
> 
> *15:34:57* [  862.548961] REGS: c00776767830 TRAP: 0300   Tainted: G  
>   E(4.7.0-rc5-autotest)
> *15:34:57* [  862.548968] MSR: 80010280b033 
>   CR: 24000222  XER: 2001
> *15:34:57* [  862.548996] CFAR: c0008468 DAR:  DSISR: 
> 4000 SOFTE: 0
> *15:34:57* GPR00: c026452c c00776767ab0 c13ac100 
> c0077ff54200
> *15:34:57* GPR04:  c00776767ba0 0001 
> c151c100
> *15:34:57* GPR08:  000b4057 8000 
> c0071664b7a0
> *15:34:57* GPR12:  ce80 0001 
> f15dc000
> *15:34:57* GPR16: c0077ff54700   
> c0077ff54700
> *15:34:57* GPR20: 0001 0100 0200 
> c0077ff54200
> *15:34:57* GPR24: c00776767ba0 0020  
> 0001
> *15:34:57* GPR28: 0010  c00776767ba0 
> f15dc020
> *15:34:57* [  862.549094] NIP [c0260900] 
> move_active_pages_to_lru.isra.16+0xa0/0x380
> *15:34:57* [  862.549102] LR [c026452c] shrink_active_list+0x2fc/0x510

Could you map this to the kernel source line please?

> *15:34:57* [  862.549108] Call Trace:
> *15:34:57* [  862.549112] [c00776767ab0] [f15dc000] 
> 0xf15dc000 (unreliable)
> *15:34:57* [  862.549122] [c00776767b60] [c026452c] 
> shrink_active_list+0x2fc/0x510
> *15:34:57* [  862.549131] [c00776767c50] [c02665d4] 
> kswapd+0x434/0xa70
> *15:34:57* [  862.549139] [c00776767d80] [c00f1b50] 
> kthread+0x110/0x130
> *15:34:57* [  862.549148] [c00776767e30] [c00095f0] 
> ret_from_kernel_thread+0x5c/0x6c
> *15:34:57* [  862.549155] Instruction dump:
> *15:34:57* [  862.549161] 6000 3b200020 3a81 7b7c26e4 3aa00100 
> 3ac00200 3a40 3a770500
> *15:34:57* [  862.549174] 3a20 6000 6000 6042  
> 7fbd4840 419e01b8 ebfd0008
> *15:34:57* [  862.549193] ---[ end trace fcc50906d9164c56 ]---
> *15:34:57* [  862.550562]
> *15:35:18* [  883.551577] INFO: rcu_sched self-detected stall on CPU
> *15:35:18* [  883.551578] INFO: rcu_sched self-detected stall on CPU
> *15:35:18* [  883.551588] 2-...: (5249 ticks this GP) 
> idle=cc5/141/0 softirq=50260/50260 fqs=5249
> *15:35:18* [  883.551591]  (t=5250 jiffies g=48365 c=48364 q=182)
> 
> Regard's
> Abdul

-- 
Michal Hocko
SUSE Labs
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [v2] selftests/powerpc: exec() with suspended transaction

2016-07-27 Thread Michael Ellerman
On Wed, 2016-29-06 at 11:41:51 UTC, Michael Ellerman wrote:
> From: Cyril Bur 
> 
> Perform an exec() class syscall with a suspended transaction.
> 
> Signed-off-by: Cyril Bur 
> [mpe: Fix build errors, use a single binary for the test]
> Signed-off-by: Michael Ellerman 

Applied to powerpc next.

https://git.kernel.org/powerpc/c/a431b946db581d6a121d035a88

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: powerpc: Improve comment explaining why we modify VRSAVE

2016-07-27 Thread Michael Ellerman
On Thu, 2016-19-05 at 18:41:34 UTC, Unknown sender due to SPF wrote:
> The comment explaining why we modify VRSAVE is misleading, glibc
> does rely on the behaviour. Update the comment.
> 
> Signed-off-by: Anton Blanchard 
> Reviewed-by: Cyril Bur 

Applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/dd57023747e33572b31867f890

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [3/3] powerpc/mm: Drop unused externs for hpte_init_beat[_v3]()

2016-07-27 Thread Michael Ellerman
On Mon, 2016-25-07 at 02:57:51 UTC, Michael Ellerman wrote:
> We removed the BEAT support in 2015 in commit bf4981a00636 ("powerpc:
> Remove the celleb support"). These externs are unused since then.
> 
> Signed-off-by: Michael Ellerman 

Applied to powerpc next.

https://git.kernel.org/powerpc/c/1a1cee843c4a532f57083ffe8c

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [V2,2/2] tty/hvc: Use opal irqchip interface if available

2016-07-27 Thread Michael Ellerman
On Mon, 2016-11-07 at 03:38:58 UTC, Sam Mendoza-Jonas wrote:
> Update the hvc driver to use the OPAL irqchip if made available by the
> running firmware. If it is not present, the driver falls back to the
> existing OPAL event number.
> 
> Signed-off-by: Samuel Mendoza-Jonas 
> Acked-by: Michael Ellerman 

Applied to powerpc next, thanks.

https://git.kernel.org/powerpc/c/00dab8187e182da41122f66c20

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [v2,1/2] powerpc/mm: Fix build break when PPC_NATIVE=n

2016-07-27 Thread Michael Ellerman
On Tue, 2016-26-07 at 03:38:37 UTC, Michael Ellerman wrote:
> The recent commit to rework the hash MMU setup broke the build when
> CONFIG_PPC_NATIVE=n. Fix it by adding an IS_ENABLED() check before
> calling hpte_init_native().
> 
> Removing the else clause opens the possibility that we don't set any
> ops, which would probably lead to a strange crash later. So add a check
> that we correctly initialised at least one member of the struct.
> 
> Fixes: 166dd7d3fbf2 ("powerpc/64: Move MMU backend selection out of platform 
> code")
> Reported-by: Stephen Rothwell 
> Signed-off-by: Michael Ellerman 
> Acked-by: Stephen Rothwell 

Series applied to powerpc next.

https://git.kernel.org/powerpc/c/7353644fa9df875aee778a802e

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH V2 8/8] powerpc: Enable support for new DRC devtree properties

2016-07-27 Thread Michael Bringmann
prom_init.c: Enable support for the new DRC device tree properties
"ibm,drc-info" and "ibm,dynamic-memory-v2" in the initial handshake
between the Linux kernel and the front end processor.

Signed-off-by: Michael Bringmann 
---
diff -Naur linux-rhel/arch/powerpc/kernel/prom_init.c 
linux-rhel-patch/arch/powerpc/kernel/prom_init.c
--- linux-rhel/arch/powerpc/kernel/prom_init.c  2016-03-03 07:36:25.0 
-0600
+++ linux-rhel-patch/arch/powerpc/kernel/prom_init.c2016-06-20 
15:59:58.016373676 -0500
@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
OV4_MIN_ENT_CAP,/* minimum VP entitled capacity */
 
/* option vector 5: PAPR/OF options */
-   VECTOR_LENGTH(18),  /* length */
+   VECTOR_LENGTH(22),  /* length */
0,  /* don't ignore, don't halt */
OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -728,6 +728,10 @@ unsigned char ibm_architecture_vec[] = {
OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
OV5_FEAT(OV5_PFO_HW_842),
OV5_FEAT(OV5_SUB_PROCESSORS),
+   0,
+   0,
+   0,
+   OV5_FEAT(OV5_DYN_MEM_V2) | OV5_FEAT(OV5_DRC_INFO),
 
/* option vector 6: IBM PAPR hints */
VECTOR_LENGTH(3),   /* length */

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Resend: [PATCH 7/8] powerpc: Check arch.vec earlier during boot for memory features

2016-07-27 Thread Michael Bringmann
architecture.vec5 features: The boot-time memory management needs to
know the form of the "ibm,dynamic-memory-v2" property early during
scanning of the flattened device tree.  This patch moves execution of
the function pseries_probe_fw_features() early enough that it runs
before the memory properties in the device tree are scanned, allowing
the supported properties to be recognised.

Signed-off-by: Michael Bringmann 
---
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9d86c66..e4c5076 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -215,6 +215,8 @@ extern int early_init_dt_scan_opal(unsigned long node, 
const char *uname,
   int depth, void *data);
 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
 const char *uname, int depth, void *data);
+extern int pseries_probe_fw_features(unsigned long node,
+const char *uname, int depth, void *data);
 
 extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
 extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 946e34f..2034edc 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -777,6 +777,7 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
 
/* Scan memory nodes and rebuild MEMBLOCKs */
+   of_scan_flat_dt(pseries_probe_fw_features, NULL);
of_scan_flat_dt(early_init_dt_scan_root, NULL);
of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
diff --git a/arch/powerpc/platforms/pseries/setup.c 
b/arch/powerpc/platforms/pseries/setup.c
index 9883bc7..f554205 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -736,7 +736,7 @@ static void pseries_power_off(void)
  * Called very early, MMU is off, device-tree isn't unflattened
  */
 
-static int __init pseries_probe_fw_features(unsigned long node,
+int __init pseries_probe_fw_features(unsigned long node,
const char *uname, int depth,
void *data)
 {
@@ -770,6 +770,7 @@ static int __init pseries_probe_fw_features(unsigned long 
node,
 
return hypertas_found && vec5_found;
 }
+EXPORT_SYMBOL(pseries_probe_fw_features);
 
 static int __init pSeries_probe(void)
 {

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH V2 6/8] hotplug/drc-info: Add code to search new devtree properties

2016-07-27 Thread Michael Bringmann
rpadlpar_core.c: Provide parallel routines to search the older device-
tree properties ("ibm,drc-indexes", "ibm,drc-names", "ibm,drc-types"
and "ibm,drc-power-domains"), or the new property "ibm,drc-info".  The
code searches for PHP PCI slots, gets the DRC properties within the
current node (using my-drc-index for correlation), and performs searches
by name or type of the DRC node.

Signed-off-by: Michael Bringmann 
---
diff --git a/drivers/pci/hotplug/rpadlpar_core.c 
b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39..bea9723 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -27,6 +27,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "../pci.h"
 #include "rpaphp.h"
@@ -44,15 +45,14 @@ static struct device_node *find_vio_slot_node(char 
*drc_name)
 {
struct device_node *parent = of_find_node_by_name(NULL, "vdevice");
struct device_node *dn = NULL;
-   char *name;
int rc;
 
if (!parent)
return NULL;
 
while ((dn = of_get_next_child(parent, dn))) {
-   rc = rpaphp_get_drc_props(dn, NULL, &name, NULL, NULL);
-   if ((rc == 0) && (!strcmp(drc_name, name)))
+   rc = rpaphp_check_drc_props(dn, drc_name, NULL);
+   if (rc == 0)
break;
}
 
@@ -64,15 +64,12 @@ static struct device_node *find_php_slot_pci_node(char 
*drc_name,
  char *drc_type)
 {
struct device_node *np = NULL;
-   char *name;
-   char *type;
int rc;
 
while ((np = of_find_node_by_name(np, "pci"))) {
-   rc = rpaphp_get_drc_props(np, NULL, &name, &type, NULL);
+   rc = rpaphp_check_drc_props(np, drc_name, drc_type);
if (rc == 0)
-   if (!strcmp(drc_name, name) && !strcmp(drc_type, type))
-   break;
+   break;
}
 
return np;
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 7db024e..8db5f2e 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -91,8 +91,8 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state);
 
 /* rpaphp_core.c */
 int rpaphp_add_slot(struct device_node *dn);
-int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
-   char **drc_name, char **drc_type, int *drc_power_domain);
+int rpaphp_check_drc_props(struct device_node *dn, char *drc_name,
+   char *drc_type);
 
 /* rpaphp_slot.c */
 void dealloc_slot_struct(struct slot *slot);
diff --git a/drivers/pci/hotplug/rpaphp_core.c 
b/drivers/pci/hotplug/rpaphp_core.c
index 8d13202..0cfdbd9 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 #include/* for eeh_add_device() */
 #include   /* rtas_call */
 #include /* for pci_controller */
@@ -142,15 +143,6 @@ static enum pci_bus_speed get_max_bus_speed(struct slot 
*slot)
case 5:
case 6:
speed = PCI_SPEED_33MHz;/* speed for case 1-6 */
-   break;
-   case 7:
-   case 8:
-   speed = PCI_SPEED_66MHz;
-   break;
-   case 11:
-   case 14:
-   speed = PCI_SPEED_66MHz_PCIX;
-   break;
case 12:
case 15:
speed = PCI_SPEED_100MHz_PCIX;
@@ -196,25 +188,21 @@ static int get_children_props(struct device_node *dn, 
const int **drc_indexes,
return 0;
 }
 
-/* To get the DRC props describing the current node, first obtain it's
- * my-drc-index property.  Next obtain the DRC list from it's parent.  Use
- * the my-drc-index for correlation, and obtain the requested properties.
+
+/* Verify the existence of 'drc_name' and/or 'drc_type' within the
+ * current node.  First obtain it's my-drc-index property.  Next,
+ * obtain the DRC info from it's parent.  Use the my-drc-index for
+ * correlation, and obtain/validate the requested properties.
  */
-int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
-   char **drc_name, char **drc_type, int *drc_power_domain)
+
+static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name,
+   char *drc_type, unsigned int my_index)
 {
+   char *name_tmp, *type_tmp;
const int *indexes, *names;
const int *types, *domains;
-   const unsigned int *my_index;
-   char *name_tmp, *type_tmp;
int i, rc;
 
-   my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
-   if (!my_index) {
-   /* Node isn't DLPAR/hotplug capable */
-   return -EINVAL;
-   }
-
rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
if (rc < 0) {
return -EINVAL;
@@ -225,24 +213,83 @@ int rpaphp_get_drc_props(struct dev

[PATCH V2 5/8] pseries/drc-info: Search new DRC properties for CPU indexes

2016-07-27 Thread Michael Bringmann
pseries/drc-info: Provide parallel routines to convert between
drc_index and CPU numbers at runtime, using the older device-tree
properties ("ibm,drc-indexes", "ibm,drc-names", "ibm,drc-types"
and "ibm,drc-power-domains"), or the new property "ibm,drc-info".

Signed-off-by: Michael Bringmann 
---
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c 
b/arch/powerpc/platforms/pseries/pseries_energy.c
index 9276779..10c4200 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -35,10 +35,68 @@ static int sysfs_entries;
 
 /* Helper Routines to convert between drc_index to cpu numbers */
 
+void read_one_drc_info(int **info, char **dtype, char **dname,
+   unsigned long int *fdi_p, unsigned long int *nsl_p,
+   unsigned long int *si_p, unsigned long int *ldi_p)
+{
+   char *drc_type, *drc_name, *pc;
+   u32 fdi, nsl, si, ldi;
+
+   fdi = nsl = si = ldi = 0;
+
+   /* Get drc-type:encode-string */
+   pc = (char *)info;
+   drc_type = pc;
+   pc += (strlen(drc_type) + 1);
+
+   /* Get drc-name-prefix:encode-string */
+   drc_name = (char *)pc;
+   pc += (strlen(drc_name) + 1);
+
+   /* Get drc-index-start:encode-int */
+   memcpy(&fdi, pc, 4);
+   fdi = be32_to_cpu(fdi);
+   pc += 4;
+
+   /* Get/skip drc-name-suffix-start:encode-int */
+   pc += 4;
+
+   /* Get number-sequential-elements:encode-int */
+   memcpy(&nsl, pc, 4);
+   nsl = be32_to_cpu(nsl);
+   pc += 4;
+
+   /* Get sequential-increment:encode-int */
+   memcpy(&si, pc, 4);
+   si = be32_to_cpu(si);
+   pc += 4;
+
+   /* Get/skip drc-power-domain:encode-int */
+   pc += 4;
+
+   /* Should now know end of current entry */
+   ldi = fdi + ((nsl-1)*si);
+
+   (*info) = (int *)pc;
+
+   if (dtype)
+   *dtype = drc_type;
+   if (dname)
+   *dname = drc_name;
+   if (fdi_p)
+   *fdi_p = fdi;
+   if (nsl_p)
+   *nsl_p = nsl;
+   if (si_p)
+   *si_p = si;
+   if (ldi_p)
+   *ldi_p = ldi;
+}
+EXPORT_SYMBOL(read_one_drc_info);
+
 static u32 cpu_to_drc_index(int cpu)
 {
struct device_node *dn = NULL;
-   const int *indexes;
int i;
int rc = 1;
u32 ret = 0;
@@ -46,18 +104,54 @@ static u32 cpu_to_drc_index(int cpu)
dn = of_find_node_by_path("/cpus");
if (dn == NULL)
goto err;
-   indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-   if (indexes == NULL)
-   goto err_of_node_put;
+
/* Convert logical cpu number to core number */
i = cpu_core_index_of_thread(cpu);
-   /*
-* The first element indexes[0] is the number of drc_indexes
-* returned in the list.  Hence i+1 will get the drc_index
-* corresponding to core number i.
-*/
-   WARN_ON(i > indexes[0]);
-   ret = indexes[i + 1];
+
+   if (firmware_has_feature(FW_FEATURE_DRC_INFO)) {
+   int *info = (int *)4;
+   unsigned long int num_set_entries, j, iw = i, fdi = 0;
+   unsigned long int ldi = 0, nsl = 0, si = 0;
+   char *dtype;
+   char *dname;
+
+   info = (int *)of_get_property(dn, "ibm,drc-info", NULL);
+   if (info == NULL)
+   goto err_of_node_put;
+
+   num_set_entries = be32_to_cpu(*info++);
+
+   for (j = 0; j < num_set_entries; j++) {
+
+   read_one_drc_info(&info, &dtype, &dname, &fdi,
+   &nsl, &si, &ldi);
+   if (strcmp(dtype, "CPU"))
+   goto err;
+
+   if (iw < ldi)
+   break;
+
+   WARN_ON(((iw-fdi)%si) != 0);
+   }
+   WARN_ON((nsl == 0) | (si == 0));
+
+   ret = ldi + (iw*si);
+   } else {
+   const int *indexes;
+
+   indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+   if (indexes == NULL)
+   goto err_of_node_put;
+
+   /*
+* The first element indexes[0] is the number of drc_indexes
+* returned in the list.  Hence i+1 will get the drc_index
+* corresponding to core number i.
+*/
+   WARN_ON(i > indexes[0]);
+   ret = indexes[i + 1];
+   }
+
rc = 0;
 
 err_of_node_put:
@@ -78,21 +172,51 @@ static int drc_index_to_cpu(u32 drc_index)
dn = of_find_node_by_path("/cpus");
if (dn == NULL)
goto err;
-   indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-   if (indexes == NULL)
-   goto err_of_node_put;
-   /*
-* First element in the array is the number

[PATCH V2 4/8] pseries/hotplug init: Convert new DRC memory property for hotplug runtime

2016-07-27 Thread Michael Bringmann
hotplug_init: Simplify the code needed for runtime memory hotplug and
maintenance with a conversion routine that transforms the compressed
property "ibm,dynamic-memory-v2" to the form of "ibm,dynamic-memory"
within the "ibm,dynamic-reconfiguration-memory" property.  Thus only
a single set of routines should be required at runtime to parse, edit,
and manipulate the memory representation in the device tree.  Similarly,
any userspace applications that need this information will only need
to recognize the older format to be able to continue to operate.

Signed-off-by: Michael Bringmann 
---
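Not part of the patch: a minimal sketch of the expansion the conversion
routine below performs, using the of_drconf_cell structures from patch 2/8.
The helper name is illustrative only, and the endian conversion needed for
the generated property value is omitted here.

/* Expand one compressed v2 set into individual v1-style LMB entries. */
static void expand_one_lmb_set(const struct of_drconf_cell_v2 *set,
			       struct of_drconf_cell *out,
			       unsigned long memblock_size)
{
	u64 addr = set->base_addr;
	u32 drc = set->drc_index;
	u32 j;

	for (j = 0; j < set->num_seq_lmbs; j++) {
		out[j].base_addr = addr;
		out[j].drc_index = drc;
		out[j].reserved  = 0;
		out[j].aa_index  = set->aa_index;
		out[j].flags     = set->flags;

		addr += memblock_size;	/* LMBs in a set are contiguous */
		drc++;			/* DRC indexes are sequential   */
	}
}
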
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c 
b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2ce1385..f422dcb 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -839,6 +839,95 @@ static int pseries_update_drconf_memory(struct 
of_reconfig_data *pr)
return rc;
 }
 
+static int pseries_rewrite_dynamic_memory_v2(void)
+{
+   unsigned long memblock_size;
+   struct device_node *dn;
+   struct property *prop, *prop_v2;
+   __be32 *p;
+   struct of_drconf_cell *lmbs;
+   u32 num_lmb_desc_sets, num_lmbs;
+   int i, k = 0;
+
+   dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+   if (!dn)
+   return -EINVAL;
+
+   prop_v2 = of_find_property(dn, "ibm,dynamic-memory-v2", NULL);
+   if (!prop_v2)
+   return -EINVAL;
+
+   memblock_size = pseries_memory_block_size();
+   if (!memblock_size)
+   return -EINVAL;
+
+   /* The first int of the property is the number of lmb sets
+* described by the property.
+*/
+   p = (__be32 *)prop_v2->value;
+   num_lmb_desc_sets = be32_to_cpu(*p++);
+
+   /* Count the number of LMBs for generating the alternate format
+*/
+   for (i = 0, num_lmbs = 0; i < num_lmb_desc_sets; i++) {
+   struct of_drconf_cell_v2 drmem;
+
+   read_drconf_cell_v2(&drmem, (const __be32 **)&p);
+   num_lmbs += drmem.num_seq_lmbs;
+   }
+
+   /* Create an empty copy of the new 'ibm,dynamic-memory' property
+*/
+   {
+   prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+   if (!prop)
+   return -ENOMEM;
+   prop->name = kstrdup("ibm,dynamic-memory", GFP_KERNEL);
+   prop->length = dyn_mem_v2_len(num_lmbs);
+   prop->value = kzalloc(prop->length, GFP_KERNEL);
+   }
+
+   /* Copy/expand the ibm,dynamic-memory-v2 format to produce the
+* ibm,dynamic-memory format.
+*/
+   p = (__be32 *)prop->value;
+   *p = cpu_to_be32(num_lmbs);
+   p++;
+   lmbs = (struct of_drconf_cell *)p;
+
+   p = (__be32 *)prop_v2->value;
+   p++;
+
+   for (i = 0; i < num_lmb_desc_sets; i++) {
+   struct of_drconf_cell_v2 drmem;
+   int j;
+
+   read_drconf_cell_v2(&drmem, (const __be32 **)&p);
+
+   for (j = 0; j < drmem.num_seq_lmbs; j++) {
+   lmbs[k+j].base_addr = be64_to_cpu(drmem.base_addr);
+   lmbs[k+j].drc_index = be32_to_cpu(drmem.drc_index);
+   lmbs[k+j].reserved  = 0;
+   lmbs[k+j].aa_index  = be32_to_cpu(drmem.aa_index);
+   lmbs[k+j].flags = be32_to_cpu(drmem.flags);
+
+   drmem.base_addr += memblock_size;
+   drmem.drc_index++;
+   }
+
+   k += drmem.num_seq_lmbs;
+   }
+
+   of_remove_property(dn, prop_v2);
+
+   of_add_property(dn, prop);
+
+   /* And disable feature flag since the property has gone away */
+   powerpc_firmware_features &= ~FW_FEATURE_DYN_MEM_V2;
+
+   return 0;
+}
+
 static int pseries_memory_notifier(struct notifier_block *nb,
   unsigned long action, void *data)
 {
@@ -866,6 +952,8 @@ static struct notifier_block pseries_mem_nb = {
 
 static int __init pseries_memory_hotplug_init(void)
 {
+   if (firmware_has_feature(FW_FEATURE_DYN_MEM_V2))
+   pseries_rewrite_dynamic_memory_v2();
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_mem_nb);
 

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH V2 3/8] powerpc/memory: Parse new memory property to initialize structures.

2016-07-27 Thread Michael Bringmann
powerpc/memory: Add parallel routines to parse the new
"ibm,dynamic-memory-v2" property when it is present, and then to
finish initialization of the relevant memory structures with the
operating system.  This code is shared between the boot-time
initialization functions and the runtime functions for memory
hotplug, so it needs to be able to handle both formats.

Signed-off-by: Michael Bringmann 
---
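Not part of the patch: a condensed sketch of the format selection that
of_get_drconf_memory() ends up making below. The helper name is illustrative
only, and the per-format length validation is omitted.

static const __be32 *get_drconf_prop(struct device_node *memory, int *len)
{
	const char *name = firmware_has_feature(FW_FEATURE_DYN_MEM_V2) ?
			   "ibm,dynamic-memory-v2" : "ibm,dynamic-memory";

	/* The first cell of either property is the number of entries/sets. */
	return of_get_property(memory, name, len);
}
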
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e..18b4ee7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -405,9 +407,8 @@
 
*cellp = cp + 4;
 }
- 
- /*
- * Retrieve and validate the ibm,dynamic-memory property of the device tree.
+
+/*
  * Read the next memory block set entry from the ibm,dynamic-memory-v2 property
  * and return the information in the provided of_drconf_cell_v2 structure.
  */
@@ -425,30 +426,55 @@
 EXPORT_SYMBOL(read_drconf_cell_v2);
 
 /*
- * Retrieve and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory[-v2] property of the
+ * device tree.
+ *
+ * The layout of the ibm,dynamic-memory property is a number N of memory
+ * block description list entries followed by N memory block description
+ * list entries.  Each memory block description list entry contains
+ * information as laid out in the of_drconf_cell struct above.
  *
- * The layout of the ibm,dynamic-memory property is a number N of memblock
- * list entries followed by N memblock list entries.  Each memblock list entry
- * contains information as laid out in the of_drconf_cell struct above.
+ * The layout of the ibm,dynamic-memory-v2 property is a number N of memory
+ * block set description list entries, followed by N memory block set
+ * description set entries.
  */
 static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
 {
const __be32 *prop;
u32 len, entries;
 
-   prop = of_get_property(memory, "ibm,dynamic-memory", &len);
-   if (!prop || len < sizeof(unsigned int))
-   return 0;
+   if (firmware_has_feature(FW_FEATURE_DYN_MEM_V2)) {
 
-   entries = of_read_number(prop++, 1);
+   prop = of_get_property(memory, "ibm,dynamic-memory-v2", &len);
+   if (!prop || len < sizeof(unsigned int))
+   return 0;
 
-   /* Now that we know the number of entries, revalidate the size
-* of the property read in to ensure we have everything
-*/
-   if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
-   return 0;
+   entries = of_read_number(prop++, 1);
+
+   /* Now that we know the number of set entries, revalidate the
+* size of the property read in to ensure we have everything.
+*/
+   if (len < dyn_mem_v2_len(entries))
+   return 0;
+
+   *dm = prop;
+   } else {
+   prop = of_get_property(memory, "ibm,dynamic-memory", &len);
+   if (!prop || len < sizeof(unsigned int))
+   return 0;
+
+   entries = of_read_number(prop++, 1);
+
+   /* Now that we know the number of entries, revalidate the size
+* of the property read in to ensure we have everything
+*/
+   if (len < (entries * (n_mem_addr_cells + 4) + 1) *
+  sizeof(unsigned int))
+   return 0;
+
+   *dm = prop;
+   }
 
-   *dm = prop;
return entries;
 }
 
@@ -511,7 +537,7 @@
  * This is like of_node_to_nid_single() for memory represented in the
  * ibm,dynamic-reconfiguration-memory node.
  */
-static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
+static int of_drconf_to_nid_single(u32 drmem_flags, u32 drmem_aa_index,
   struct assoc_arrays *aa)
 {
int default_nid = 0;
@@ -519,16 +545,16 @@
int index;
 
if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
-   !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
-   drmem->aa_index < aa->n_arrays) {
-   index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
+   !(drmem_flags & DRCONF_MEM_AI_INVALID) &&
+   drmem_aa_index < aa->n_arrays) {
+   index = drmem_aa_index * aa->array_sz + min_common_depth - 1;
nid = of_read_number(&aa->arrays[index], 1);
 
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
 
if (nid > 0) {
-   index = drmem->aa_index * aa->array_sz;
+   index = drmem_aa_index * aa->array_sz;
initialize_distance_lookup_table(nid,
&aa->arrays[index]);
}
@@ -671,7 +697,7 @@
  * Extract NUMA information from the ibm,dyna

[PATCH V2 1/8] powerpc/firmware: Add definitions for new firmware features.

2016-07-27 Thread Michael Bringmann
Firmware Features: Define new bit flags representing the presence of
new device tree properties "ibm,drc-info", and "ibm,dynamic-memory-v2".
These flags are used to tell the front end processor when the Linux
kernel supports the new properties, and by the front end processor to
tell the Linux kernel that the new properties are present in the device
tree.

Signed-off-by: Michael Bringmann 
---
diff --git a/arch/powerpc/include/asm/firmware.h 
b/arch/powerpc/include/asm/firmware.h
index b062924..a9d66d5 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -51,6 +51,8 @@
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x8000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0001)
 #define FW_FEATURE_PRRN        ASM_CONST(0x0002)
+#define FW_FEATURE_DYN_MEM_V2  ASM_CONST(0x0004)
+#define FW_FEATURE_DRC_INFO    ASM_CONST(0x0008)
 
 #ifndef __ASSEMBLY__
 
@@ -66,7 +68,8 @@ enum {
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
-   FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
+   FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
+   FW_FEATURE_DYN_MEM_V2 | FW_FEATURE_DRC_INFO,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
FW_FEATURE_POWERNV_ALWAYS = 0,
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7f436ba..b9a1534 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -155,6 +203,8 @@ struct of_drconf_cell {
 #define OV5_PFO_HW_842 0x0E40  /* PFO Compression Accelerator */
 #define OV5_PFO_HW_ENCR0x0E20  /* PFO Encryption Accelerator */
 #define OV5_SUB_PROCESSORS 0x0F01  /* 1,2,or 4 Sub-Processors supported */
+#define OV5_DYN_MEM_V2 0x1680  /* Redef Prop Structures: dyn-mem-v2 */
+#define OV5_DRC_INFO   0x1640  /* Redef Prop Structures: drc-info   */
 
 /* Option Vector 6: IBM PAPR hints */
 #define OV6_LINUX  0x02/* Linux is our OS */
diff --git a/arch/powerpc/platforms/pseries/firmware.c 
b/arch/powerpc/platforms/pseries/firmware.c
index 8c80588..00243ee 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -111,6 +111,8 @@ static __initdata struct vec5_fw_feature
 vec5_fw_features_table[] = {
{FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
{FW_FEATURE_PRRN,   OV5_PRRN},
+   {FW_FEATURE_DYN_MEM_V2, OV5_DYN_MEM_V2},
+   {FW_FEATURE_DRC_INFO,   OV5_DRC_INFO},
 };
 
 void __init fw_vec5_feature_init(const char *vec5, unsigned long len)

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH V2 2/8] powerpc/memory: Parse new memory property to register blocks.

2016-07-27 Thread Michael Bringmann
powerpc/memory: Add parallel routines to parse the new
"ibm,dynamic-memory-v2" property when it is present, and then to
register the relevant memory blocks with the operating system.
This property format is intended to provide a more compact
representation of memory when communicating with the front end
processor, especially when describing vast amounts of RAM.

Signed-off-by: Michael Bringmann 
---
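Not part of the patch: a minimal sketch of walking the compact property with
the read_drconf_cell_v2() helper added below (the function name here is
illustrative only).

static u32 count_v2_lmbs(const __be32 *p)
{
	u32 sets = be32_to_cpu(*p++);	/* first cell: number of set entries */
	u32 total = 0;
	u32 i;

	for (i = 0; i < sets; i++) {
		struct of_drconf_cell_v2 set;

		read_drconf_cell_v2(&set, &p);	/* advances p past one set */
		total += set.num_seq_lmbs;
	}

	return total;	/* total LMBs described by ibm,dynamic-memory-v2 */
}
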
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7f436ba..b9a1534 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -69,6 +69,8 @@ struct boot_param_header {
  * OF address retreival & translation
  */
 
+extern int n_mem_addr_cells;
+
 /* Parse the ibm,dma-window property of an OF node into the busno, phys and
  * size parameters.
  */
@@ -81,8 +83,9 @@ extern void of_instantiate_rtc(void);
 extern int of_get_ibm_chip_id(struct device_node *np);
 
 /* The of_drconf_cell struct defines the layout of the LMB array
- * specified in the device tree property
- * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
+ * specified in the device tree properties,
+ * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
+ * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory-v2
  */
 struct of_drconf_cell {
u64 base_addr;
@@ -92,9 +95,32 @@ struct of_drconf_cell {
u32 flags;
 };
 
-#define DRCONF_MEM_ASSIGNED0x0008
-#define DRCONF_MEM_AI_INVALID  0x0040
-#define DRCONF_MEM_RESERVED0x0080
+#define DRCONF_MEM_ASSIGNED0x0008
+#define DRCONF_MEM_AI_INVALID  0x0040
+#define DRCONF_MEM_RESERVED0x0080
+
+struct of_drconf_cell_v2 {
+   u32 num_seq_lmbs;
+   u64 base_addr;
+   u32 drc_index;
+   u32 aa_index;
+   u32 flags;
+} __attribute__((packed));
+
+
+static inline int dyn_mem_v2_len(int entries)
+{
+   int drconf_v2_cells = (n_mem_addr_cells + 4);
+   int drconf_v2_cells_len = (drconf_v2_cells * sizeof(unsigned int));
+   return (((entries) * drconf_v2_cells_len) +
+(1 * sizeof(unsigned int)));
+}
+
+extern void read_drconf_cell_v2(struct of_drconf_cell_v2 *drmem,
+   const __be32 **cellp);
+extern void read_one_drc_info(int **info, char **drc_type, char **drc_name,
+   unsigned long int *fdi_p, unsigned long int *nsl_p,
+   unsigned long int *si_p, unsigned long int *ldi_p);
 
 /*
  * There are two methods for telling firmware what our capabilities are.
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e..ad294ce 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -57,8 +57,10 @@
 EXPORT_SYMBOL(node_data);
 
 static int min_common_depth;
-static int n_mem_addr_cells, n_mem_size_cells;
+int n_mem_addr_cells;
+static int n_mem_size_cells;
 static int form1_affinity;
+EXPORT_SYMBOL(n_mem_addr_cells);
 
 #define MAX_DISTANCE_REF_POINTS 4
 static int distance_ref_points_depth;
@@ -405,6 +405,24 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, 
const __be32 **cellp)
 
*cellp = cp + 4;
 }
+ 
+ /*
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
+ * Read the next memory block set entry from the ibm,dynamic-memory-v2 property
+ * and return the information in the provided of_drconf_cell_v2 structure.
+ */
+void read_drconf_cell_v2(struct of_drconf_cell_v2 *drmem, const __be32 **cellp)
+{
+   const __be32 *cp = (const __be32 *)*cellp;
+   drmem->num_seq_lmbs = be32_to_cpu(*cp++);
+   drmem->base_addr = read_n_cells(n_mem_addr_cells, &cp);
+   drmem->drc_index = be32_to_cpu(*cp++);
+   drmem->aa_index = be32_to_cpu(*cp++);
+   drmem->flags = be32_to_cpu(*cp++);
+
+   *cellp = cp;
+}
+EXPORT_SYMBOL(read_drconf_cell_v2);
 
 /*
  * Retrieve and validate the ibm,dynamic-memory property of the device tree.
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 946e34f..a55bc1e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -56,6 +56,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
@@ -441,12 +442,12 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned 
long node,
 
 #ifdef CONFIG_PPC_PSERIES
 /*
- * Interpret the ibm,dynamic-memory property in the
- * /ibm,dynamic-reconfiguration-memory node.
+ * Interpret the ibm,dynamic-memory property/ibm,dynamic-memory-v2
+ * in the /ibm,dynamic-reconfiguration-memory node.
  * This contains a list of memory blocks along with NUMA affinity
  * information.
  */
-static int __init early_init_dt_scan_drconf_memory(unsigned long node)
+static int __init early_init_dt_scan_drconf_memory_v1(unsigned long node)
 {
const __be32 *dm, *ls, *usm;
int l;
@@ -516,6 +517,105 @@ static int __init 
early_init_dt_scan_drconf_memory(unsigned long node)
memblock_dump_all();
return 0;
 }
+
+st

Resend: [Patch 0/8] powerpc/devtree: Add support for 2 new DRC properties

2016-07-27 Thread Michael Bringmann
Several properties in the DRC device tree format are replaced by
more compact representations to allow, for example, the encoding
of vast amounts of memory, and/or reduced duplication of information
in related data structures.

"ibm,drc-info": This property, when present, replaces the following
four properties: "ibm,drc-indexes", "ibm,drc-names", "ibm,drc-types"
and "ibm,drc-power-domains".  This property is defined for all
dynamically reconfigurable platform nodes.  The "ibm,drc-info" elements
are intended to provide a more compact representation, and reduce some
search overhead.
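
For reference, each "ibm,drc-info" set entry is a packed sequence of two
strings and five cells, in the order parsed by read_one_drc_info() in the
CPU-indexes patch; the helper below is illustrative only:

/*
 * One "ibm,drc-info" set entry:
 *   drc-type                    NUL-terminated string, e.g. "CPU"
 *   drc-name-prefix             NUL-terminated string
 *   drc-index-start             encode-int, first DRC index of the set
 *   drc-name-suffix-start       encode-int
 *   number-sequential-elements  encode-int
 *   sequential-increment        encode-int, step between DRC indexes
 *   drc-power-domain            encode-int
 */
static inline u32 drc_info_nth_index(u32 index_start, u32 increment, u32 n)
{
	/* n runs from 0 to number-sequential-elements - 1 */
	return index_start + n * increment;
}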

"ibm,dynamic-memory-v2": This property replaces the "ibm,dynamic-memory"
property representation within the "ibm,dynamic-reconfiguration-memory"
node provided by the BMC.  This element format is intended to provide
a more compact representation of memory, especially for systems with
massive amounts of RAM.  To simplify portability, this property is
converted to the "ibm,dynamic-memory" property during system boot.

"ibm,architecture.vec": Bit flags are added to this data structure
by the front end processor to inform the kernel whether to expect
one or both of the new device tree structures "ibm,drc-info"
and "ibm,dynamic-memory-v2".

Signed-off-by: Michael Bringmann 

Michael Bringmann (8):
  powerpc/firmware: Add definitions for new firmware features.
  powerpc/memory: Parse new memory property to register blocks.
  powerpc/memory: Parse new memory property to initialize structures.
  pseries/hotplug init: Convert new DRC memory property for hotplug runtime
  pseries/drc-info: Search new DRC properties for CPU indexes
  hotplug/drc-info: Add code to search new devtree properties
  powerpc: Check arch.vec earlier during boot for memory features
  powerpc: Enable support for new DRC devtree properties


___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 21/21] powerpc/jump_label: Annotate jump label assembly

2016-07-27 Thread Michael Ellerman
Add a comment to the generated assembler for jump labels. This makes it
easier to identify them in asm listings (generated with $ make foo.s).

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/jump_label.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

v3: New.

diff --git a/arch/powerpc/include/asm/jump_label.h 
b/arch/powerpc/include/asm/jump_label.h
index 47e155f15433..9878cac7b47c 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -21,7 +21,7 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool 
branch)
 {
asm_volatile_goto("1:\n\t"
-"nop\n\t"
+"nop # arch_static_branch\n\t"
 ".pushsection __jump_table,  \"aw\"\n\t"
 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 ".popsection \n\t"
@@ -35,7 +35,7 @@ l_yes:
 static __always_inline bool arch_static_branch_jump(struct static_key *key, 
bool branch)
 {
asm_volatile_goto("1:\n\t"
-"b %l[l_yes]\n\t"
+"b %l[l_yes] # arch_static_branch_jump\n\t"
 ".pushsection __jump_table,  \"aw\"\n\t"
 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 ".popsection \n\t"
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 20/21] powerpc/mm: Catch usage of cpu/mmu_has_feature() before jump label init

2016-07-27 Thread Michael Ellerman
From: "Aneesh Kumar K.V" 

This allows us to catch incorrect usage of cpu_has_feature() and
mmu_has_feature() prior to jump labels being initialised.

Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/Kconfig.debug | 10 ++
 arch/powerpc/include/asm/cpu_has_feature.h |  7 +++
 arch/powerpc/include/asm/mmu.h | 14 ++
 arch/powerpc/kernel/process.c  |  2 +-
 4 files changed, 32 insertions(+), 1 deletion(-)

v3: Use printk() and dump_stack() rather than WARN_ON(), because
WARN_ON() may not work this early in boot.
Rename the Kconfig.

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2512dac77adb..0108fde08d90 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -69,6 +69,16 @@ config JUMP_LABEL_FEATURE_CHECKS
  feature checks. This should generate more optimal code for those
  checks.
 
+config JUMP_LABEL_FEATURE_CHECK_DEBUG
+   bool "Do extra check on feature fixup calls"
+   depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
+   default n
+   help
+ This tries to catch incorrect usage of cpu_has_feature() and
+ mmu_has_feature() in the code.
+
+ If you don't know what this means, say N.
+
 config FTR_FIXUP_SELFTEST
bool "Run self-tests of the feature-fixup code"
depends on DEBUG_KERNEL
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h 
b/arch/powerpc/include/asm/cpu_has_feature.h
index 18e60e61bea9..b702a48c438d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -22,6 +22,13 @@ static __always_inline bool cpu_has_feature(unsigned long 
feature)
 {
int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+   if (!static_key_initialized) {
+   printk("Warning! cpu_has_feature() used prior to jump label 
init!\n");
+   dump_stack();
+   return __cpu_has_feature(feature);
+   }
+#endif
if (CPU_FTRS_ALWAYS & feature)
return true;
 
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 3900cb7fe7cf..50d8c9f78976 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -153,6 +153,13 @@ static __always_inline bool mmu_has_feature(unsigned long 
feature)
 {
int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+   if (!static_key_initialized) {
+   printk("Warning! mmu_has_feature() used prior to jump label 
init!\n");
+   dump_stack();
+   return __mmu_has_feature(feature);
+   }
+#endif
if (!(MMU_FTRS_POSSIBLE & feature))
return false;
 
@@ -164,6 +171,13 @@ static inline void mmu_clear_feature(unsigned long feature)
 {
int i;
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+   if (!static_key_initialized) {
+   WARN_ON(1);
+   cur_cpu_spec->mmu_features &= ~feature;
+   return;
+   }
+#endif
i = __builtin_ctzl(feature);
cur_cpu_spec->mmu_features &= ~feature;
static_branch_disable(&mmu_feature_keys[i]);
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 19/21] powerpc: Add option to use jump label for mmu_has_feature()

2016-07-27 Thread Michael Ellerman
From: Kevin Hao 

As we just did for CPU features.

Signed-off-by: Kevin Hao 
Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/mmu.h| 36 
 arch/powerpc/kernel/cputable.c| 17 +
 arch/powerpc/lib/feature-fixups.c |  1 +
 3 files changed, 54 insertions(+)

v3: Rename to mmu_feature_keys, and NUM_MMU_FTR_KEYS.
Use the kconfig.

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e3eff365e55d..3900cb7fe7cf 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -140,6 +140,41 @@ static inline bool __mmu_has_feature(unsigned long feature)
return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include 
+
+#define NUM_MMU_FTR_KEYS   32
+
+extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
+
+extern void mmu_feature_keys_init(void);
+
+static __always_inline bool mmu_has_feature(unsigned long feature)
+{
+   int i;
+
+   if (!(MMU_FTRS_POSSIBLE & feature))
+   return false;
+
+   i = __builtin_ctzl(feature);
+   return static_branch_likely(&mmu_feature_keys[i]);
+}
+
+static inline void mmu_clear_feature(unsigned long feature)
+{
+   int i;
+
+   i = __builtin_ctzl(feature);
+   cur_cpu_spec->mmu_features &= ~feature;
+   static_branch_disable(&mmu_feature_keys[i]);
+}
+#else
+
+static inline void mmu_feature_keys_init(void)
+{
+
+}
+
 static inline bool mmu_has_feature(unsigned long feature)
 {
return __mmu_has_feature(feature);
@@ -149,6 +184,7 @@ static inline void mmu_clear_feature(unsigned long feature)
 {
cur_cpu_spec->mmu_features &= ~feature;
 }
+#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
 
 extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f268850f8fda..db14efc7d3e0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2243,4 +2243,21 @@ void __init cpu_feature_keys_init(void)
static_branch_disable(&cpu_feature_keys[i]);
}
 }
+
+struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
+   [0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(mmu_feature_keys);
+
+void __init mmu_feature_keys_init(void)
+{
+   int i;
+
+   for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
+   unsigned long f = 1ul << i;
+
+   if (!(cur_cpu_spec->mmu_features & f))
+   static_branch_disable(&mmu_feature_keys[i]);
+   }
+}
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index f90423faade0..8db370cec547 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -196,6 +196,7 @@ void __init apply_feature_fixups(void)
 */
jump_label_init();
cpu_feature_keys_init();
+   mmu_feature_keys_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 18/21] powerpc: Add option to use jump label for cpu_has_feature()

2016-07-27 Thread Michael Ellerman
From: Kevin Hao 

We do binary patching of asm code using CPU features, which is a
one-time operation, done during early boot. However checks of CPU
features in C code are currently done at run time, even though the set
of CPU features can never change after boot.

We can optimise this by using jump labels to implement cpu_has_feature(),
meaning checks in C code are binary patched into a single nop or branch.

For a C sequence along the lines of:

if (cpu_has_feature(FOO))
 return 2;

The generated code before is roughly:

ld  r9,-27640(r2)
ld  r9,0(r9)
lwz r9,32(r9)
cmpwi   cr7,r9,0
bge cr7, 1f
li  r3,2
blr
1:  ...

After (true):
nop
li  r3,2
blr

After (false):
b   1f
li  r3,2
blr
1:  ...

Signed-off-by: Kevin Hao 
Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/cpu_has_feature.h | 22 ++
 arch/powerpc/include/asm/cputable.h|  6 ++
 arch/powerpc/kernel/cputable.c | 20 
 arch/powerpc/lib/feature-fixups.c  |  1 +
 4 files changed, 49 insertions(+)

v3: Rename MAX_CPU_FEATURES as we already have a #define with that name.
Define NUM_CPU_FTR_KEYS as a constant.
Rename the array to cpu_feature_keys.
Use the kconfig we added to guard it.
Rewrite the change log.
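
One detail worth spelling out: each CPU_FTR_* value is a single bit, so its
bit position doubles as its index into cpu_feature_keys[]. A worked example
(mask value illustrative only):

	unsigned long feature = 0x40UL;   /* some single-bit CPU_FTR_* mask */
	int i = __builtin_ctzl(feature);  /* == 6, the bit position */
	/* ... so the check becomes static_branch_likely(&cpu_feature_keys[6]) */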

diff --git a/arch/powerpc/include/asm/cpu_has_feature.h 
b/arch/powerpc/include/asm/cpu_has_feature.h
index ad296b2f1d84..18e60e61bea9 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -11,10 +11,32 @@ static inline bool __cpu_has_feature(unsigned long feature)
  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
+#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
+#include 
+
+#define NUM_CPU_FTR_KEYS   64
+
+extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
+
+static __always_inline bool cpu_has_feature(unsigned long feature)
+{
+   int i;
+
+   if (CPU_FTRS_ALWAYS & feature)
+   return true;
+
+   if (!(CPU_FTRS_POSSIBLE & feature))
+   return false;
+
+   i = __builtin_ctzl(feature);
+   return static_branch_likely(&cpu_feature_keys[i]);
+}
+#else
 static inline bool cpu_has_feature(unsigned long feature)
 {
return __cpu_has_feature(feature);
 }
+#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_POWERPC_CPUFEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h 
b/arch/powerpc/include/asm/cputable.h
index 92961bcfbe3f..f23aa3450bca 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -123,6 +123,12 @@ extern void do_feature_fixups(unsigned long value, void 
*fixup_start,
 
 extern const char *powerpc_base_platform;
 
+#ifdef CONFIG_JUMP_LABEL
+extern void cpu_feature_keys_init(void);
+#else
+static inline void cpu_feature_keys_init(void) { }
+#endif
+
 /* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
 enum {
TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index d81f826d1029..f268850f8fda 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -2224,3 +2225,22 @@ struct cpu_spec * __init identify_cpu(unsigned long 
offset, unsigned int pvr)
 
return NULL;
 }
+
+#ifdef CONFIG_JUMP_LABEL
+struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
+   [0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
+};
+EXPORT_SYMBOL_GPL(cpu_feature_keys);
+
+void __init cpu_feature_keys_init(void)
+{
+   int i;
+
+   for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
+   unsigned long f = 1ul << i;
+
+   if (!(cur_cpu_spec->cpu_features & f))
+   static_branch_disable(&cpu_feature_keys[i]);
+   }
+}
+#endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 2a1904739843..f90423faade0 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -195,6 +195,7 @@ void __init apply_feature_fixups(void)
 * CPU/MMU features.
 */
jump_label_init();
+   cpu_feature_keys_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 17/21] powerpc: Add kconfig option to use jump labels for cpu/mmu_has_feature()

2016-07-27 Thread Michael Ellerman
Add a kconfig option to control whether we use jump label for the
cpu/mmu_has_feature() checks. Currently this does nothing, but we will
enable it in subsequent patches.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/Kconfig.debug | 9 +
 1 file changed, 9 insertions(+)

v3: New.

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index cfe08eab90c6..2512dac77adb 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -60,6 +60,15 @@ config CODE_PATCHING_SELFTEST
depends on DEBUG_KERNEL
default n
 
+config JUMP_LABEL_FEATURE_CHECKS
+   bool "Enable use of jump label for cpu/mmu_has_feature()"
+   depends on JUMP_LABEL
+   default y
+   help
+ Selecting this option enables use of jump labels for some internal
+ feature checks. This should generate more optimal code for those
+ checks.
+
 config FTR_FIXUP_SELFTEST
bool "Run self-tests of the feature-fixup code"
depends on DEBUG_KERNEL
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 16/21] powerpc: Move cpu_has_feature() to a separate file

2016-07-27 Thread Michael Ellerman
From: Kevin Hao 

We plan to use jump label for cpu_has_feature(). In order to implement
this we need to include linux/jump_label.h in asm/cputable.h.

Unfortunately if we do that it leads to an include loop. The root of the
problem seems to be that reg.h needs cputable.h (for CPU_FTRs), and then
cputable.h via jump_label.h eventually pulls in hw_irq.h which needs
reg.h (for MSR_EE).

So move cpu_has_feature() to a separate file on its own.

Signed-off-by: Kevin Hao 
Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |  1 +
 arch/powerpc/include/asm/cacheflush.h |  1 +
 arch/powerpc/include/asm/cpu_has_feature.h| 20 
 arch/powerpc/include/asm/cputable.h   | 11 ---
 arch/powerpc/include/asm/cputime.h|  1 +
 arch/powerpc/include/asm/dbell.h  |  1 +
 arch/powerpc/include/asm/dcr-native.h |  1 +
 arch/powerpc/include/asm/mman.h   |  1 +
 arch/powerpc/include/asm/time.h   |  1 +
 arch/powerpc/include/asm/xor.h|  1 +
 arch/powerpc/kernel/align.c   |  1 +
 arch/powerpc/kernel/irq.c |  1 +
 arch/powerpc/kernel/process.c |  1 +
 arch/powerpc/kernel/setup-common.c|  1 +
 arch/powerpc/kernel/setup_32.c|  1 +
 arch/powerpc/kernel/smp.c |  1 +
 arch/powerpc/platforms/cell/pervasive.c   |  1 +
 arch/powerpc/xmon/ppc-dis.c   |  1 +
 18 files changed, 36 insertions(+), 11 deletions(-)
 create mode 100644 arch/powerpc/include/asm/cpu_has_feature.h

v3: Change the header name, and flesh out change log.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h 
b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 5eaf86ac143d..032e9f0bc708 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * SLB
diff --git a/arch/powerpc/include/asm/cacheflush.h 
b/arch/powerpc/include/asm/cacheflush.h
index 69fb16d7a811..b77f0364df94 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -11,6 +11,7 @@
 
 #include 
 #include 
+#include 
 
 /*
  * No cache flushing is required when address mappings are changed,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h 
b/arch/powerpc/include/asm/cpu_has_feature.h
new file mode 100644
index ..ad296b2f1d84
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_POWERPC_CPUFEATURES_H
+#define __ASM_POWERPC_CPUFEATURES_H
+
+#ifndef __ASSEMBLY__
+
+#include 
+
+static inline bool __cpu_has_feature(unsigned long feature)
+{
+   return !!((CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
+}
+
+static inline bool cpu_has_feature(unsigned long feature)
+{
+   return __cpu_has_feature(feature);
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_POWERPC_CPUFEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h 
b/arch/powerpc/include/asm/cputable.h
index 85a6797f9231..92961bcfbe3f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -577,17 +577,6 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline bool __cpu_has_feature(unsigned long feature)
-{
-   return !!((CPU_FTRS_ALWAYS & feature) ||
- (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
-}
-
-static inline bool cpu_has_feature(unsigned long feature)
-{
-   return __cpu_has_feature(feature);
-}
-
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cputime.h 
b/arch/powerpc/include/asm/cputime.h
index e2452550bcb1..465653b6b393 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -28,6 +28,7 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include 
 #include 
 #include 
+#include 
 
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 5fa6b20eba10..378167377065 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -16,6 +16,7 @@
 #include 
 
 #include 
+#include 
 
 #define PPC_DBELL_MSG_BRDCAST  (0x0400)
 #define PPC_DBELL_TYPE(x)  (((x) & 0xf) << (63-36))
diff --git a/arch/powerpc/include/asm/dcr-native.h 
b/arch/powerpc/include/asm/dcr-native.h
index 4efc11dacb98..4a2beef74277 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -24,6 +24,7 @@
 
 #include 
 #include 
+#include 
 
 typedef struct {
unsigned int base;
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 2563c435a4b1..ef2d9ac1bc52 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/inclu

[PATCH v3 15/21] powerpc: Remove mfvtb()

2016-07-27 Thread Michael Ellerman
From: Kevin Hao 

This function is only used by get_vtb(). They are almost the same except
for the read of the real register. Move the mfspr() into get_vtb() and
kill the function mfvtb(). With this, we can eliminate the use of
cpu_has_feature() in a very core header file like reg.h. This is
preparation for the use of jump labels for cpu_has_feature().

Signed-off-by: Kevin Hao 
Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/reg.h  | 9 -
 arch/powerpc/include/asm/time.h | 2 +-
 2 files changed, 1 insertion(+), 10 deletions(-)

v3: No change.

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d7e9ab5e4709..817c005205f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1256,15 +1256,6 @@ static inline void msr_check_and_clear(unsigned long 
bits)
__msr_check_and_clear(bits);
 }
 
-static inline unsigned long mfvtb (void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
-   if (cpu_has_feature(CPU_FTR_ARCH_207S))
-   return mfspr(SPRN_VTB);
-#endif
-   return 0;
-}
-
 #ifdef __powerpc64__
 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb() ({unsigned long rval;   \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 09211640a0e0..cbbeaf0a6597 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -103,7 +103,7 @@ static inline u64 get_vtb(void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_ARCH_207S))
-   return mfvtb();
+   return mfspr(SPRN_VTB);
 #endif
return 0;
 }
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 14/21] powerpc: Call jump_label_init() in apply_feature_fixups()

2016-07-27 Thread Michael Ellerman
From: "Aneesh Kumar K.V" 

Call jump_label_init() early so that we can use static keys for CPU and
MMU feature checks.

Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/lib/feature-fixups.c | 8 
 1 file changed, 8 insertions(+)

v3: Updated comment.

diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 854b8ba40f8e..2a1904739843 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -13,6 +13,7 @@
  */
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -187,6 +188,13 @@ void __init apply_feature_fixups(void)
  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
do_final_fixups();
+
+   /*
+* Initialise jump label. This causes all the cpu/mmu_has_feature()
+* checks to take on their correct polarity based on the current set of
+* CPU/MMU features.
+*/
+   jump_label_init();
 }
 
 static int __init check_features(void)
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 13/21] jump_label: Make it possible for arches to invoke jump_label_init() earlier

2016-07-27 Thread Michael Ellerman
From: Kevin Hao 

Some arches (powerpc at least) would like to invoke jump_label_init()
much earlier in boot. So check static_key_initialized in order to make
sure this function runs only once.

Signed-off-by: Kevin Hao 
Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 kernel/jump_label.c | 3 +++
 1 file changed, 3 insertions(+)

v3: Updated change log.

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..14d81315fd7e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -205,6 +205,9 @@ void __init jump_label_init(void)
struct static_key *key = NULL;
struct jump_entry *iter;
 
+   if (static_key_initialized)
+   return;
+
jump_label_lock();
jump_label_sort_entries(iter_start, iter_stop);
 
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 12/21] powerpc/mm: Convert early cpu/mmu feature check to use the new helpers

2016-07-27 Thread Michael Ellerman
From: "Aneesh Kumar K.V" 

This switches early feature checks to use the non-static-key variants of
the functions. In later patches we will switch cpu_has_feature()
and mmu_has_feature() to use static keys, and those can only be used
after static key/jump label is initialized. Any feature check before
jump label init should be done using these new helpers.

Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 4 ++--
 arch/powerpc/kernel/paca.c   | 2 +-
 arch/powerpc/kernel/setup_64.c   | 4 ++--
 arch/powerpc/mm/hash_utils_64.c  | 6 +++---
 arch/powerpc/mm/init_64.c| 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

v3: Add/remove some sites now that we're rebased on the early MMU init series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 70c995870297..6deda6ecc4f7 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void);
 extern void radix__early_init_mmu_secondary(void);
 static inline void early_init_mmu_secondary(void)
 {
-   if (radix_enabled())
+   if (__radix_enabled())
return radix__early_init_mmu_secondary();
return hash__early_init_mmu_secondary();
 }
@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t 
first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
  phys_addr_t first_memblock_size)
 {
-   if (radix_enabled())
+   if (__radix_enabled())
return radix__setup_initial_memory_limit(first_memblock_base,
   first_memblock_size);
return hash__setup_initial_memory_limit(first_memblock_base,
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 93dae296b6be..1b0b89e80824 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
 * if we do a GET_PACA() before the feature fixups have been
 * applied
 */
-   if (cpu_has_feature(CPU_FTR_HVMODE))
+   if (__cpu_has_feature(CPU_FTR_HVMODE))
mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
mtspr(SPRN_SPRG_PACA, local_paca);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 984696136f96..86ffab4c427b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
opal_configure_cores();
 
/* Enable AIL if supported, and we are in hypervisor mode */
-   if (cpu_has_feature(CPU_FTR_HVMODE) &&
-   cpu_has_feature(CPU_FTR_ARCH_207S)) {
+   if (__cpu_has_feature(CPU_FTR_HVMODE) &&
+   __cpu_has_feature(CPU_FTR_ARCH_207S)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5f922e93af25..3aad12fb9d2f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -530,7 +530,7 @@ static bool might_have_hea(void)
 * we will never see an HEA ethernet device.
 */
 #ifdef CONFIG_IBMEBUS
-   return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
+   return !__cpu_has_feature(CPU_FTR_ARCH_207S) &&
!firmware_has_feature(FW_FEATURE_SPLPAR);
 #else
return false;
@@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void)
 * Not in the device-tree, let's fallback on known size
 * list for 16M capable GP & GR
 */
-   if (mmu_has_feature(MMU_FTR_16M_PAGE))
+   if (__mmu_has_feature(MMU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
   sizeof(mmu_psize_defaults_gp));
 found:
@@ -591,7 +591,7 @@ found:
mmu_vmalloc_psize = MMU_PAGE_64K;
if (mmu_linear_psize == MMU_PAGE_4K)
mmu_linear_psize = MMU_PAGE_64K;
-   if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
+   if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
/*
 * When running on pSeries using 64k pages for ioremap
 * would stop us accessing the HEA ethernet. So if we
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6259f5db525b..c21d160088fa 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void)
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
 
-   i

[PATCH v3 11/21] powerpc/mm: Add __cpu/__mmu_has_feature()

2016-07-27 Thread Michael Ellerman
In later patches, we will be switching cpu and mmu feature checks to
use static keys. For checks in early boot before jump label is
initialized we need a variant of cpu/mmu_has_feature() that doesn't use
jump labels. So create those called, unimaginatively,
__cpu/__mmu_has_feature().

Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/cputable.h |  7 ++-
 arch/powerpc/include/asm/mmu.h  | 17 -
 2 files changed, 22 insertions(+), 2 deletions(-)

v3: Don't change any logic.
Bool conversions were split out.
Don't convert any call sites in this patch.

diff --git a/arch/powerpc/include/asm/cputable.h 
b/arch/powerpc/include/asm/cputable.h
index 7bb87017d9db..85a6797f9231 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -577,12 +577,17 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline bool cpu_has_feature(unsigned long feature)
+static inline bool __cpu_has_feature(unsigned long feature)
 {
return !!((CPU_FTRS_ALWAYS & feature) ||
  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
+static inline bool cpu_has_feature(unsigned long feature)
+{
+   return __cpu_has_feature(feature);
+}
+
 #define HBP_NUM 1
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f413b3213a3b..e3eff365e55d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -135,11 +135,16 @@ enum {
0,
 };
 
-static inline bool mmu_has_feature(unsigned long feature)
+static inline bool __mmu_has_feature(unsigned long feature)
 {
return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
+static inline bool mmu_has_feature(unsigned long feature)
+{
+   return __mmu_has_feature(feature);
+}
+
 static inline void mmu_clear_feature(unsigned long feature)
 {
cur_cpu_spec->mmu_features &= ~feature;
@@ -168,11 +173,21 @@ static inline bool radix_enabled(void)
 {
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
 }
+
+static inline bool __radix_enabled(void)
+{
+   return __mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
 #else
 static inline bool radix_enabled(void)
 {
return false;
 }
+
+static inline bool __radix_enabled(void)
+{
+   return false;
+}
 #endif
 
 #endif /* !__ASSEMBLY__ */
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 04/21] powerpc/mm: Do radix device tree scanning earlier

2016-07-27 Thread Michael Ellerman
Like we just did for hash, split the device tree scanning parts out and
call them from mmu_early_init_devtree().

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
 arch/powerpc/mm/init_64.c| 4 +++-
 arch/powerpc/mm/pgtable-radix.c  | 3 +--
 3 files changed, 5 insertions(+), 3 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 358f1410dc0d..9ee00c2576d0 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -109,6 +109,7 @@ extern int mmu_io_psize;
 /* MMU initialization */
 void mmu_early_init_devtree(void);
 void hash__early_init_devtree(void);
+void radix__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d02c6c9a..e0ab33d20a10 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -427,7 +427,9 @@ void __init mmu_early_init_devtree(void)
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
-   if (!radix_enabled())
+   if (radix_enabled())
+   radix__early_init_devtree();
+   else
hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 003ff48a11b6..f34ccdbe0fbd 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long 
node,
return 1;
 }
 
-static void __init radix_init_page_sizes(void)
+void __init radix__early_init_devtree(void)
 {
int rc;
 
@@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
__pte_frag_nr = H_PTE_FRAG_NR;
__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
-   radix_init_page_sizes();
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
radix_init_native();
lpcr = mfspr(SPRN_LPCR);
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 10/21] powerpc/mm: Define radix_enabled() in one place & use static inline

2016-07-27 Thread Michael Ellerman
Currently we have radix_enabled() defined three times: twice in
asm/book3s/64/mmu.h and then a fallback in asm/mmu.h.

Consolidate them in asm/mmu.h. While we're at it, convert them to
static inlines, and change the fallback case to return a bool, like
mmu_has_feature().

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  7 ---
 arch/powerpc/include/asm/mmu.h   | 16 
 2 files changed, 12 insertions(+), 11 deletions(-)

v3: New.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index ad2d501cddcf..70c995870297 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -23,13 +23,6 @@ struct mmu_psize_def {
 };
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-#ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX)
-#else
-#define radix_enabled() (0)
-#endif
-
-
 #endif /* __ASSEMBLY__ */
 
 /* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index eb942a446969..f413b3213a3b 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -163,6 +163,18 @@ static inline void assert_pte_locked(struct mm_struct *mm, 
unsigned long addr)
 }
 #endif /* !CONFIG_DEBUG_VM */
 
+#ifdef CONFIG_PPC_RADIX_MMU
+static inline bool radix_enabled(void)
+{
+   return mmu_has_feature(MMU_FTR_TYPE_RADIX);
+}
+#else
+static inline bool radix_enabled(void)
+{
+   return false;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
@@ -230,9 +242,5 @@ static inline void mmu_early_init_devtree(void) { }
 #  include 
 #endif
 
-#ifndef radix_enabled
-#define radix_enabled() (0)
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 09/21] powerpc/kernel: Convert cpu_has_feature() to returning bool

2016-07-27 Thread Michael Ellerman
The intention is that the result is only used as a boolean, so enforce
that by changing the return type to bool.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/cputable.h | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

v3: Split out.

diff --git a/arch/powerpc/include/asm/cputable.h 
b/arch/powerpc/include/asm/cputable.h
index df4fb5faba43..7bb87017d9db 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -2,6 +2,7 @@
 #define __ASM_POWERPC_CPUTABLE_H
 
 
+#include 
 #include 
 #include 
 #include 
@@ -576,12 +577,10 @@ enum {
 };
 #endif /* __powerpc64__ */
 
-static inline int cpu_has_feature(unsigned long feature)
+static inline bool cpu_has_feature(unsigned long feature)
 {
-   return (CPU_FTRS_ALWAYS & feature) ||
-  (CPU_FTRS_POSSIBLE
-   & cur_cpu_spec->cpu_features
-   & feature);
+   return !!((CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
 }
 
 #define HBP_NUM 1
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 08/21] powerpc/kernel: Convert mmu_has_feature() to returning bool

2016-07-27 Thread Michael Ellerman
The intention is that the result is only used as a boolean, so enforce
that by changing the return type to bool.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/mmu.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

v3: Split out.

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 599781e48552..eb942a446969 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -135,9 +135,9 @@ enum {
0,
 };
 
-static inline int mmu_has_feature(unsigned long feature)
+static inline bool mmu_has_feature(unsigned long feature)
 {
-   return (MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
+   return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
 }
 
 static inline void mmu_clear_feature(unsigned long feature)
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 07/21] powerpc/mm: Make MMU_FTR_RADIX a MMU family feature

2016-07-27 Thread Michael Ellerman
From: "Aneesh Kumar K.V" 

MMU feature bits are defined such that we use the lower half to
represent MMU family features. Remove the strict split of half and
also move Radix to an MMU family feature. Radix introduces a new MMU
model and strictly speaking it is a new MMU family. This also frees
up bits which can be used for individual features later.

Signed-off-by: Aneesh Kumar K.V 
Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  2 +-
 arch/powerpc/include/asm/mmu.h   | 15 +++
 arch/powerpc/kernel/entry_64.S   |  2 +-
 arch/powerpc/kernel/exceptions-64s.S |  8 
 arch/powerpc/kernel/idle_book3s.S|  2 +-
 arch/powerpc/kernel/prom.c   |  2 +-
 arch/powerpc/mm/init_64.c|  2 +-
 7 files changed, 16 insertions(+), 17 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 9ee00c2576d0..ad2d501cddcf 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -24,7 +24,7 @@ struct mmu_psize_def {
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
 #ifdef CONFIG_PPC_RADIX_MMU
-#define radix_enabled() mmu_has_feature(MMU_FTR_RADIX)
+#define radix_enabled() mmu_has_feature(MMU_FTR_TYPE_RADIX)
 #else
 #define radix_enabled() (0)
 #endif
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 14220c5c12c9..599781e48552 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -12,7 +12,7 @@
  */
 
 /*
- * First half is MMU families
+ * MMU families
  */
 #define MMU_FTR_HPTE_TABLE ASM_CONST(0x0001)
 #define MMU_FTR_TYPE_8xx   ASM_CONST(0x0002)
@@ -21,9 +21,13 @@
 #define MMU_FTR_TYPE_FSL_E ASM_CONST(0x0010)
 #define MMU_FTR_TYPE_47x   ASM_CONST(0x0020)
 
+/* Radix page table supported and enabled */
+#define MMU_FTR_TYPE_RADIX ASM_CONST(0x0040)
+
 /*
- * This is individual features
+ * Individual features below.
  */
+
 /*
  * We need to clear top 16bits of va (from the remaining 64 bits )in
  * tlbie* instructions
@@ -93,11 +97,6 @@
  */
 #define MMU_FTR_1T_SEGMENT ASM_CONST(0x4000)
 
-/*
- * Radix page table available
- */
-#define MMU_FTR_RADIX  ASM_CONST(0x8000)
-
 /* MMU feature bit sets for various CPUs */
 #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2  \
MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -131,7 +130,7 @@ enum {
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
 #ifdef CONFIG_PPC_RADIX_MMU
-   MMU_FTR_RADIX |
+   MMU_FTR_TYPE_RADIX |
 #endif
0,
 };
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2e0c565754aa..0bdceef11e75 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -532,7 +532,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
b   2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
clrrdi  r6,r8,28/* get its ESID */
clrrdi  r9,r1,28/* get current sp ESID */
diff --git a/arch/powerpc/kernel/exceptions-64s.S 
b/arch/powerpc/kernel/exceptions-64s.S
index 6200e4925d26..334c7fac7a4a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -938,7 +938,7 @@ BEGIN_MMU_FTR_SECTION
b   do_hash_page/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
b   handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
.align  7
.globl  h_data_storage_common
@@ -969,7 +969,7 @@ BEGIN_MMU_FTR_SECTION
b   do_hash_page/* Try to handle as hpte fault */
 MMU_FTR_SECTION_ELSE
b   handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
@@ -1390,7 +1390,7 @@ slb_miss_realmode:
 #ifdef CONFIG_PPC_STD_MMU_64
 BEGIN_MMU_FTR_SECTION
bl  slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 #endif
/* All done -- return from exception. */
 
@@ -1404,7 +1404,7 @@ BEGIN_MMU_FTR_SECTION
beq-2f
 FTR_SECTION_ELSE
b   2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 
 .machine   push
 .machine   "power4"
diff --git a/arch/powerpc/kernel/idle_book3s.S 
b/arch/powerpc/kernel/idle_book3s.S
index 335eb6cedae5..4ccffbbc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -570,7 

[PATCH v3 06/21] powerpc/kernel: Check features don't change after patching

2016-07-27 Thread Michael Ellerman
Early in boot we binary patch some sections of code based on the CPU and
MMU feature bits. But it is a one-time patching, there is no facility
for repatching the code later if the set of features change.

It is a major bug if the set of features changes after we've done the
code patching - so add a check for it.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/lib/feature-fixups.c | 27 ++-
 1 file changed, 26 insertions(+), 1 deletion(-)

v3: Merged into this series.

diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index defb2998b818..854b8ba40f8e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -152,10 +152,19 @@ static void do_final_fixups(void)
 #endif
 }
 
-void apply_feature_fixups(void)
+static unsigned long __initdata saved_cpu_features;
+static unsigned int __initdata saved_mmu_features;
+#ifdef CONFIG_PPC64
+static unsigned long __initdata saved_firmware_features;
+#endif
+
+void __init apply_feature_fixups(void)
 {
struct cpu_spec *spec = *PTRRELOC(&cur_cpu_spec);
 
+   saved_cpu_features = spec->cpu_features;
+   saved_mmu_features = spec->mmu_features;
+
/*
 * Apply the CPU-specific and firmware specific fixups to kernel text
 * (nop out sections not relevant to this CPU or this firmware).
@@ -173,12 +182,28 @@ void apply_feature_fixups(void)
 PTRRELOC(&__stop___lwsync_fixup));
 
 #ifdef CONFIG_PPC64
+   saved_firmware_features = powerpc_firmware_features;
do_feature_fixups(powerpc_firmware_features,
  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 #endif
do_final_fixups();
 }
 
+static int __init check_features(void)
+{
+   WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
+"CPU features changed after feature patching!\n");
+   WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
+"MMU features changed after feature patching!\n");
+#ifdef CONFIG_PPC64
+   WARN(saved_firmware_features != powerpc_firmware_features,
+"Firmware features changed after feature patching!\n");
+#endif
+
+   return 0;
+}
+late_initcall(check_features);
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)   \
-- 
2.7.4
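
The idea boils down to recording the feature masks when the one-time patching runs and warning if they ever differ afterwards. A minimal user-space sketch of that pattern follows; the mask value and the fake_patch()/late_check() helpers are invented for illustration and are not kernel symbols.

/*
 * Record-then-verify sketch: capture the mask at "patch" time, then
 * complain later if it has changed.  User-space stand-in only.
 */
#include <stdio.h>

static unsigned long saved_mmu_features;
static unsigned long cur_mmu_features = 0x40;   /* pretend MMU_FTR_TYPE_RADIX */

static void fake_patch(void)
{
        /* the kernel nops out unused feature sections at this point */
        saved_mmu_features = cur_mmu_features;
}

static void late_check(void)
{
        /* the kernel uses WARN(); plain stderr is enough for the sketch */
        if (saved_mmu_features != cur_mmu_features)
                fprintf(stderr, "MMU features changed after feature patching!\n");
}

int main(void)
{
        fake_patch();
        cur_mmu_features &= ~0x40UL;    /* simulate a late, illegal change */
        late_check();
        return 0;
}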

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 05/21] powerpc/64: Do feature patching before MMU init

2016-07-27 Thread Michael Ellerman
Up until now we needed to do the MMU init before feature patching,
because part of the MMU init was scanning the device tree and setting
and/or clearing some MMU feature bits.

Now that we have split that MMU feature modification out into routines
called from early_init_devtree() (called earlier) we can now do feature
patching before calling MMU init.

The advantage of this is it means the remainder of the MMU init runs
with the final set of features which will apply for the rest of the life
of the system. This means we don't have to special case anything called
from MMU init to deal with a changing set of feature bits.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/kernel/setup_64.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8216aed22b7..984696136f96 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -298,12 +298,12 @@ void __init early_setup(unsigned long dt_ptr)
 */
configure_exceptions();
 
-   /* Initialize the hash table or TLB handling */
-   early_init_mmu();
-
/* Apply all the dynamic patching */
apply_feature_fixups();
 
+   /* Initialize the hash table or TLB handling */
+   early_init_mmu();
+
/*
 * At this point, we can let interrupts switch to virtual mode
 * (the MMU has been setup), so adjust the MSR in the PACA to
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 03/21] powerpc/mm: Do hash device tree scanning earlier

2016-07-27 Thread Michael Ellerman
Currently MMU initialisation (early_init_mmu()) consists of a mixture of
scanning the device tree, setting MMU feature bits, and then also doing
actual initialisation of MMU data structures.

We'd like to decouple the setting of the MMU features from the actual
setup. So split out the device tree scanning, and associated code, and
call it from mmu_init_early_devtree().

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h |  1 +
 arch/powerpc/mm/hash_utils_64.c  | 15 +--
 arch/powerpc/mm/init_64.c|  3 +++
 3 files changed, 13 insertions(+), 6 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index 4eb4bd019716..358f1410dc0d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -108,6 +108,7 @@ extern int mmu_io_psize;
 
 /* MMU initialization */
 void mmu_early_init_devtree(void);
+void hash__early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 1ff11c1bb182..5f922e93af25 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,12 +759,6 @@ static void __init htab_initialize(void)
 
DBG(" -> htab_initialize()\n");
 
-   /* Initialize segment sizes */
-   htab_init_seg_sizes();
-
-   /* Initialize page sizes */
-   htab_init_page_sizes();
-
if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
mmu_kernel_ssize = MMU_SEGSIZE_1T;
mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,6 +879,15 @@ static void __init htab_initialize(void)
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+   /* Initialize segment sizes */
+   htab_init_seg_sizes();
+
+   /* Initialize page sizes */
+   htab_init_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
/*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 0d51e6e25db5..d02c6c9a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -426,5 +426,8 @@ void __init mmu_early_init_devtree(void)
/* Disable radix mode based on kernel command line. */
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
+
+   if (!radix_enabled())
+   hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 02/21] powerpc/mm: Move disable_radix handling into mmu_early_init_devtree()

2016-07-27 Thread Michael Ellerman
Move the handling of the disable_radix command line argument into the
newly created mmu_early_init_devtree().

It's an MMU option so it's preferable to have it in an mm related file,
and it also means platforms that don't support radix don't have to carry
the code.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/kernel/prom.c | 13 -
 arch/powerpc/mm/init_64.c  | 11 +++
 2 files changed, 11 insertions(+), 13 deletions(-)

v3: Merged into this series.

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9686984e79c4..b4b6952e8991 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -647,14 +647,6 @@ static void __init early_reserve_mem(void)
 #endif
 }
 
-static bool disable_radix;
-static int __init parse_disable_radix(char *p)
-{
-   disable_radix = true;
-   return 0;
-}
-early_param("disable_radix", parse_disable_radix);
-
 void __init early_init_devtree(void *params)
 {
phys_addr_t limit;
@@ -744,11 +736,6 @@ void __init early_init_devtree(void *params)
 */
spinning_secondaries = boot_cpu_count - 1;
 #endif
-   /*
-* now fixup radix MMU mode based on kernel command line
-*/
-   if (disable_radix)
-   cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
mmu_early_init_devtree();
 
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0fb33ac3db2..0d51e6e25db5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -413,7 +413,18 @@ EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
 
 #ifdef CONFIG_PPC_STD_MMU_64
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+   disable_radix = true;
+   return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
 void __init mmu_early_init_devtree(void)
 {
+   /* Disable radix mode based on kernel command line. */
+   if (disable_radix)
+   cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v3 01/21] powerpc/mm: Add mmu_early_init_devtree()

2016-07-27 Thread Michael Ellerman
Empty for now, but we'll add to it in the next patch.

Signed-off-by: Michael Ellerman 
---
 arch/powerpc/include/asm/book3s/64/mmu.h | 1 +
 arch/powerpc/include/asm/mmu.h   | 1 +
 arch/powerpc/kernel/prom.c   | 2 ++
 arch/powerpc/mm/init_64.c| 6 ++
 4 files changed, 10 insertions(+)

v3: Merged into this series.

diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h 
b/arch/powerpc/include/asm/book3s/64/mmu.h
index d4eda6420523..4eb4bd019716 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -107,6 +107,7 @@ extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 
 /* MMU initialization */
+void mmu_early_init_devtree(void);
 extern void radix_init_native(void);
 extern void hash__early_init_mmu(void);
 extern void radix__early_init_mmu(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 54471228f7b8..14220c5c12c9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -210,6 +210,7 @@ extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
   phys_addr_t first_memblock_size);
+static inline void mmu_early_init_devtree(void) { }
 #endif /* __ASSEMBLY__ */
 #endif
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bae3db791150..9686984e79c4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -750,6 +750,8 @@ void __init early_init_devtree(void *params)
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_RADIX;
 
+   mmu_early_init_devtree();
+
 #ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 33709bdb0419..d0fb33ac3db2 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -411,3 +411,9 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+void __init mmu_early_init_devtree(void)
+{
+}
+#endif /* CONFIG_PPC_STD_MMU_64 */
-- 
2.7.4

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Balbir Singh
On Wed, Jul 27, 2016 at 09:50:03PM +1000, Nicholas Piggin wrote:
> On Wed, 27 Jul 2016 17:32:06 +1000
> Balbir Singh  wrote:
> 
> > +#ifndef CONFIG_RELOCATABLE
> > +   b   power7_wakeup_common
> > +#else
> > +   /*
> > +* We can't just use a direct branch to power7_wakeup_common
> > +* because the distance from here to there depends on where
> > +* the kernel ends up being put.
> > +*/
> > +   mfctr   r11
> > +   ld  r10, PACAKBASE(r13)
> > +   LOAD_HANDLER(r10, power7_wakeup_common)
> > +   mtctr   r10
> > +   bctr
> >  #endif
> 
> So r10 and r11 are safe to use (as well as existing registers
> being used without saving) because we are returning via the nap
> functions that the caller will expect to trash volatile registers,
> yes?
>

r10, r11 are volatile as per the ABI, so yes.

 
> In that case I can't see a problem with this.
>

Thanks for the review

Balbir 
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PowerPC] Kernel OOPS while compiling LTP test suite on linus mainline

2016-07-27 Thread Abdul Haleem

Hi,

Kernel OOPS messages were seen while compiling linux test project (LTP) source 
on 4.7.0-rc5 mainline.


kernel config : pseries_le_defconfig
machine Type  : PowerVM LPAR
trace messages:
*15:34:57* [  862.548866] Unable to handle kernel paging request for data at 
address 0x
*15:34:57* [  862.548904] Faulting instruction address: 0xc0260900
*15:34:57* [  862.548911] Oops: Kernel access of bad area, sig: 11 [#1]
*15:34:57* [  862.548917] SMP NR_CPUS=2048 NUMA pSeries
*15:34:57* [  862.548924] Modules linked in: rtc_generic(E) pseries_rng(E) 
autofs4(E)
*15:34:57* [  862.548938] CPU: 0 PID: 129 Comm: kswapd2 Tainted: GE 
  4.7.0-rc5-autotest #1
*15:34:57* [  862.548946] task: c007766a2600 ti: c00776764000 task.ti: 
c00776764000
*15:34:57* [  862.548953] NIP: c0260900 LR: c026452c CTR: 

*15:34:57* [  862.548961] REGS: c00776767830 TRAP: 0300   Tainted: G
E(4.7.0-rc5-autotest)
*15:34:57* [  862.548968] MSR: 80010280b033 
  CR: 24000222  XER: 2001
*15:34:57* [  862.548996] CFAR: c0008468 DAR:  DSISR: 
4000 SOFTE: 0
*15:34:57* GPR00: c026452c c00776767ab0 c13ac100 
c0077ff54200
*15:34:57* GPR04:  c00776767ba0 0001 
c151c100
*15:34:57* GPR08:  000b4057 8000 
c0071664b7a0
*15:34:57* GPR12:  ce80 0001 
f15dc000
*15:34:57* GPR16: c0077ff54700   
c0077ff54700
*15:34:57* GPR20: 0001 0100 0200 
c0077ff54200
*15:34:57* GPR24: c00776767ba0 0020  
0001
*15:34:57* GPR28: 0010  c00776767ba0 
f15dc020
*15:34:57* [  862.549094] NIP [c0260900] 
move_active_pages_to_lru.isra.16+0xa0/0x380
*15:34:57* [  862.549102] LR [c026452c] shrink_active_list+0x2fc/0x510
*15:34:57* [  862.549108] Call Trace:
*15:34:57* [  862.549112] [c00776767ab0] [f15dc000] 
0xf15dc000 (unreliable)
*15:34:57* [  862.549122] [c00776767b60] [c026452c] 
shrink_active_list+0x2fc/0x510
*15:34:57* [  862.549131] [c00776767c50] [c02665d4] 
kswapd+0x434/0xa70
*15:34:57* [  862.549139] [c00776767d80] [c00f1b50] 
kthread+0x110/0x130
*15:34:57* [  862.549148] [c00776767e30] [c00095f0] 
ret_from_kernel_thread+0x5c/0x6c
*15:34:57* [  862.549155] Instruction dump:
*15:34:57* [  862.549161] 6000 3b200020 3a81 7b7c26e4 3aa00100 3ac00200 
3a40 3a770500
*15:34:57* [  862.549174] 3a20 6000 6000 6042  
7fbd4840 419e01b8 ebfd0008
*15:34:57* [  862.549193] ---[ end trace fcc50906d9164c56 ]---
*15:34:57* [  862.550562]
*15:35:18* [  883.551577] INFO: rcu_sched self-detected stall on CPU
*15:35:18* [  883.551578] INFO: rcu_sched self-detected stall on CPU
*15:35:18* [  883.551588]   2-...: (5249 ticks this GP) 
idle=cc5/141/0 softirq=50260/50260 fqs=5249
*15:35:18* [  883.551591](t=5250 jiffies g=48365 c=48364 q=182)


Regards,
Abdul
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PowerPC] Kernel OOPS while compiling LTP test suite on linus mainline

2016-07-27 Thread Balbir Singh
On Wed, Jul 27, 2016 at 04:45:35PM +0530, Abdul Haleem wrote:
> Hi,
> 
> Kernel OOPS messages were seen while compiling linux test project (LTP) 
> source on 4.7.0-rc5 mainline.
> 
> Kernel config : pseries_le_defconfig
> Machine Type  : PowerVM LPAR
> Machine hardware : LPAR uses 16 vCPUs, and 29G memory
> 
> trace messages:
> *15:34:57* [  862.548866] Unable to handle kernel paging request for data at 
> address 0x
> *15:34:57* [  862.548904] Faulting instruction address: 0xc0260900
> *15:34:57* [  862.548911] Oops: Kernel access of bad area, sig: 11 [#1]
> *15:34:57* [  862.548917] SMP NR_CPUS=2048 NUMA pSeries
> *15:34:57* [  862.548924] Modules linked in: rtc_generic(E) pseries_rng(E) 
> autofs4(E)
> *15:34:57* [  862.548938] CPU: 0 PID: 129 Comm: kswapd2 Tainted: G
> E   4.7.0-rc5-autotest #1
> *15:34:57* [  862.548946] task: c007766a2600 ti: c00776764000 
> task.ti: c00776764000
> *15:34:57* [  862.548953] NIP: c0260900 LR: c026452c CTR: 
> 
> *15:34:57* [  862.548961] REGS: c00776767830 TRAP: 0300   Tainted: G  
>   E(4.7.0-rc5-autotest)
> *15:34:57* [  862.548968] MSR: 80010280b033 
>   CR: 24000222  XER: 2001
> *15:34:57* [  862.548996] CFAR: c0008468 DAR:  DSISR: 
> 4000 SOFTE: 0
> *15:34:57* GPR00: c026452c c00776767ab0 c13ac100 
> c0077ff54200
> *15:34:57* GPR04:  c00776767ba0 0001 
> c151c100
> *15:34:57* GPR08:  000b4057 8000 
> c0071664b7a0
> *15:34:57* GPR12:  ce80 0001 
> f15dc000
> *15:34:57* GPR16: c0077ff54700   
> c0077ff54700
> *15:34:57* GPR20: 0001 0100 0200 
> c0077ff54200
> *15:34:57* GPR24: c00776767ba0 0020  
> 0001
> *15:34:57* GPR28: 0010  c00776767ba0 
> f15dc020
> *15:34:57* [  862.549094] NIP [c0260900] 
> move_active_pages_to_lru.isra.16+0xa0/0x380
> *15:34:57* [  862.549102] LR [c026452c] shrink_active_list+0x2fc/0x510
> *15:34:57* [  862.549108] Call Trace:
> *15:34:57* [  862.549112] [c00776767ab0] [f15dc000] 
> 0xf15dc000 (unreliable)
> *15:34:57* [  862.549122] [c00776767b60] [c026452c] 
> shrink_active_list+0x2fc/0x510
> *15:34:57* [  862.549131] [c00776767c50] [c02665d4] 
> kswapd+0x434/0xa70
> *15:34:57* [  862.549139] [c00776767d80] [c00f1b50] 
> kthread+0x110/0x130
> *15:34:57* [  862.549148] [c00776767e30] [c00095f0] 
> ret_from_kernel_thread+0x5c/0x6c
> *15:34:57* [  862.549155] Instruction dump:
> *15:34:57* [  862.549161] 6000 3b200020 3a81 7b7c26e4 3aa00100 
> 3ac00200 3a40 3a770500
> *15:34:57* [  862.549174] 3a20 6000 6000 6042  
> 7fbd4840 419e01b8 ebfd0008
> *15:34:57* [  862.549193] ---[ end trace fcc50906d9164c56 ]---
> *15:34:57* [  862.550562]
> *15:35:18* [  883.551577] INFO: rcu_sched self-detected stall on CPU
> *15:35:18* [  883.551578] INFO: rcu_sched self-detected stall on CPU
> *15:35:18* [  883.551588] 2-...: (5249 ticks this GP) 
> idle=cc5/141/0 softirq=50260/50260 fqs=5249
> *15:35:18* [  883.551591]  (t=5250 jiffies g=48365 c=48364 q=182)
>

Is the problem repeatable? Comm says kswapd2, could you post the
topology?
 
> Regards,
> Abdul
> 
> ___
> Linuxppc-dev mailing list
> Linuxppc-dev@lists.ozlabs.org
> https://lists.ozlabs.org/listinfo/linuxppc-dev
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Nicholas Piggin
On Wed, 27 Jul 2016 20:01:24 +1000
Balbir Singh  wrote:

> On Wed, Jul 27, 2016 at 7:53 PM, Benjamin Herrenschmidt
>  wrote:
> > On Wed, 2016-07-27 at 17:32 +1000, Balbir Singh wrote:  
> >> From: Balbir Singh 
> >>
> >> Currently the power management bits are broken w.r.t. relocation.
> >> There are direct branches from system_reset_pSeries to
> >> power7_wakeup_*.  
> >
> > Side track: we should really get rid of the _pSeries suffix for
> > these things :-)
> >  
> 
> I thought about it, but then thought it would need more auditing
> across places and hence held back. I'll be happy to send another patch
> to remove _pSeries from exceptions-64s.S to begin with, as a follow-up.

Ahh, my patches do that actually. Not as a primary concern, but
I removed almost all labels and naming from the exception-64s.h file,
and moved them into new head-64.h, so I changed the names a little
in the process.

It would be helpful not to touch this too much before we have some
more discussion of my reworking. I'm happy to rebase my stuff on top of
bugfixes though, of course.

Thanks,
Nick
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Nicholas Piggin
On Wed, 27 Jul 2016 17:32:06 +1000
Balbir Singh  wrote:

> From: Balbir Singh 
> 
> Currently the power management bits are broken w.r.t. relocation.
> There are direct branches from system_reset_pSeries to
> power7_wakeup_*. The correct way to do it is to do what
> the slb miss handler does, which is jump to a small stub within
> the first 64k of the relocated address and then jump to the
> actual location.
> 
> The code has been lightly tested (not the kvm bits), I would highly
> appreciate a review of the code. I suspect there might be easy
> to find bugs :)
> 
> Cc: b...@kernel.crashing.org
> Cc: m...@ellerman.id.au
> Cc: pau...@samba.org
> Cc: npig...@gmail.com
> Cc: sva...@linux.vnet.ibm.com
> 
> Signed-off-by: Balbir Singh 
> ---
>  arch/powerpc/kernel/exceptions-64s.S | 82
> ++-- 1 file changed, 51
> insertions(+), 31 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/exceptions-64s.S
> b/arch/powerpc/kernel/exceptions-64s.S index 8bcc1b4..64f9650 100644
> --- a/arch/powerpc/kernel/exceptions-64s.S
> +++ b/arch/powerpc/kernel/exceptions-64s.S
> @@ -118,39 +118,21 @@ BEGIN_FTR_SECTION
>   cmpwi   cr4,r5,1
>   mtspr   SPRN_HSPRG0,r13
>  
> - lbz r0,PACA_THREAD_IDLE_STATE(r13)
> - cmpwi   cr2,r0,PNV_THREAD_NAP
> - bgt cr2,8f  /* Either
> sleep or Winkle */ -
> - /* Waking up from nap should not cause hypervisor state loss
> */
> - bgt cr3,.
> -
> - /* Waking up from nap */
> - li  r0,PNV_THREAD_RUNNING
> - stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear
> thread state */ -
> -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> - li  r0,KVM_HWTHREAD_IN_KERNEL
> - stb r0,HSTATE_HWTHREAD_STATE(r13)
> - /* Order setting hwthread_state vs. testing hwthread_req */
> - sync
> - lbz r0,HSTATE_HWTHREAD_REQ(r13)
> - cmpwi   r0,0
> - beq 1f
> - b   kvm_start_guest
> -1:
> +#ifndef CONFIG_RELOCATABLE
> + b   power7_wakeup_common
> +#else
> + /*
> +  * We can't just use a direct branch to power7_wakeup_common
> +  * because the distance from here to there depends on where
> +  * the kernel ends up being put.
> +  */
> + mfctr   r11
> + ld  r10, PACAKBASE(r13)
> + LOAD_HANDLER(r10, power7_wakeup_common)
> + mtctr   r10
> + bctr
>  #endif

So r10 and r11 are safe to use (as well as existing registers
being used without saving) because we are returning via the nap
functions that the caller will expect to trash volatile registers,
yes?

In that case I can't see a problem with this.

Thanks,
Nick


___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Balbir Singh
On Wed, Jul 27, 2016 at 05:32:06PM +1000, Balbir Singh wrote:
> 
> From: Balbir Singh 
> 
> Currently the power management bits are broken w.r.t. relocation.
> There are direct branches from system_reset_pSeries to
> power7_wakeup_*. The correct way to do it is to do what
> the slb miss handler does, which is jump to a small stub within
> the first 64k of the relocated address and then jump to the
> actual location.
> 
> The code has been lightly tested (not the kvm bits), I would highly
> appreciate a review of the code. I suspect there might be easy
> to find bugs :)
>
Snip
 
> + .align 7
> +_GLOBAL(power7_wakeup_common)
> +#ifdef CONFIG_RELOCATABLE
> + mtctr   r11
> +#endif
> + lbz r0,PACA_THREAD_IDLE_STATE(r13)
> + cmpwi   cr2,r0,PNV_THREAD_NAP
> + bgt cr2,8f  /* Either sleep or Winkle */
> +
> + /* Waking up from nap should not cause hypervisor state loss */
> + bgt cr3,.
> +
> + /* Waking up from nap */
> + li  r0,PNV_THREAD_RUNNING
> + stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
> +
> +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> + li  r0,KVM_HWTHREAD_IN_KERNEL
> + stb r0,HSTATE_HWTHREAD_STATE(r13)
> + /* Order setting hwthread_state vs. testing hwthread_req */
> + sync
> + lbz r0,HSTATE_HWTHREAD_REQ(r13)
> + cmpwi   r0,0
> + beq 1f
> + b   kvm_start_guest
> +1:
> +#endif
> +
> + /* Return SRR1 from power7_nap() */
> + mfspr   r3,SPRN_SRR1
> + beq cr3,2f
> + b   power7_wakeup_noloss
> +2:   b   power7_wakeup_loss
> +
> + /* Fast Sleep wakeup on PowerNV */
> +8:   GET_PACA(r13)
> + b   power7_wakeup_tb_loss
> +
>  /*
>   * Hash table stuff
>   */

As per 0day, this part needs to be under #ifdef CONFIG_PPC_P7_NAP

I'll repost a new version tomorrow.
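
For reference, the shape of that fix is simply to keep every reference to the power7 idle code inside the same guard that builds it. A rough C sketch, with placeholder function names rather than the real kernel entry points:

/*
 * Guard both the definition and every reference with the same option,
 * so configs without CONFIG_PPC_P7_NAP (like ps3_defconfig) never see
 * undefined references.  Illustration only.
 */
#include <stdio.h>

#ifdef CONFIG_PPC_P7_NAP
static void power7_wakeup_stub(void)
{
        puts("wakeup path built in");
}
#endif

static void system_reset_handler(void)
{
#ifdef CONFIG_PPC_P7_NAP
        power7_wakeup_stub();   /* only referenced when it is built */
#else
        puts("no P7 nap support in this config");
#endif
}

int main(void)
{
        system_reset_handler();
        return 0;
}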

Balbir Singh

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH v12 02/30] powerpc/process: Add the function flush_tmregs_to_thread

2016-07-27 Thread Michael Ellerman

Hi Simon,

wei.guo.si...@gmail.com writes:
> From: Anshuman Khandual 
>
> This patch creates a function flush_tmregs_to_thread which
> will then be used by subsequent patches in this series. The
> function checks for self tracing ptrace interface attempts
> while in the TM context and logs appropriate warning message.
>
> Cc: Benjamin Herrenschmidt 
> Cc: Paul Mackerras 
> Cc: Michael Ellerman 
> Cc: Shuah Khan 
> Cc: Anton Blanchard 
> Cc: Cyril Bur 
> Cc: Anshuman Khandual 
> Cc: Simon Guo 
> Cc: Ulrich Weigand 
> Cc: Michael Neuling 
> Cc: Andrew Morton 
> Cc: Kees Cook 
> Cc: Rashmica Gupta 
> Cc: Khem Raj 
> Cc: Jessica Yu 
> Cc: Jiri Kosina 
> Cc: Miroslav Benes 
> Cc: Suraj Jitindar Singh 
> Cc: Chris Smart 
> Cc: linuxppc-dev@lists.ozlabs.org
> Cc: linux-ker...@vger.kernel.org
> Cc: linux-kselft...@vger.kernel.org

Please drop these Cc lines from the change log.

> Signed-off-by: Anshuman Khandual 
> ---

This and all the other patches need your Signed-off-by, as well as
Anshuman's.

cheers
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PowerPC] Kernel OOPS while compiling LTP test suite on linus mainline

2016-07-27 Thread Abdul Haleem

Hi,

Kernel OOPS messages were seen while compiling linux test project (LTP) source 
on 4.7.0-rc5 mainline.

Kernel config : pseries_le_defconfig
Machine Type  : PowerVM LPAR
Machine hardware : LPAR uses 16 vCPUs, and 29G memory

trace messages:
*15:34:57* [  862.548866] Unable to handle kernel paging request for data at 
address 0x
*15:34:57* [  862.548904] Faulting instruction address: 0xc0260900
*15:34:57* [  862.548911] Oops: Kernel access of bad area, sig: 11 [#1]
*15:34:57* [  862.548917] SMP NR_CPUS=2048 NUMA pSeries
*15:34:57* [  862.548924] Modules linked in: rtc_generic(E) pseries_rng(E) 
autofs4(E)
*15:34:57* [  862.548938] CPU: 0 PID: 129 Comm: kswapd2 Tainted: GE 
  4.7.0-rc5-autotest #1
*15:34:57* [  862.548946] task: c007766a2600 ti: c00776764000 task.ti: 
c00776764000
*15:34:57* [  862.548953] NIP: c0260900 LR: c026452c CTR: 

*15:34:57* [  862.548961] REGS: c00776767830 TRAP: 0300   Tainted: G
E(4.7.0-rc5-autotest)
*15:34:57* [  862.548968] MSR: 80010280b033 
  CR: 24000222  XER: 2001
*15:34:57* [  862.548996] CFAR: c0008468 DAR:  DSISR: 
4000 SOFTE: 0
*15:34:57* GPR00: c026452c c00776767ab0 c13ac100 
c0077ff54200
*15:34:57* GPR04:  c00776767ba0 0001 
c151c100
*15:34:57* GPR08:  000b4057 8000 
c0071664b7a0
*15:34:57* GPR12:  ce80 0001 
f15dc000
*15:34:57* GPR16: c0077ff54700   
c0077ff54700
*15:34:57* GPR20: 0001 0100 0200 
c0077ff54200
*15:34:57* GPR24: c00776767ba0 0020  
0001
*15:34:57* GPR28: 0010  c00776767ba0 
f15dc020
*15:34:57* [  862.549094] NIP [c0260900] 
move_active_pages_to_lru.isra.16+0xa0/0x380
*15:34:57* [  862.549102] LR [c026452c] shrink_active_list+0x2fc/0x510
*15:34:57* [  862.549108] Call Trace:
*15:34:57* [  862.549112] [c00776767ab0] [f15dc000] 
0xf15dc000 (unreliable)
*15:34:57* [  862.549122] [c00776767b60] [c026452c] 
shrink_active_list+0x2fc/0x510
*15:34:57* [  862.549131] [c00776767c50] [c02665d4] 
kswapd+0x434/0xa70
*15:34:57* [  862.549139] [c00776767d80] [c00f1b50] 
kthread+0x110/0x130
*15:34:57* [  862.549148] [c00776767e30] [c00095f0] 
ret_from_kernel_thread+0x5c/0x6c
*15:34:57* [  862.549155] Instruction dump:
*15:34:57* [  862.549161] 6000 3b200020 3a81 7b7c26e4 3aa00100 3ac00200 
3a40 3a770500
*15:34:57* [  862.549174] 3a20 6000 6000 6042  
7fbd4840 419e01b8 ebfd0008
*15:34:57* [  862.549193] ---[ end trace fcc50906d9164c56 ]---
*15:34:57* [  862.550562]
*15:35:18* [  883.551577] INFO: rcu_sched self-detected stall on CPU
*15:35:18* [  883.551578] INFO: rcu_sched self-detected stall on CPU
*15:35:18* [  883.551588]   2-...: (5249 ticks this GP) 
idle=cc5/141/0 softirq=50260/50260 fqs=5249
*15:35:18* [  883.551591](t=5250 jiffies g=48365 c=48364 q=182)

Regards,
Abdul

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: _PAGE_PRESENT and _PAGE_ACCESSED

2016-07-27 Thread Benjamin Herrenschmidt
On Tue, 2016-07-26 at 19:52 +0200, LEROY Christophe wrote:
> In the ppc8xx TLB miss handler, we consider a page valid if both
> _PAGE_PRESENT and _PAGE_ACCESSED are set.
> Is there any chance of having _PAGE_ACCESSED set and not _PAGE_PRESENT?
> Otherwise we could simplify the handler by considering the page valid
> only when _PAGE_ACCESSED is set.

When _PAGE_PRESENT is not set, the PTE becomes a swap PTE and a pile
of the other bits can be repurposed; you may want to verify whether
_PAGE_ACCESSED is one of them.
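
A rough user-space sketch of that distinction, using made-up bit values rather than the real 8xx definitions:

/*
 * Only trust the accessed bit once the present bit says the PTE is a
 * normal mapping; otherwise the low bits may be a swap entry.
 */
#include <stdbool.h>
#include <stdio.h>

#define MY_PAGE_PRESENT   0x001UL       /* assumed value, illustration only */
#define MY_PAGE_ACCESSED  0x020UL       /* assumed value, illustration only */

static bool pte_ok_for_tlb(unsigned long pte)
{
        if (!(pte & MY_PAGE_PRESENT))
                return false;   /* may be a swap PTE: other bits repurposed */
        return (pte & MY_PAGE_ACCESSED) != 0;
}

int main(void)
{
        printf("%d\n", pte_ok_for_tlb(MY_PAGE_PRESENT | MY_PAGE_ACCESSED)); /* 1 */
        printf("%d\n", pte_ok_for_tlb(MY_PAGE_ACCESSED));                   /* 0 */
        return 0;
}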

Cheers,
Ben.

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread kbuild test robot
Hi,

[auto build test ERROR on v4.7-rc7]
[cannot apply to powerpc/next next-20160727]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improve the system]

url:
https://github.com/0day-ci/linux/commits/Balbir-Singh/Make-system_reset_pSeries-relocatable/20160727-153638
config: powerpc-ps3_defconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 5.4.0-6) 5.4.0 20160609
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=powerpc 

All errors (new ones prefixed by >>):

   arch/powerpc/kernel/head_64.o: In function `power7_wakeup_common':
>> arch/powerpc/kernel/exceptions-64s.S:1438: undefined reference to 
>> `PACA_THREAD_IDLE_STATE'
>> arch/powerpc/kernel/exceptions-64s.S:1439: undefined reference to 
>> `PNV_THREAD_NAP'
>> arch/powerpc/kernel/exceptions-64s.S:1446: undefined reference to 
>> `PNV_THREAD_RUNNING'
   arch/powerpc/kernel/exceptions-64s.S:1447: undefined reference to 
`PACA_THREAD_IDLE_STATE'
>> arch/powerpc/kernel/exceptions-64s.S:1464: undefined reference to 
>> `power7_wakeup_noloss'
>> arch/powerpc/kernel/exceptions-64s.S:1465: undefined reference to 
>> `power7_wakeup_loss'
>> arch/powerpc/kernel/exceptions-64s.S:1469: undefined reference to 
>> `power7_wakeup_tb_loss'

vim +1438 arch/powerpc/kernel/exceptions-64s.S

  1432  
  1433  .align 7
  1434  _GLOBAL(power7_wakeup_common)
  1435  #ifdef CONFIG_RELOCATABLE
  1436  mtctr   r11
  1437  #endif
> 1438  lbz r0,PACA_THREAD_IDLE_STATE(r13)
> 1439  cmpwi   cr2,r0,PNV_THREAD_NAP
  1440  bgt cr2,8f  /* Either sleep or 
Winkle */
  1441  
  1442  /* Waking up from nap should not cause hypervisor state loss */
  1443  bgt cr3,.
  1444  
  1445  /* Waking up from nap */
> 1446  li  r0,PNV_THREAD_RUNNING
  1447  stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
  1448  
  1449  #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
  1450  li  r0,KVM_HWTHREAD_IN_KERNEL
  1451  stb r0,HSTATE_HWTHREAD_STATE(r13)
  1452  /* Order setting hwthread_state vs. testing hwthread_req */
  1453  sync
  1454  lbz r0,HSTATE_HWTHREAD_REQ(r13)
  1455  cmpwi   r0,0
  1456  beq 1f
  1457  b   kvm_start_guest
  1458  1:
  1459  #endif
  1460  
  1461  /* Return SRR1 from power7_nap() */
  1462  mfspr   r3,SPRN_SRR1
  1463  beq cr3,2f
> 1464  b   power7_wakeup_noloss
> 1465  2:  b   power7_wakeup_loss
  1466  
  1467  /* Fast Sleep wakeup on PowerNV */
  1468  8:  GET_PACA(r13)
> 1469  b   power7_wakeup_tb_loss
  1470  
  1471  /*
  1472   * Hash table stuff

---
0-DAY kernel test infrastructureOpen Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation


.config.gz
Description: Binary data
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Balbir Singh
On Wed, Jul 27, 2016 at 7:53 PM, Benjamin Herrenschmidt
 wrote:
> On Wed, 2016-07-27 at 17:32 +1000, Balbir Singh wrote:
>> From: Balbir Singh 
>>
>> Currently the power management bits are broken w.r.t. relocation.
>> There are direct branches from system_reset_pSeries to
>> power7_wakeup_*.
>
> Side track: we should really get rid of the _pSeries suffix for these
> things :-)
>

I thought about it, but then thought it would need more auditing
across places and hence held back. I'll be happy to send another patch
to remove _pSeries from exceptions-64s.S to begin with, as a follow-up.

Balbir
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH] Make system_reset_pSeries relocatable

2016-07-27 Thread Benjamin Herrenschmidt
On Wed, 2016-07-27 at 17:32 +1000, Balbir Singh wrote:
> From: Balbir Singh 
> 
> Currently the power management bits are broken w.r.t. relocation.
> There are direct branches from system_reset_pSeries to
> power7_wakeup_*.

Side track: we should really get rid of the _pSeries suffix for these
things :-)

>  The correct way to do it is to do what
> the slb miss handler does, which is jump to a small stub within
> the first 64k of the relocated address and then jump to the
> actual location.
> 
> The code has been lightly tested (not the kvm bits), I would highly
> appreciate a review of the code. I suspect there might be easy
> to find bugs :)
> 
> Cc: b...@kernel.crashing.org
> Cc: m...@ellerman.id.au
> Cc: pau...@samba.org
> Cc: npig...@gmail.com
> Cc: sva...@linux.vnet.ibm.com
> 
> Signed-off-by: Balbir Singh 
> ---
>  arch/powerpc/kernel/exceptions-64s.S | 82 ++--
> 
>  1 file changed, 51 insertions(+), 31 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/exceptions-64s.S
> b/arch/powerpc/kernel/exceptions-64s.S
> index 8bcc1b4..64f9650 100644
> --- a/arch/powerpc/kernel/exceptions-64s.S
> +++ b/arch/powerpc/kernel/exceptions-64s.S
> @@ -118,39 +118,21 @@ BEGIN_FTR_SECTION
>   cmpwi   cr4,r5,1
>   mtspr   SPRN_HSPRG0,r13
>  
> - lbz r0,PACA_THREAD_IDLE_STATE(r13)
> - cmpwi   cr2,r0,PNV_THREAD_NAP
> - bgt cr2,8f  /* Either
> sleep or Winkle */
> -
> - /* Waking up from nap should not cause hypervisor state loss
> */
> - bgt cr3,.
> -
> - /* Waking up from nap */
> - li  r0,PNV_THREAD_RUNNING
> - stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear
> thread state */
> -
> -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> - li  r0,KVM_HWTHREAD_IN_KERNEL
> - stb r0,HSTATE_HWTHREAD_STATE(r13)
> - /* Order setting hwthread_state vs. testing hwthread_req */
> - sync
> - lbz r0,HSTATE_HWTHREAD_REQ(r13)
> - cmpwi   r0,0
> - beq 1f
> - b   kvm_start_guest
> -1:
> +#ifndef CONFIG_RELOCATABLE
> + b   power7_wakeup_common
> +#else
> + /*
> +  * We can't just use a direct branch to power7_wakeup_common
> +  * because the distance from here to there depends on where
> +  * the kernel ends up being put.
> +  */
> + mfctr   r11
> + ld  r10, PACAKBASE(r13)
> + LOAD_HANDLER(r10, power7_wakeup_common)
> + mtctr   r10
> + bctr
>  #endif
>  
> - /* Return SRR1 from power7_nap() */
> - mfspr   r3,SPRN_SRR1
> - beq cr3,2f
> - b   power7_wakeup_noloss
> -2:   b   power7_wakeup_loss
> -
> - /* Fast Sleep wakeup on PowerNV */
> -8:   GET_PACA(r13)
> - b   power7_wakeup_tb_loss
> -
>  9:
>  END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
>  #endif /* CONFIG_PPC_P7_NAP */
> @@ -1448,6 +1430,44 @@ power4_fixup_nap:
>   blr
>  #endif
>  
> + .align 7
> +_GLOBAL(power7_wakeup_common)
> +#ifdef CONFIG_RELOCATABLE
> + mtctr   r11
> +#endif
> + lbz r0,PACA_THREAD_IDLE_STATE(r13)
> + cmpwi   cr2,r0,PNV_THREAD_NAP
> + bgt cr2,8f  /* Either
> sleep or Winkle */
> +
> + /* Waking up from nap should not cause hypervisor state loss
> */
> + bgt cr3,.
> +
> + /* Waking up from nap */
> + li  r0,PNV_THREAD_RUNNING
> + stb r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear
> thread state */
> +
> +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
> + li  r0,KVM_HWTHREAD_IN_KERNEL
> + stb r0,HSTATE_HWTHREAD_STATE(r13)
> + /* Order setting hwthread_state vs. testing hwthread_req */
> + sync
> + lbz r0,HSTATE_HWTHREAD_REQ(r13)
> + cmpwi   r0,0
> + beq 1f
> + b   kvm_start_guest
> +1:
> +#endif
> +
> + /* Return SRR1 from power7_nap() */
> + mfspr   r3,SPRN_SRR1
> + beq cr3,2f
> + b   power7_wakeup_noloss
> +2:   b   power7_wakeup_loss
> +
> + /* Fast Sleep wakeup on PowerNV */
> +8:   GET_PACA(r13)
> + b   power7_wakeup_tb_loss
> +
>  /*
>   * Hash table stuff
>   */
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v12 30/30] selftests/powerpc: Fix a build issue

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

Fixes the following build failure -

cp_abort.c:90:3: error: ‘for’ loop initial declarations are only
allowed in C99 or C11 mode
   for (int i = 0; i < NUM_LOOPS; i++) {
   ^
cp_abort.c:90:3: note: use option -std=c99, -std=gnu99, -std=c11 or
-std=gnu11 to compile your code
cp_abort.c:97:3: error: ‘for’ loop initial declarations are only
allowed in C99 or C11 mode
   for (int i = 0; i < NUM_LOOPS; i++) {

Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: Shuah Khan 
Cc: Anton Blanchard 
Cc: Cyril Bur 
Cc: Anshuman Khandual 
Cc: Simon Guo 
Cc: Ulrich Weigand 
Cc: Michael Neuling 
Cc: Andrew Morton 
Cc: Kees Cook 
Cc: Rashmica Gupta 
Cc: Khem Raj 
Cc: Jessica Yu 
Cc: Jiri Kosina 
Cc: Miroslav Benes 
Cc: Suraj Jitindar Singh 
Cc: Chris Smart 
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
---
 tools/testing/selftests/powerpc/context_switch/cp_abort.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/powerpc/context_switch/cp_abort.c 
b/tools/testing/selftests/powerpc/context_switch/cp_abort.c
index 5a5b55a..1ce7dce 100644
--- a/tools/testing/selftests/powerpc/context_switch/cp_abort.c
+++ b/tools/testing/selftests/powerpc/context_switch/cp_abort.c
@@ -67,7 +67,7 @@ int test_cp_abort(void)
/* 128 bytes for a full cache line */
char buf[128] __cacheline_aligned;
cpu_set_t cpuset;
-   int fd1[2], fd2[2], pid;
+   int fd1[2], fd2[2], pid, i;
char c;
 
/* only run this test on a P9 or later */
@@ -87,14 +87,14 @@ int test_cp_abort(void)
FAIL_IF(pid < 0);
 
if (!pid) {
-   for (int i = 0; i < NUM_LOOPS; i++) {
+   for (i = 0; i < NUM_LOOPS; i++) {
FAIL_IF((write(fd1[WRITE_FD], &c, 1)) != 1);
FAIL_IF((read(fd2[READ_FD], &c, 1)) != 1);
/* A paste succeeds if CR0 EQ bit is set */
FAIL_IF(paste(buf) & 0x2000);
}
} else {
-   for (int i = 0; i < NUM_LOOPS; i++) {
+   for (i = 0; i < NUM_LOOPS; i++) {
FAIL_IF((read(fd1[READ_FD], &c, 1)) != 1);
copy(buf);
FAIL_IF((write(fd2[WRITE_FD], &c, 1) != 1));
-- 
1.8.3.1
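
For reference, a standalone illustration of the change (NUM_LOOPS is a stand-in value here): declaring the counter in the for() header needs -std=c99 or later, so the selftest hoists the declaration out of the loop.

/*
 * Builds with the default gnu89 mode because the counter is declared
 * up front instead of inside the for() header.
 */
#include <stdio.h>

#define NUM_LOOPS 3

int main(void)
{
        int i;                          /* declared up front: builds as gnu89 */

        for (i = 0; i < NUM_LOOPS; i++) /* "for (int i = ...)" would need C99 */
                printf("iteration %d\n", i);

        return 0;
}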

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v12 29/30] selftests/powerpc: Add .gitignore file for ptrace executables

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a .gitignore file for all the executables in
the ptrace test directory, making them invisible to git status.

Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: Shuah Khan 
Cc: Anton Blanchard 
Cc: Cyril Bur 
Cc: Anshuman Khandual 
Cc: Simon Guo 
Cc: Ulrich Weigand 
Cc: Michael Neuling 
Cc: Andrew Morton 
Cc: Kees Cook 
Cc: Rashmica Gupta 
Cc: Khem Raj 
Cc: Jessica Yu 
Cc: Jiri Kosina 
Cc: Miroslav Benes 
Cc: Suraj Jitindar Singh 
Cc: Chris Smart 
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
---
 tools/testing/selftests/powerpc/ptrace/.gitignore | 11 +++
 1 file changed, 11 insertions(+)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/.gitignore

diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore 
b/tools/testing/selftests/powerpc/ptrace/.gitignore
new file mode 100644
index 000..bdf3566
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -0,0 +1,11 @@
+ptrace-ebb
+ptrace-gpr
+ptrace-tm-gpr
+ptrace-tm-spd-gpr
+ptrace-tar
+ptrace-tm-tar
+ptrace-tm-spd-tar
+ptrace-vsx
+ptrace-tm-vsx
+ptrace-tm-spd-vsx
+ptrace-tm-spr
-- 
1.8.3.1

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH v12 28/30] selftests/powerpc: Add ptrace tests for TM SPR registers

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for the TM SPR registers. It
also adds ptrace-based helper functions for accessing the TM SPRs.

Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: Shuah Khan 
Cc: Anton Blanchard 
Cc: Cyril Bur 
Cc: Anshuman Khandual 
Cc: Simon Guo 
Cc: Ulrich Weigand 
Cc: Michael Neuling 
Cc: Andrew Morton 
Cc: Kees Cook 
Cc: Rashmica Gupta 
Cc: Khem Raj 
Cc: Jessica Yu 
Cc: Jiri Kosina 
Cc: Miroslav Benes 
Cc: Suraj Jitindar Singh 
Cc: Chris Smart 
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spr.c   | 186 +
 tools/testing/selftests/powerpc/ptrace/ptrace.h|  35 
 3 files changed, 223 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile 
b/tools/testing/selftests/powerpc/ptrace/Makefile
index 797840a..f34670e 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,7 +1,8 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
 ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
-ptrace-tm-spd-vsx
+ptrace-tm-spd-vsx ptrace-tm-spr
 
+include ../../lib.mk
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c 
b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
new file mode 100644
index 000..a4f361e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -0,0 +1,186 @@
+/*
+ * Ptrace test TM SPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+
+/* Tracee and tracer shared data */
+struct shared {
+   int flag;
+   struct tm_spr_regs regs;
+};
+unsigned long tfhar;
+
+int shm_id;
+volatile struct shared *cptr, *pptr;
+
+int shm_id1;
+volatile int *cptr1, *pptr1;
+
+#define TM_SCHED   0xde018c01
+#define TM_KVM_SCHED   0xe001ac01
+
+int validate_tm_spr(struct tm_spr_regs *regs)
+{
+   if (regs->tm_tfhar != tfhar )
+   return TEST_FAIL;
+
+   if ((regs->tm_texasr != TM_SCHED) && (regs->tm_texasr != TM_KVM_SCHED))
+   return TEST_FAIL;
+
+   if ((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0))
+   return TEST_FAIL;
+
+   return TEST_PASS;
+}
+
+void tm_spr(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (struct shared *)shmat(shm_id, NULL, 0);
+   cptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+trans:
+   cptr1[0] = 0;
+   asm __volatile__(
+   "1: ;"
+   /* TM failover handler should follow TBEGIN */
+   "mflr 31;"
+   "bl 4f;"/* $ = TFHAR - 12 */
+   "4: ;"
+   "mflr %[tfhar];"
+   "mtlr 31;"
+
+   TBEGIN
+   "beq 2f;"
+
+   TSUSPEND
+   "li 8, 1;"
+   "sth 8, 0(%[cptr1]);"
+   TRESUME
+   "b .;"
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [tfhar] "=r" (tfhar), [res] "=r" (result),
+   [texasr] "=r" (texasr), [cptr1] "=r" (cptr1)
+   : [sprn_texasr] "i"  (SPRN_TEXASR)
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r8", "r9", "r10", "r11", "r31"
+   );
+
+   /* There are 2 32bit instructions before tbegin. */
+   tfhar += 12; 
+
+   if (result) {
+   if (!cptr->flag)
+   goto trans;
+
+   ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs);
+   shmdt((void *)cptr);
+   shmdt((void *)cptr1);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   shmdt((void *)cptr1);
+   exit(1);
+}
+
+int trace_tm_spr(pid_t child)
+{
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs);
+   if (ret)
+   return TEST_FAIL;
+
+   printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_

[PATCH v12 27/30] selftests/powerpc: Add ptrace tests for VSX, VMX registers in suspended TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for VSX and VMX registers
inside a suspended TM context.

Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: Shuah Khan 
Cc: Anton Blanchard 
Cc: Cyril Bur 
Cc: Anshuman Khandual 
Cc: Simon Guo 
Cc: Ulrich Weigand 
Cc: Michael Neuling 
Cc: Andrew Morton 
Cc: Kees Cook 
Cc: Rashmica Gupta 
Cc: Khem Raj 
Cc: Jessica Yu 
Cc: Jiri Kosina 
Cc: Miroslav Benes 
Cc: Suraj Jitindar Singh 
Cc: Chris Smart 
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c   | 222 +
 2 files changed, 224 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile 
b/tools/testing/selftests/powerpc/ptrace/Makefile
index 1b07649..797840a 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,6 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
+ptrace-tm-spd-vsx
 
 
 all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c 
b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
new file mode 100644
index 000..dbb0cc1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -0,0 +1,222 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+volatile int *cptr, *pptr;
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+   loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_new(void)
+{
+   loadvsx(fp_load_new, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+   loadvsx(fp_load_ckpt, 0);
+}
+
+__attribute__((used)) void wait_parent(void)
+{
+   cptr[2] = 1;
+   while (!cptr[1]);
+}
+
+void tm_spd_vsx(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[2] = 0;
+   asm __volatile__(
+   "bl load_vsx_ckpt;"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "bl load_vsx_new;"
+   TSUSPEND
+   "bl load_vsx;"
+   "bl wait_parent;"
+   TRESUME
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+   [sprn_texasr] "i"  (SPRN_TEXASR)
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r8", "r9", "r10", "r11"
+   );
+
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+   shmdt((void *)cptr);
+
+   storevsx(fp_store, 0);
+   ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   exit(1);
+}
+
+int trace_tm_spd_vsx(pid_t child)
+{
+   unsigned long vsx[VSX_MAX];
+   unsigned long vmx[VMX_MAX + 2][2];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load_ckpt);
+   if (ret)
+   return TES

[PATCH v12 26/30] selftests/powerpc: Add ptrace tests for VSX, VMX registers in TM

2016-07-27 Thread wei . guo . simon
From: Anshuman Khandual 

This patch adds a ptrace interface test for VSX and VMX registers
inside a TM context. It also adds ptrace-based helper functions for
accessing checkpointed VSX and VMX registers.

Cc: Benjamin Herrenschmidt 
Cc: Paul Mackerras 
Cc: Michael Ellerman 
Cc: Shuah Khan 
Cc: Anton Blanchard 
Cc: Cyril Bur 
Cc: Anshuman Khandual 
Cc: Simon Guo 
Cc: Ulrich Weigand 
Cc: Michael Neuling 
Cc: Andrew Morton 
Cc: Kees Cook 
Cc: Rashmica Gupta 
Cc: Khem Raj 
Cc: Jessica Yu 
Cc: Jiri Kosina 
Cc: Miroslav Benes 
Cc: Suraj Jitindar Singh 
Cc: Chris Smart 
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Signed-off-by: Anshuman Khandual 
Signed-off-by: Simon Guo 
---
 tools/testing/selftests/powerpc/ptrace/Makefile|   3 +-
 .../selftests/powerpc/ptrace/ptrace-tm-vsx.c   | 209 +
 2 files changed, 211 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c

diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile 
b/tools/testing/selftests/powerpc/ptrace/Makefile
index e3d9ceb..1b07649 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -1,5 +1,6 @@
 TEST_PROGS := ptrace-ebb ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
-ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx
+ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx
+
 
 all: $(TEST_PROGS)
 CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c 
b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
new file mode 100644
index 000..d63f45d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -0,0 +1,209 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+volatile unsigned long *cptr, *pptr;
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+   loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+   loadvsx(fp_load_ckpt, 0);
+}
+
+void tm_vsx(void)
+{
+   unsigned long result, texasr;
+   int ret;
+
+   cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+   cptr[1] = 0;
+   asm __volatile__(
+   "bl load_vsx_ckpt;"
+
+   "1: ;"
+   TBEGIN
+   "beq 2f;"
+
+   "bl load_vsx;"
+   TSUSPEND
+   "li 7, 1;"
+   "stw 7, 0(%[cptr1]);"
+   TRESUME
+   "b .;"
+
+   TEND
+   "li 0, 0;"
+   "ori %[res], 0, 0;"
+   "b 3f;"
+
+   "2: ;"
+   "li 0, 1;"
+   "ori %[res], 0, 0;"
+   "mfspr %[texasr], %[sprn_texasr];"
+
+   "3: ;"
+   : [res] "=r" (result), [texasr] "=r" (texasr)
+   : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+   [sprn_texasr] "i"  (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
+   : "memory", "r0", "r1", "r2", "r3", "r4",
+   "r7", "r8", "r9", "r10", "r11"
+   );
+
+   if (result) {
+   if (!cptr[0])
+   goto trans;
+
+   shmdt((void *)cptr);
+   storevsx(fp_store, 0);
+   ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+   if (ret)
+   exit(1);
+   exit(0);
+   }
+   shmdt((void *)cptr);
+   exit(1);
+}
+
+int trace_tm_vsx(pid_t child)
+{
+   unsigned long vsx[VSX_MAX];
+   unsigned long vmx[VMX_MAX + 2][2];
+   int ret;
+
+   ret = start_trace(child);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vmx(vmx, fp_load);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vsx_ckpt(child, vsx);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = validate_vsx(vsx, fp_load_ckpt);
+   if (ret)
+   return TEST_FAIL;
+
+   ret = show_vmx_ckpt(child, vmx);
+   if (ret)
+   return TEST_FAIL;
+
+   r
