Re: [PATCH v3 13/41] KVM: PPC: Book3S 64: Move interrupt early register setup to KVM

2021-03-20 Thread Alexey Kardashevskiy




On 06/03/2021 02:06, Nicholas Piggin wrote:

Like the earlier patch for hcalls, KVM interrupt entry requires a
different calling convention than the Linux interrupt handlers
set up. Move the code that converts from one to the other into KVM.

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/kernel/exceptions-64s.S | 131 +--
  arch/powerpc/kvm/book3s_64_entry.S   |  34 ++-
  2 files changed, 55 insertions(+), 110 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64s.S 
b/arch/powerpc/kernel/exceptions-64s.S
index b7092ba87da8..b4eab5084964 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -187,7 +187,6 @@ do_define_int n
.endif
  .endm
  
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER

  /*
   * All interrupts which set HSRR registers, as well as SRESET and MCE and
   * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
@@ -220,54 +219,25 @@ do_define_int n
   * to KVM to handle.
   */
  
-.macro KVMTEST name

+.macro KVMTEST name handler
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi   r10,0
-   bne \name\()_kvm
-.endm
-
-.macro GEN_KVM name
-   .balign IFETCH_ALIGN_BYTES
-\name\()_kvm:
-
-BEGIN_FTR_SECTION
-   ld  r10,IAREA+EX_CFAR(r13)
-   std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
-
-   ld  r10,IAREA+EX_CTR(r13)
-   mtctr   r10
-BEGIN_FTR_SECTION
-   ld  r10,IAREA+EX_PPR(r13)
-   std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-   ld  r11,IAREA+EX_R11(r13)
-   ld  r12,IAREA+EX_R12(r13)
-   std r12,HSTATE_SCRATCH0(r13)
-   sldir12,r9,32
-   ld  r9,IAREA+EX_R9(r13)
-   ld  r10,IAREA+EX_R10(r13)
/* HSRR variants have the 0x2 bit added to their trap number */
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
-   ori r12,r12,(IVEC + 0x2)
+   li  r10,(IVEC + 0x2)
FTR_SECTION_ELSE
-   ori r12,r12,(IVEC)
+   li  r10,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.elseif IHSRR
-   ori r12,r12,(IVEC+ 0x2)
+   li  r10,(IVEC + 0x2)
.else
-   ori r12,r12,(IVEC)
+   li  r10,(IVEC)
.endif
-   b   kvmppc_interrupt
-.endm
-
-#else
-.macro KVMTEST name
-.endm
-.macro GEN_KVM name
-.endm
+   bne \handler
  #endif
+.endm
  
  /*

   * This is the BOOK3S interrupt entry code macro.
@@ -409,7 +379,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
  DEFINE_FIXED_SYMBOL(\name\()_common_real)
  \name\()_common_real:
.if IKVM_REAL
-   KVMTEST \name
+   KVMTEST \name kvm_interrupt
.endif
  
  	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */

@@ -432,7 +402,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
  DEFINE_FIXED_SYMBOL(\name\()_common_virt)
  \name\()_common_virt:
.if IKVM_VIRT
-   KVMTEST \name
+   KVMTEST \name kvm_interrupt
  1:
.endif
.endif /* IVIRT */
@@ -446,7 +416,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
  DEFINE_FIXED_SYMBOL(\name\()_common_real)
  \name\()_common_real:
.if IKVM_REAL
-   KVMTEST \name
+   KVMTEST \name kvm_interrupt
.endif
  .endm
  
@@ -967,8 +937,6 @@ EXC_COMMON_BEGIN(system_reset_common)

EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
  
-	GEN_KVM system_reset

-
  
  /**

   * Interrupt 0x200 - Machine Check Interrupt (MCE).
@@ -1132,7 +1100,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/*
 * Check if we are coming from guest. If yes, then run the normal
 * exception handler which will take the
-* machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+* machine_check_kvm->kvm_interrupt branch to deliver the MC event
 * to guest.
 */
lbz r11,HSTATE_IN_GUEST(r13)
@@ -1203,8 +1171,6 @@ EXC_COMMON_BEGIN(machine_check_common)
bl  machine_check_exception
b   interrupt_return
  
-	GEN_KVM machine_check

-
  
  #ifdef CONFIG_PPC_P7_NAP

  /*
@@ -1339,8 +1305,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
REST_NVGPRS(r1)
b   interrupt_return
  
-	GEN_KVM data_access

-
  
  /**

   * Interrupt 0x380 - Data Segment Interrupt (DSLB).
@@ -1390,8 +1354,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
bl  do_bad_slb_fault
b   interrupt_return
  
-	GEN_KVM data_access_slb

-
  
  /**

   * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
@@ -1428,8 +1390,6 @@ MMU_FTR_SECTION_ELSE
  ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
b   interrupt_return
  
-	GEN_KVM instruction_access

-
  
  /**

   * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
@@ -1474,8 +1434,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
bl  

Re: [PATCH v3 14/41] KVM: PPC: Book3S 64: move bad_host_intr check to HV handler

2021-03-20 Thread Alexey Kardashevskiy




On 06/03/2021 02:06, Nicholas Piggin wrote:

This is not used by PR KVM.

Signed-off-by: Nicholas Piggin 



Reviewed-by: Alexey Kardashevskiy 

a small note - it probably makes sense to move this before 09/41 as this 
one removes what 09/41 added to book3s_64_entry.S. Thanks,




---
  arch/powerpc/kvm/book3s_64_entry.S  | 3 ---
  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +++-
  arch/powerpc/kvm/book3s_segment.S   | 7 +++
  3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_entry.S 
b/arch/powerpc/kvm/book3s_64_entry.S
index d06e81842368..7a6b060ceed8 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -78,11 +78,8 @@ do_kvm_interrupt:
beq-.Lmaybe_skip
  .Lno_skip:
  #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-   cmpwi   r9,KVM_GUEST_MODE_HOST_HV
-   beq kvmppc_bad_host_intr
  #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
cmpwi   r9,KVM_GUEST_MODE_GUEST
-   ld  r9,HSTATE_SCRATCH2(r13)
beq kvmppc_interrupt_pr
  #endif
b   kvmppc_interrupt_hv
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S 
b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f976efb7e4a9..75405ef53238 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1265,6 +1265,7 @@ hdec_soon:
  kvmppc_interrupt_hv:
/*
 * Register contents:
+* R9   = HSTATE_IN_GUEST
 * R12  = (guest CR << 32) | interrupt vector
 * R13  = PACA
 * guest R12 saved in shadow VCPU SCRATCH0
@@ -1272,6 +1273,8 @@ kvmppc_interrupt_hv:
 * guest R9 saved in HSTATE_SCRATCH2
 */
/* We're now back in the host but in guest MMU context */
+   cmpwi   r9,KVM_GUEST_MODE_HOST_HV
+   beq kvmppc_bad_host_intr
li  r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
  
@@ -3272,7 +3275,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

   * cfar is saved in HSTATE_CFAR(r13)
   * ppr is saved in HSTATE_PPR(r13)
   */
-.global kvmppc_bad_host_intr
  kvmppc_bad_host_intr:
/*
 * Switch to the emergency stack, but start half-way down in
diff --git a/arch/powerpc/kvm/book3s_segment.S 
b/arch/powerpc/kvm/book3s_segment.S
index 1f492aa4c8d6..ef1d88b869bf 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -167,8 +167,15 @@ kvmppc_interrupt_pr:
 * R12 = (guest CR << 32) | exit handler id
 * R13 = PACA
 * HSTATE.SCRATCH0 = guest R12
+*
+* If HV is possible, additionally:
+* R9  = HSTATE_IN_GUEST
+* HSTATE.SCRATCH2 = guest R9
 */
  #ifdef CONFIG_PPC64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+   ld  r9,HSTATE_SCRATCH2(r13)
+#endif
/* Match 32-bit entry */
rotldi  r12, r12, 32  /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */



--
Alexey


Patch "vmlinux.lds.h: Create section for protection against instrumentation" has been added to the 4.19-stable tree

2021-03-20 Thread gregkh


This is a note to let you know that I've just added the patch titled

vmlinux.lds.h: Create section for protection against instrumentation

to the 4.19-stable tree which can be found at:

http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
 vmlinux.lds.h-create-section-for-protection-against-instrumentation.patch
and it can be found in the queue-4.19 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let  know about it.


>From foo@baz Sat Mar 20 11:54:47 AM CET 2021
From: Nicolas Boichat 
Date: Sat, 20 Mar 2021 12:16:25 +0800
Subject: vmlinux.lds.h: Create section for protection against instrumentation
To: sta...@vger.kernel.org
Cc: gro...@chromium.org, Thomas Gleixner , Alexandre 
Chartre , Peter Zijlstra , 
Nicolas Boichat , Arnd Bergmann , 
Benjamin Herrenschmidt , Christopher Li 
, Daniel Axtens , Greg Kroah-Hartman 
, Masahiro Yamada , 
Michael Ellerman , Michal Marek , 
"Naveen N. Rao" , Nicholas Piggin 
, Paul Mackerras , 
linux-a...@vger.kernel.org, linux-kbu...@vger.kernel.org, 
linux-ker...@vger.kernel.org, linux-spa...@vger.kernel.org, 
linuxppc-dev@lists.ozlabs.org
Message-ID: 
<20210320121614.for-stable-4.19.v2.1.I222f801866f71be9f7d85e5b10665cd4506d78ec@changeid>

From: Nicolas Boichat 

From: Thomas Gleixner 

commit 655389433e7efec589838b400a2a652b3ffa upstream.

Some code paths, especially the low level entry code, must be protected
against instrumentation for various reasons:

 - Low level entry code can be a fragile beast, especially on x86.

 - With NO_HZ_FULL RCU state needs to be established before using it.

Having a dedicated section for such code allows to validate with tooling
that no unsafe functions are invoked.

Add the .noinstr.text section and the noinstr attribute to mark
functions. noinstr implies notrace. Kprobes will gain a section check
later.

Provide also a set of markers: instrumentation_begin()/end()

These are used to mark code inside a noinstr function which calls
into regular instrumentable text section as safe.

The instrumentation markers are only active when CONFIG_DEBUG_ENTRY is
enabled as the end marker emits a NOP to prevent the compiler from merging
the annotation points. This means the objtool verification requires a
kernel compiled with this option.

Signed-off-by: Thomas Gleixner 
Reviewed-by: Alexandre Chartre 
Acked-by: Peter Zijlstra 
Link: https://lkml.kernel.org/r/20200505134100.075416...@linutronix.de

[Nicolas:
Guard noinstr macro in include/linux/compiler_types.h in __KERNEL__
&& !__ASSEMBLY__, otherwise noinstr is expanded in the linker
script construct.

Upstream does not have this problem as many macros were moved by
commit 71391bdd2e9a ("include/linux/compiler_types.h: don't pollute
userspace with macro definitions"). We take the minimal approach here
and just guard the new macro.

Minor context conflicts in:
arch/powerpc/kernel/vmlinux.lds.S
include/asm-generic/vmlinux.lds.h
include/linux/compiler.h]
Signed-off-by: Nicolas Boichat 

Signed-off-by: Greg Kroah-Hartman 
---
Technically guarding with !__ASSEMBLY__ should be enough, but
there seems to be no reason to expose this new macro when
!__KERNEL__, so let's just match what upstream does.

Changes in v2:
 - Guard noinstr macro by __KERNEL__ && !__ASSEMBLY__ to prevent
   expansion in linker script and match upstream.

 arch/powerpc/kernel/vmlinux.lds.S |1 
 include/asm-generic/sections.h|3 ++
 include/asm-generic/vmlinux.lds.h |   10 +++
 include/linux/compiler.h  |   54 ++
 include/linux/compiler_types.h|6 
 scripts/mod/modpost.c |2 -
 6 files changed, 75 insertions(+), 1 deletion(-)

--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -99,6 +99,7 @@ SECTIONS
 #endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup 
__ftr_alt_* .ref.text);
+   NOINSTR_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end
 /* Start and end of .opd section - used for function descriptors. */
 extern char __start_opd[], __end_opd[];
 
+/* Start and end of instrumentation protected text section */
+extern char __noinstr_text_start[], __noinstr_text_end[];
+
 extern __visible const void __nosave_begin, __nosave_end;
 
 /* Function descriptor handling (if any).  Override in asm/sections.h */
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -483,6 +483,15 @@
}
 
 /*
+ * Non-instrumentable text section
+ */
+#define NOINSTR_TEXT   \
+   ALIGN_FUNCTION(); 

Re: [for-stable-4.19 PATCH v2 0/2] Backport patches to fix KASAN+LKDTM with recent clang on ARM64

2021-03-20 Thread Greg Kroah-Hartman
On Sat, Mar 20, 2021 at 12:16:24PM +0800, Nicolas Boichat wrote:
> Backport 2 patches that are required to make KASAN+LKDTM work
> with recent clang (patch 2/2 has a complete description).
> Tested on our chromeos-4.19 branch.
> Also compile tested on x86-64 and arm64 with gcc this time
> around.
> 
> Patch 1/2 adds a guard around noinstr that matches upstream,
> to prevent a build issue, and has some minor context conflicts.
> Patch 2/2 is a clean backport.
> 
> These patches have been merged to 5.4 stable already. We might
> need to backport to older stable branches, but this is what I
> could test for now.

Ok, trying this again, let's see what breaks :)

thanks,

greg k-h


[PATCH v7] powerpc/irq: Inline call_do_irq() and call_do_softirq()

2021-03-20 Thread Michael Ellerman
From: Christophe Leroy 

call_do_irq() and call_do_softirq() are simple enough to be
worth inlining.

Inlining them avoids an mflr/mtlr pair plus a save/reload on stack. It
also allows GCC to keep the saved ksp_limit in an nonvolatile reg.

This is inspired from S390 arch. Several other arches do more or
less the same. The way sparc arch does seems odd though.

Signed-off-by: Christophe Leroy 
Signed-off-by: Michael Ellerman 
---

v2: no change.
v3: no change.
v4:
- comment reminding the purpose of the inline asm block.
- added r2 as clobbered reg
v5:
- Limiting the change to PPC32 for now.
- removed r2 from the clobbered regs list (on PPC32 r2 points to current all 
the time)
- Removed patch 1 and merged ksp_limit handling in here.
v6:
- Rebase on top of merge-test (ca6e327fefb2).
- Remove the ksp_limit stuff as it's doesn't exist anymore.

v7:
mpe:
- Enable for 64-bit too. This is all in-kernel code calling in-kernel
  code, and must use the kernel TOC.
- Use named parameters for the inline asm.
- Reformat inline asm.
- Mark as always_inline.
- Drop unused ret from call_do_softirq(), add r3 as clobbered.
---
 arch/powerpc/include/asm/irq.h |  2 --
 arch/powerpc/kernel/irq.c  | 41 ++
 arch/powerpc/kernel/misc_32.S  | 25 -
 arch/powerpc/kernel/misc_64.S  | 22 --
 4 files changed, 41 insertions(+), 49 deletions(-)

diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index f3f264e441a7..b2bd58830430 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -53,8 +53,6 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
-void call_do_softirq(void *sp);
-void call_do_irq(struct pt_regs *regs, void *sp);
 extern void do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b72abbff96c..260effc0a435 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -667,6 +667,47 @@ static inline void check_stack_overflow(void)
}
 }
 
+static __always_inline void call_do_softirq(const void *sp)
+{
+   /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+   asm volatile (
+PPC_STLU " %%r1, %[offset](%[sp])  ;"
+   "mr %%r1, %[sp] ;"
+   "bl %[callee]   ;"
+PPC_LL "   %%r1, 0(%%r1)   ;"
+: // Outputs
+: // Inputs
+  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - 
STACK_FRAME_OVERHEAD),
+  [callee] "i" (__do_softirq)
+: // Clobbers
+  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+  "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+  "r11", "r12"
+   );
+}
+
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+   register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+   /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+   asm volatile (
+PPC_STLU " %%r1, %[offset](%[sp])  ;"
+   "mr %%r1, %[sp] ;"
+   "bl %[callee]   ;"
+PPC_LL "   %%r1, 0(%%r1)   ;"
+: // Outputs
+  "+r" (r3)
+: // Inputs
+  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - 
STACK_FRAME_OVERHEAD),
+  [callee] "i" (__do_irq)
+: // Clobbers
+  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+  "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+  "r11", "r12"
+   );
+}
+
 void __do_irq(struct pt_regs *regs)
 {
unsigned int irq;
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index acc410043b96..6a076bef2932 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -27,31 +27,6 @@
 
.text
 
-_GLOBAL(call_do_softirq)
-   mflrr0
-   stw r0,4(r1)
-   stwur1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-   mr  r1,r3
-   bl  __do_softirq
-   lwz r1,0(r1)
-   lwz r0,4(r1)
-   mtlrr0
-   blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
-   mflrr0
-   stw r0,4(r1)
-   stwur1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-   mr  r1,r4
-   bl  __do_irq
-   lwz r1,0(r1)
-   lwz r0,4(r1)
-   mtlrr0
-   blr
-
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 070465825c21..4b761a18a74

Re: [PATCH 5/6] powerpc/mm/64s/hash: Add real-mode change_memory_range() for hash LPAR

2021-03-20 Thread Michael Ellerman
Nicholas Piggin  writes:
> Excerpts from Michael Ellerman's message of February 11, 2021 11:51 pm:
...
>> diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c 
>> b/arch/powerpc/mm/book3s64/hash_pgtable.c
>> index 3663d3cdffac..01de985df2c4 100644
>> --- a/arch/powerpc/mm/book3s64/hash_pgtable.c
>> +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
>> @@ -414,6 +428,73 @@ static void change_memory_range(unsigned long start, 
>> unsigned long end,
>>  mmu_kernel_ssize);
>>  }
>>  
>> +static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
>> +{
>> +unsigned long msr, tmp, flags;
>> +int *p;
>> +
>> +p = &parms->cpu_counter.counter;
>> +
>> +local_irq_save(flags);
>> +__hard_EE_RI_disable();
>> +
>> +asm volatile (
>> +// Switch to real mode and leave interrupts off
>> +"mfmsr  %[msr]  ;"
>> +"li %[tmp], %[MSR_IR_DR];"
>> +"andc   %[tmp], %[msr], %[tmp]  ;"
>> +"mtmsrd %[tmp]  ;"
>> +
>> +// Tell the master we are in real mode
>> +"1: "
>> +"lwarx  %[tmp], 0, %[p] ;"
>> +"addic  %[tmp], %[tmp], -1  ;"
>> +"stwcx. %[tmp], 0, %[p] ;"
>> +"bne-   1b  ;"
>> +
>> +// Spin until the counter goes to zero
>> +"2: ;"
>> +"lwz%[tmp], 0(%[p]) ;"
>> +"cmpwi  %[tmp], 0   ;"
>> +"bne-   2b  ;"
>> +
>> +// Switch back to virtual mode
>> +"mtmsrd %[msr]  ;"
>> +
>> +: // outputs
>> +  [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
>> +: // inputs
>> +  [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
>> +: // clobbers
>> +  "cc", "xer"
>> +);
>> +
>> +local_irq_restore(flags);
>
> Hmm. __hard_EE_RI_disable won't get restored by this because it doesn't
> set the HARD_DIS flag. Also we don't want RI disabled here because 
> tracing will get called first (which might take SLB or HPTE fault).

Thanks for noticing. I originally wrote hard_irq_disable() but then
thought disabling RI also would be good.

> But it's also slightly rude to ever enable EE under an irq soft mask,
> because you don't know if it had been disabled by the masked interrupt 
> handler. It's not strictly a problem AFAIK because the interrupt would
> just get masked again, but if we try to maintain a good pattern would
> be good. Hmm that means we should add a check for irqs soft masked in
> __hard_irq_enable(), I'm not sure if all existing users would follow
> this rule.
>
> Might be better to call hard_irq_disable(); after the local_irq_save();
> and then clear and reset RI inside that region (could just do it at the
> same time as disabling MMU).

Thinking about it more, there's no real reason to disable RI.

We should be able to return from an interrupt in there, it's just that
if we do take one we'll probably die before we get a chance to return
because the mapping of text will be missing.

So disabling RI doesn't really gain us anything I don't think.

cheers


[PATCH] crypto: vmx: fix incorrect kernel-doc comment syntax in files

2021-03-20 Thread Aditya Srivastava
The opening comment mark '/**' is used for highlighting the beginning of
kernel-doc comments.
There are certain files in drivers/crypto/vmx, which follow this syntax,
but the content inside does not comply with kernel-doc.
Such lines were probably not meant for kernel-doc parsing, but are parsed
due to the presence of kernel-doc like comment syntax(i.e, '/**'), which
causes unexpected warnings from kernel-doc.

E.g., presence of kernel-doc like comment in the header line for
drivers/crypto/vmx/vmx.c causes this warning by kernel-doc:

"warning: expecting prototype for Routines supporting VMX instructions on the 
Power 8(). Prototype was for p8_init() instead"

Similarly for other files too.

Provide a simple fix by replacing such occurrences with general comment
format, i.e. '/*', to prevent kernel-doc from parsing it.

Signed-off-by: Aditya Srivastava 
---
* Applies perfectly on next-20210319

 drivers/crypto/vmx/aes.c | 2 +-
 drivers/crypto/vmx/aes_cbc.c | 2 +-
 drivers/crypto/vmx/aes_ctr.c | 2 +-
 drivers/crypto/vmx/aes_xts.c | 2 +-
 drivers/crypto/vmx/ghash.c   | 2 +-
 drivers/crypto/vmx/vmx.c | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d05c02baebcf..ec06189fbf99 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index d88084447f1c..ed0debc7acb5 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CBC routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 79ba062ee1c1..9a3da8cd62f3 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES CTR routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 9fee1b1532a4..dabbccb41550 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * AES XTS routines supporting VMX In-core instructions on Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 14807ac2e3b9..5bc5710a6de0 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * GHASH routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015, 2019 International Business Machines Inc.
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index a40d08e75fc0..7eb713cc87c8 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Routines supporting VMX instructions on the Power 8
  *
  * Copyright (C) 2015 International Business Machines Inc.
-- 
2.17.1



[PATCH] powerpc: epapr: A typo fix

2021-03-20 Thread Bhaskar Chowdhury


s/parmeters/parameters/

Signed-off-by: Bhaskar Chowdhury 
---
 arch/powerpc/include/asm/epapr_hcalls.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/epapr_hcalls.h 
b/arch/powerpc/include/asm/epapr_hcalls.h
index c99ba08a408d..cdf3c6df5123 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -65,7 +65,7 @@
  * but the gcc inline assembly syntax does not allow us to specify registers
  * on the clobber list that are also on the input/output list.  Therefore,
  * the lists of clobbered registers depends on the number of register
- * parmeters ("+r" and "=r") passed to the hypercall.
+ * parameters ("+r" and "=r") passed to the hypercall.
  *
  * Each assembly block should use one of the HCALL_CLOBBERSx macros.  As a
  * general rule, 'x' is the number of parameters passed to the assembly
--
2.26.2



Re: [PATCH 00/36] [Set 4] Rid W=1 warnings in SCSI

2021-03-20 Thread Lee Jones
On Thu, 18 Mar 2021, Martin K. Petersen wrote:

> 
> Lee,
> 
> > This set is part of a larger effort attempting to clean-up W=1 kernel
> > builds, which are currently overwhelmingly riddled with niggly little
> > warnings.
> 
> Applied to 5.13/scsi-staging, thanks! I fixed a few little things.

Thanks for your continued support Martin.

-- 
Lee Jones [李琼斯]
Senior Technical Lead - Developer Services
Linaro.org │ Open source software for Arm SoCs
Follow Linaro: Facebook | Twitter | Blog


Re: [for-stable-4.19 PATCH 1/2] vmlinux.lds.h: Create section for protection against instrumentation

2021-03-20 Thread Alexandre Chartre



On 3/19/21 11:39 AM, Greg Kroah-Hartman wrote:

On Fri, Mar 19, 2021 at 07:54:15AM +0800, Nicolas Boichat wrote:

From: Thomas Gleixner 

commit 655389433e7efec589838b400a2a652b3ffa upstream.

Some code paths, especially the low level entry code, must be protected
against instrumentation for various reasons:

  - Low level entry code can be a fragile beast, especially on x86.

  - With NO_HZ_FULL RCU state needs to be established before using it.

Having a dedicated section for such code allows to validate with tooling
that no unsafe functions are invoked.

Add the .noinstr.text section and the noinstr attribute to mark
functions. noinstr implies notrace. Kprobes will gain a section check
later.

Provide also a set of markers: instrumentation_begin()/end()

These are used to mark code inside a noinstr function which calls
into regular instrumentable text section as safe.

The instrumentation markers are only active when CONFIG_DEBUG_ENTRY is
enabled as the end marker emits a NOP to prevent the compiler from merging
the annotation points. This means the objtool verification requires a
kernel compiled with this option.

Signed-off-by: Thomas Gleixner 
Reviewed-by: Alexandre Chartre 
Acked-by: Peter Zijlstra 
Link: https://lkml.kernel.org/r/20200505134100.075416...@linutronix.de

[Nicolas: context conflicts in:
arch/powerpc/kernel/vmlinux.lds.S
include/asm-generic/vmlinux.lds.h
include/linux/compiler.h
include/linux/compiler_types.h]
Signed-off-by: Nicolas Boichat 


Did you build this on x86?

I get the following build error:

ld:./arch/x86/kernel/vmlinux.lds:20: syntax error

And that line looks like:

  . = ALIGN(8); *(.text.hot .text.hot.*) *(.text .text.fixup) *(.text.unlikely 
.text.unlikely.*) *(.text.unknown .text.unknown.*) . = ALIGN(8); __noinstr_text_start = 
.; *(.__attribute__((noinline)) __attribute__((no_instrument_function)) 
__attribute((__section__(".noinstr.text"))).text) __noinstr_text_end = .; 
*(.text..refcount) *(.ref.text) *(.meminit.text*) *(.memexit.text*)



In the NOINSTR_TEXT macro, noinstr is expanded with the value of the noinstr
macro from linux/compiler_types.h while it shouldn't.

The problem is possibly that the noinstr macro is defined for assembly. Make
sure that the macro is not defined for assembly e.g.:

#ifndef __ASSEMBLY__

/* Section for code which can't be instrumented at all */
#define noinstr \
noinline notrace __attribute((__section__(".noinstr.text")))

#endif

alex.


Re: [PATCH] powerpc: epapr: A typo fix

2021-03-20 Thread Randy Dunlap




On Sun, 21 Mar 2021, Bhaskar Chowdhury wrote:



s/parmeters/parameters/

Signed-off-by: Bhaskar Chowdhury 


Acked-by: Randy Dunlap 



---
arch/powerpc/include/asm/epapr_hcalls.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/epapr_hcalls.h 
b/arch/powerpc/include/asm/epapr_hcalls.h
index c99ba08a408d..cdf3c6df5123 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -65,7 +65,7 @@
 * but the gcc inline assembly syntax does not allow us to specify registers
 * on the clobber list that are also on the input/output list.  Therefore,
 * the lists of clobbered registers depends on the number of register
- * parmeters ("+r" and "=r") passed to the hypercall.
+ * parameters ("+r" and "=r") passed to the hypercall.
 *
 * Each assembly block should use one of the HCALL_CLOBBERSx macros.  As a
 * general rule, 'x' is the number of parameters passed to the assembly
--
2.26.2




[GIT PULL] Please pull powerpc/linux.git powerpc-5.12-4 tag

2021-03-20 Thread Michael Ellerman
-BEGIN PGP SIGNED MESSAGE-
Hash: SHA256

Hi Linus,

Please pull some more powerpc fixes for 5.12:

The following changes since commit 0b736881c8f1a6cd912f7a9162b9e097b28c1c30:

  powerpc/traps: unrecoverable_exception() is not an interrupt handler 
(2021-03-12 11:02:12 +1100)

are available in the git repository at:

  https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git 
tags/powerpc-5.12-4

for you to fetch changes up to cc7a0bb058b85ea03db87169c60c7cfdd5d34678:

  PCI: rpadlpar: Fix potential drc_name corruption in store functions 
(2021-03-17 13:48:07 +1100)

- --
powerpc fixes for 5.12 #4

Fix a possible stack corruption and subsequent DLPAR failure in the rpadlpar_io
PCI hotplug driver.

Two build fixes for uncommon configurations.

Thanks to Christophe Leroy, Tyrel Datwyler.

- --
Christophe Leroy (2):
  powerpc/vdso32: Add missing _restgpr_31_x to fix build failure
  powerpc: Force inlining of cpu_has_feature() to avoid build failure

Tyrel Datwyler (1):
  PCI: rpadlpar: Fix potential drc_name corruption in store functions


 arch/powerpc/include/asm/cpu_has_feature.h |  4 ++--
 arch/powerpc/kernel/vdso32/gettimeofday.S  | 11 +++
 drivers/pci/hotplug/rpadlpar_sysfs.c   | 14 ++
 3 files changed, 19 insertions(+), 10 deletions(-)
-BEGIN PGP SIGNATURE-

iQIzBAEBCAAdFiEEJFGtCPCthwEv2Y/bUevqPMjhpYAFAmBW1cQACgkQUevqPMjh
pYAhog/+KVrIL4frKx1m7bmM+FJ83heS20+F7i5yQWRovR2FdgYwwbIrtxNUKzRU
sNShwQbt9H6GVEGc12kusH5M+t6wGrHrYQ47kdkD6qhDTPfQzKXsoi5eWbgMkQnx
Hd3njqjzfGFEicmfp8l+1WmoWeYpL2MLR7E/KTtS/MYMq5Gsz2lZSMEsmXlAY5Cs
5lT8dBHdH0AX9krRJk9BzKrRqB0qjrK60/sg9oLJQ4sixeWaa+dSORnJq26jYV+n
MPyaNCWF3EUrs7BNyxfZOeFwAZ0FzRSBZrgIt6MNPreT/FaL6lo2McHHyQ9Ls5Qb
yPUmioNKLhXdtcU6AZ0+QOlQvyiGJ7xb3UTAbFFq8TifYHVS0v9srBkgfcvFHycc
yjDtdeTk18WB6NSDC8zVSc+Ut5q7WZa6RLKubiCPgd5DqDkpAKcYcTQSZZUgTb6Q
IyT7bwCBqW6Z5bNTsHhSZ+Ub06L6RLTQ1IfD4GXtqe16F1eeFlMOPCP2YzGUB6s9
IB7GQilliVtRZaFCGwYyVdZCHftHVdK5k7DV+3aImzPRYOKpL4YWwrBnDoqkccYs
Mhbw2YtO7oXSZ2yXGrsu/WF56QCDf6PFO5r28dvNFEi6qZWbfLEQQLZmv/0efFtC
Je1VKXKXCfuLD2VH02F8URZpzbg90dC8YRsTUpgSLCHgiGjEOcE=
=zfoH
-END PGP SIGNATURE-