Re: [PATCH 1/1] powerpc/debug: implement HAVE_USER_RETURN_NOTIFIER

2023-12-10 Thread Christophe Leroy


On 11/12/2023 at 03:50, Luming Yu wrote:
> [You don't often get email from luming...@shingroup.cn. 
> Learn why this is important at 
> https://aka.ms/LearnAboutSenderIdentification ]
> 
> The support for user return notifier infrastructure
> is hooked into powerpc architecture.

"The support ... is hooked". So what ? It's like saying "The sky is 
blue". Ok, and then ? What's the link between that statement and the patch ?

I don't understand what you mean. Please provide more details, tell what 
this patch does and how.

Christophe


> ---
>   arch/powerpc/Kconfig                    |  1 +
>   arch/powerpc/include/asm/entry-common.h | 16 ++++++++++++++++
>   arch/powerpc/include/asm/thread_info.h  |  2 ++
>   arch/powerpc/kernel/process.c   |  2 ++
>   4 files changed, 21 insertions(+)
>   create mode 100644 arch/powerpc/include/asm/entry-common.h
> 
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index c10229c0243c..b968068cc04a 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -277,6 +277,7 @@ config PPC
>  select HAVE_STACKPROTECTOR  if PPC64 && 
> $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
>  select HAVE_STATIC_CALL if PPC32
>  select HAVE_SYSCALL_TRACEPOINTS
> +   select HAVE_USER_RETURN_NOTIFIER
>  select HAVE_VIRT_CPU_ACCOUNTING
>  select HAVE_VIRT_CPU_ACCOUNTING_GEN
>  select HOTPLUG_SMT  if HOTPLUG_CPU
> diff --git a/arch/powerpc/include/asm/entry-common.h 
> b/arch/powerpc/include/asm/entry-common.h
> new file mode 100644
> index 000000000000..51f1eb767696
> --- /dev/null
> +++ b/arch/powerpc/include/asm/entry-common.h
> @@ -0,0 +1,16 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef ARCH_POWERPC_ENTRY_COMMON_H
> +#define ARCH_POWERPC_ENTRY_COMMON_H
> +
> +#include <linux/user-return-notifier.h>
> +
> +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
> + unsigned long ti_work)
> +{
> +   if (ti_work & _TIF_USER_RETURN_NOTIFY)
> +   fire_user_return_notifiers();
> +}
> +
> +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
> +
> +#endif
> diff --git a/arch/powerpc/include/asm/thread_info.h 
> b/arch/powerpc/include/asm/thread_info.h
> index bf5dde1a4114..47e226032f9c 100644
> --- a/arch/powerpc/include/asm/thread_info.h
> +++ b/arch/powerpc/include/asm/thread_info.h
> @@ -117,6 +117,7 @@ void arch_setup_new_exec(void);
>   #endif
>   #define TIF_POLLING_NRFLAG 19  /* true if poll_idle() is polling 
> TIF_NEED_RESCHED */
>   #define TIF_32BIT  20  /* 32 bit binary */
> +#define TIF_USER_RETURN_NOTIFY 21  /* notify kernel of userspace return 
> */
> 
>   /* as above, but as bit values */
>   #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
> @@ -125,6 +126,7 @@ void arch_setup_new_exec(void);
>   #define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
>   #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
>   #define _TIF_32BIT (1<<TIF_32BIT)
> +#define _TIF_USER_RETURN_NOTIFY (1<<TIF_USER_RETURN_NOTIFY)
>   #define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
>   #define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
>   #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 392404688cec..70a9ea949798 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -38,6 +38,7 @@
>   #include 
>   #include 
>   #include 
> +#include <linux/user-return-notifier.h>
> 
>   #include 
>   #include 
> @@ -1386,6 +1387,7 @@ struct task_struct *__switch_to(struct task_struct 
> *prev,
>  if (current->thread.regs)
>  restore_math(current->thread.regs);
>   #endif /* CONFIG_PPC_BOOK3S_64 */
> +   propagate_user_return_notify(prev, new);
> 
>  return last;
>   }
> --
> 2.42.0.windows.2
> 
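For context on what is being wired up here: the generic
user-return-notifier API (include/linux/user-return-notifier.h) lets a
subsystem ask to be called back just before the current task next
returns to userspace. A minimal consumer sketch follows; the example
callback and its body are hypothetical, and only the struct layout and
the register call come from the generic API:

#include <linux/user-return-notifier.h>

/* Hypothetical callback: runs on the exit-to-user path once
 * _TIF_USER_RETURN_NOTIFY is set for the task. x86 KVM uses this
 * mechanism to defer restoring user MSRs, for example. */
static void example_on_user_return(struct user_return_notifier *urn)
{
	/* restore per-CPU or per-task user state here */
}

static struct user_return_notifier example_urn = {
	.on_user_return = example_on_user_return,
};

static void example_arm(void)
{
	/* Sets _TIF_USER_RETURN_NOTIFY on the current task, so the
	 * arch_exit_to_user_mode_prepare() hook added by this patch
	 * fires the notifier just before returning to userspace. */
	user_return_notifier_register(&example_urn);
}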


Re: [RFC PATCH 5/9] powerpc/kprobes: Use ftrace to determine if a probe is at function entry

2023-12-10 Thread Google
On Fri,  8 Dec 2023 22:00:44 +0530
Naveen N Rao  wrote:

> Rather than hard-coding the offset into a function to be used to
> determine if a kprobe is at function entry, use ftrace_location() to
> determine the ftrace location within the function, and categorize all
> instructions up to that offset as being at function entry.
> 
> For functions that cannot be traced, we fall back to using a fixed
> offset of 8 (two instructions) to categorize a probe as being at
> function entry for 64-bit elfv2.
> 

OK, so this covers both the KPROBES_ON_FTRACE=y/n cases and whether or
not the function is traced by ftrace.

Looks good to me.

Acked-by: Masami Hiramatsu (Google) 

Thanks,

> Signed-off-by: Naveen N Rao 
> ---
>  arch/powerpc/kernel/kprobes.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> index b20ee72e873a..42665dfab59e 100644
> --- a/arch/powerpc/kernel/kprobes.c
> +++ b/arch/powerpc/kernel/kprobes.c
> @@ -105,24 +105,22 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, 
> unsigned int offset)
>   return addr;
>  }
>  
> -static bool arch_kprobe_on_func_entry(unsigned long offset)
> +static bool arch_kprobe_on_func_entry(unsigned long addr, unsigned long 
> offset)
>  {
> -#ifdef CONFIG_PPC64_ELF_ABI_V2
> -#ifdef CONFIG_KPROBES_ON_FTRACE
> - return offset <= 16;
> -#else
> - return offset <= 8;
> -#endif
> -#else
> + unsigned long ip = ftrace_location(addr);
> +
> + if (ip)
> + return offset <= (ip - addr);
> + if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
> + return offset <= 8;
>   return !offset;
> -#endif
>  }
>  
>  /* XXX try and fold the magic of kprobe_lookup_name() in this */
>  kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long 
> offset,
>bool *on_func_entry)
>  {
> - *on_func_entry = arch_kprobe_on_func_entry(offset);
> + *on_func_entry = arch_kprobe_on_func_entry(addr, offset);
>   return (kprobe_opcode_t *)(addr + offset);
>  }
>  
> -- 
> 2.43.0
> 


-- 
Masami Hiramatsu (Google) 
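
A worked example of the new check, using made-up addresses:

/* Suppose a traceable function starts at addr and ftrace_location()
 * reports its ftrace site at addr + 16 (typical for ELFv2 with
 * KPROBES_ON_FTRACE). Then arch_kprobe_on_func_entry(addr, offset)
 * evaluates as:
 *
 *   offset = 0  ->  0 <= 16 -> true   (probe is at function entry)
 *   offset = 16 -> 16 <= 16 -> true
 *   offset = 20 -> 20 <= 16 -> false  (past the ftrace site)
 *
 * For a function that cannot be traced, ftrace_location() returns 0,
 * and the code falls back to offset <= 8 on 64-bit ELFv2 (the
 * two-instruction global/local entry prologue) or offset == 0
 * elsewhere.
 */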


Re: [PATCH 09/12] KVM: PPC: Book3S HV nestedv2: Do not call H_COPY_TOFROM_GUEST

2023-12-10 Thread Aneesh Kumar K.V
On 12/11/23 9:26 AM, Vaibhav Jain wrote:
> Hi Aneesh,
> 
> Thanks for looking into this patch. My responses inline:
> 
> "Aneesh Kumar K.V (IBM)"  writes:
> 
> 
>> Maybe we should use 
>> firmware_has_feature(FW_FEATURE_H_COPY_TOFROM_GUEST)?
>> 
>> Nestedv2 can end up using the above hcall if it is supported by
>> the hypervisor, right? In its absence we will have to translate the
>> guest ea using xlate and then use kvm_guest_read to read the location
>> using the guest real address, right? That xlate will also involve
>> multiple kvm_guest_read calls.
>> 
>> 
> Yes, agreed, and that's a nice suggestion. However, ATM the hypervisor 
> supporting nestedv2 doesn't have support for this hcall. In future,
> once we have support for this hcall for nestedv2 from the hypervisor,
> we can replace this branch with a firmware_has_feature() test.
> 

What I am suggesting is that we convert that conditional to
firmware_has_feature(), so that later, when the hypervisor supports this
hcall, all older kernels can make use of copy_tofrom_guest without any
code change.

>>> Signed-off-by: Jordan Niethe 
>>> ---
>>>  arch/powerpc/kvm/book3s_64_mmu_radix.c | 3 +++
>>>  1 file changed, 3 insertions(+)
>>>
>>> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
>>> b/arch/powerpc/kvm/book3s_64_mmu_radix.c
>>> index 916af6c153a5..4a1abb9f7c05 100644
>>> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
>>> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
>>> @@ -40,6 +40,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, 
>>> int pid,
>>>  unsigned long quadrant, ret = n;
>>>  bool is_load = !!to;
>>>
>>> +   if (kvmhv_is_nestedv2())
>>> +   return H_UNSUPPORTED;
>>> +
>>>  /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
>>>  if (kvmhv_on_pseries())
>>>  return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
>>> -- 
>>> 2.42.0
> 
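
A minimal sketch of the suggested conversion (hypothetical, not the
applied patch): FW_FEATURE_H_COPY_TOFROM_GUEST is the name used in the
mail above and may not exist as a firmware feature bit yet, and the
hcall's trailing arguments, truncated in the quoted diff, are assumed
to be __pa(to), __pa(from) and n as in the upstream function:

	/* In __kvmhv_copy_tofrom_guest_radix(): gate on hcall
	 * availability rather than on nestedv2 itself, so the fast
	 * path is used as soon as the hypervisor advertises it. */
	if (kvmhv_on_pseries()) {
		if (!firmware_has_feature(FW_FEATURE_H_COPY_TOFROM_GUEST))
			return H_UNSUPPORTED;
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid,
					  eaddr, __pa(to), __pa(from), n);
	}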



Re: [PATCH 09/12] KVM: PPC: Book3S HV nestedv2: Do not call H_COPY_TOFROM_GUEST

2023-12-10 Thread Vaibhav Jain
Hi Aneesh,

Thanks for looking into this patch. My responses inline:

"Aneesh Kumar K.V (IBM)"  writes:


> Maybe we should use
> firmware_has_feature(FW_FEATURE_H_COPY_TOFROM_GUEST)?
>
> Nestedv2 can end up using the above hcall if it is supported by the
> hypervisor, right? In its absence we will have to translate the guest ea
> using xlate and then use kvm_guest_read to read the location using the
> guest real address, right? That xlate will also involve multiple
> kvm_guest_read calls.
>
>
Yes, agreed, and that's a nice suggestion. However, ATM the hypervisor
supporting nestedv2 doesn't have support for this hcall. In future, once
we have support for this hcall for nestedv2 from the hypervisor, we can
replace this branch with a firmware_has_feature() test.

>> Signed-off-by: Jordan Niethe 
>> ---
>>  arch/powerpc/kvm/book3s_64_mmu_radix.c | 3 +++
>>  1 file changed, 3 insertions(+)
>>
>> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c 
>> b/arch/powerpc/kvm/book3s_64_mmu_radix.c
>> index 916af6c153a5..4a1abb9f7c05 100644
>> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
>> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
>> @@ -40,6 +40,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, 
>> int pid,
>>  unsigned long quadrant, ret = n;
>>  bool is_load = !!to;
>>  
>> +if (kvmhv_is_nestedv2())
>> +return H_UNSUPPORTED;
>> +
>>  /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
>>  if (kvmhv_on_pseries())
>>  return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
>> -- 
>> 2.42.0

-- 
Cheers
~ Vaibhav


Re: [PATCH v4 0/5] powerpc/smp: Topology and shared processor optimizations

2023-12-10 Thread Srikar Dronamraju
* Srikar Dronamraju  [2023-11-09 11:19:28]:

Hi Michael,

> PowerVM systems configured in shared processors mode have some unique
> challenges. Some device-tree properties will be missing on a shared
> processor. Hence some sched domains may not make sense for shared processor
> systems.
> 
> 

Did you get a chance to look at this patchset?
Do you see this getting pulled into your merge branch?
I haven't seen any comments that require a change to the current patchset.

-- 
Thanks and Regards
Srikar Dronamraju


Re: [PATCH v9 4/7] powerpc: mm: Default p{m,u}d_pte implementations

2023-12-10 Thread Rohan McLure
On 11/30/23 18:35, Christophe Leroy wrote:
>
> On 30/11/2023 at 03:53, Rohan McLure wrote:
>> For 32-bit systems which have no use cases for p{m,u}d_pte() prior to
>> page table checking, implement default stubs.
> Is that the best solution?
>
> If I understand correctly, it is only needed for 
> pmd_user_accessible_page(). Why not provide a stub 
> pmd_user_accessible_page() that returns false on those architectures?
Yep, this seems reasonable to me.
>
> Same for pud_user_accessible_page()
>
> But if you decide to keep it I think that:
> - It should be squashed with the following patch to make it clear it's 
> needed only for that.
> - Remove the WARN_ONCE().
I might however move those WARN_ONCE() calls to the default, false-returning
p{m,u}d_user_accessible_page() implementations, to be consistent with
pud_pfn().
> - Only have a special one for book3s/64 and a generic one common to the 
> 3 others.
>
>> Signed-off-by: Rohan McLure 
>> ---
>> v9: New patch
>> ---
>>   arch/powerpc/include/asm/book3s/64/pgtable.h |  3 +++
>>   arch/powerpc/include/asm/nohash/64/pgtable.h |  2 ++
>>   arch/powerpc/include/asm/pgtable.h   | 17 +++++++++++++++++
>>   3 files changed, 22 insertions(+)
>>
>> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h 
>> b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> index 8fdb7667c509..2454174b26cb 100644
>> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
>> @@ -887,6 +887,8 @@ static inline int pud_present(pud_t pud)
>>   
>>   extern struct page *pud_page(pud_t pud);
>>   extern struct page *pmd_page(pmd_t pmd);
>> +
>> +#define pud_pte pud_pte
>>   static inline pte_t pud_pte(pud_t pud)
>>   {
>>  return __pte_raw(pud_raw(pud));
>> @@ -1043,6 +1045,7 @@ static inline void __kernel_map_pages(struct page 
>> *page, int numpages, int enable)
>>   }
>>   #endif
>>   
>> +#define pmd_pte pmd_pte
>>   static inline pte_t pmd_pte(pmd_t pmd)
>>   {
>>  return __pte_raw(pmd_raw(pmd));
>> diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h 
>> b/arch/powerpc/include/asm/nohash/64/pgtable.h
>> index f58cbebde26e..09a34fe196ae 100644
>> --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
>> +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
>> @@ -93,6 +93,7 @@ static inline void pmd_clear(pmd_t *pmdp)
>>  *pmdp = __pmd(0);
>>   }
>>   
>> +#define pmd_pte pmd_pte
>>   static inline pte_t pmd_pte(pmd_t pmd)
>>   {
>>  return __pte(pmd_val(pmd));
>> @@ -134,6 +135,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
>>   
>>   extern struct page *pud_page(pud_t pud);
>>   
>> +#define pud_pte pud_pte
>>   static inline pte_t pud_pte(pud_t pud)
>>   {
>>  return __pte(pud_val(pud));
>> diff --git a/arch/powerpc/include/asm/pgtable.h 
>> b/arch/powerpc/include/asm/pgtable.h
>> index 9c0f2151f08f..d7d0f47760d3 100644
>> --- a/arch/powerpc/include/asm/pgtable.h
>> +++ b/arch/powerpc/include/asm/pgtable.h
>> @@ -233,6 +233,23 @@ static inline int pud_pfn(pud_t pud)
>>   }
>>   #endif
>>   
>> +#ifndef pmd_pte
>> +#define pmd_pte pmd_pte
>> +static inline pte_t pmd_pte(pmd_t pmd)
>> +{
>> +WARN_ONCE(1, "pmd: platform does not use pmd entries directly");
>> +return __pte(pmd_val(pmd));
>> +}
>> +#endif
>> +
>> +#ifndef pud_pte
>> +#define pud_pte pud_pte
>> +static inline pte_t pud_pte(pud_t pud)
>> +{
>> +WARN_ONCE(1, "pud: platform does not use pud entries directly");
>> +return __pte(pud_val(pud));
>> +}
>> +#endif
>>   #endif /* __ASSEMBLY__ */
>>   
>>   #endif /* _ASM_POWERPC_PGTABLE_H */
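
For reference, a minimal sketch of the alternative Christophe suggests
(hypothetical, not the applied patch; the exact helper signatures are
assumed from the rest of the page-table-check series):

/* Default for sub-architectures that never map userspace at the
 * PMD/PUD level: report "not a user-accessible page" instead of
 * stubbing out p{m,u}d_pte(). */
#ifndef pmd_user_accessible_page
static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
	return false;
}
#endif

#ifndef pud_user_accessible_page
static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
	return false;
}
#endif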



[PATCH 1/1] powerpc/debug: implement HAVE_USER_RETURN_NOTIFIER

2023-12-10 Thread Luming Yu
The support for user return notifier infrastructure
is hooked into powerpc architecture.
---
 arch/powerpc/Kconfig                    |  1 +
 arch/powerpc/include/asm/entry-common.h | 16 ++++++++++++++++
 arch/powerpc/include/asm/thread_info.h  |  2 ++
 arch/powerpc/kernel/process.c   |  2 ++
 4 files changed, 21 insertions(+)
 create mode 100644 arch/powerpc/include/asm/entry-common.h

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c10229c0243c..b968068cc04a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -277,6 +277,7 @@ config PPC
select HAVE_STACKPROTECTOR  if PPC64 && 
$(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_STATIC_CALL if PPC32
select HAVE_SYSCALL_TRACEPOINTS
+   select HAVE_USER_RETURN_NOTIFIER
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_GEN
select HOTPLUG_SMT  if HOTPLUG_CPU
diff --git a/arch/powerpc/include/asm/entry-common.h 
b/arch/powerpc/include/asm/entry-common.h
new file mode 100644
index 000000000000..51f1eb767696
--- /dev/null
+++ b/arch/powerpc/include/asm/entry-common.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_POWERPC_ENTRY_COMMON_H
+#define ARCH_POWERPC_ENTRY_COMMON_H
+
+#include <linux/user-return-notifier.h>
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+   if (ti_work & _TIF_USER_RETURN_NOTIFY)
+   fire_user_return_notifiers();
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+
+#endif
diff --git a/arch/powerpc/include/asm/thread_info.h 
b/arch/powerpc/include/asm/thread_info.h
index bf5dde1a4114..47e226032f9c 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -117,6 +117,7 @@ void arch_setup_new_exec(void);
 #endif
 #define TIF_POLLING_NRFLAG 19  /* true if poll_idle() is polling 
TIF_NEED_RESCHED */
 #define TIF_32BIT  20  /* 32 bit binary */
+#define TIF_USER_RETURN_NOTIFY 21  /* notify kernel of userspace return */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -125,6 +126,7 @@ void arch_setup_new_exec(void);
 #define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 #define _TIF_32BIT (1<<TIF_32BIT)
+#define _TIF_USER_RETURN_NOTIFY (1<<TIF_USER_RETURN_NOTIFY)
 #define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
 #define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 392404688cec..70a9ea949798 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -38,6 +38,7 @@
 #include 
 #include 
 #include 
+#include <linux/user-return-notifier.h>
 
 #include 
 #include 
@@ -1386,6 +1387,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (current->thread.regs)
restore_math(current->thread.regs);
 #endif /* CONFIG_PPC_BOOK3S_64 */
+   propagate_user_return_notify(prev, new);
 
return last;
 }
-- 
2.42.0.windows.2



Re: [PATCH v4 10/12] KVM: x86: never write to memory from kvm_vcpu_check_block()

2023-12-10 Thread Jim Mattson
On Thu, Dec 7, 2023 at 8:21 AM Sean Christopherson  wrote:
> Doh.  We got the less obvious cases and missed the obvious one.
>
> Ugh, and we also missed a related mess in kvm_guest_apic_has_interrupt().  
> That
> thing should really be folded into vmx_has_nested_events().
>
> Good gravy.  And vmx_interrupt_blocked() does the wrong thing because that
> specifically checks if L1 interrupts are blocked.
>
> Compile tested only, and definitely needs to be chunked into multiple patches,
> but I think something like this mess?

The proposed patch does not fix the problem. In fact, it messes things
up so much that I don't get any test results back.

Google has an internal K-U-T test that demonstrates the problem. I
will post it soon.