Re: [PATCH 1/2 v2] rseq/membarrier: add MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU

2020-08-07 Thread Peter Oskolkov
On Fri, Aug 7, 2020 at 6:38 AM  wrote:
>

[...]

> I'm thinking even this is a problem: we can end up sending IPIs to CPUs
> outside our partition (they might be NOHZ_FULL), and that's a no-no too.
>
> Something like so perhaps... that really limits it to CPUs that match
> our mm.

Thanks for the suggestion - I'll prepare a v3 based on your and
Mathieu's feedback.



Re: [PATCH 1/2 v2] rseq/membarrier: add MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU

2020-08-07 Thread peterz
On Thu, Aug 06, 2020 at 10:05:43AM -0700, Peter Oskolkov wrote:
> +#ifdef CONFIG_RSEQ
> +static void membarrier_rseq_ipi(void *arg)
> +{
> + if (current->mm != arg)  /* Not our process. */
> + return;
> + if (!current->rseq)  /* RSEQ not set up for the current task/thread. */
> + return;
> +
> + rseq_preempt(current);
> +}
> +#endif
> +
> +static int membarrier_private_restart_rseq_on_cpu(int cpu_id)
> +{
> +#ifdef CONFIG_RSEQ
> + /* syscalls are not allowed inside rseq critical sections. */
> + if (cpu_id == raw_smp_processor_id())
> + return 0;
> +
> + return smp_call_function_single(cpu_id, membarrier_rseq_ipi,
> + current->mm, true);
> +#else
> + return 0;
> +#endif
> +}

I'm thinking even this is a problem: we can end up sending IPIs to CPUs
outside our partition (they might be NOHZ_FULL), and that's a no-no too.

Something like so perhaps... that really limits it to CPUs that match
our mm.

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 6be66f52a2ad..bee5e98e6774 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -356,6 +356,7 @@ enum {
 
 enum {
MEMBARRIER_FLAG_SYNC_CORE   = (1U << 0),
+   MEMBARRIER_FLAG_RSEQ        = (1U << 1),
 };
 
 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 168479a7d61b..4d9b22c2f5e2 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -27,6 +27,11 @@
 
 static void ipi_mb(void *info)
 {
+   int *flags = info;
+
+   if (flags && (*flags & MEMBARRIER_FLAG_RSEQ))
+   rseq_preempt(current);
+
smp_mb();   /* IPIs should be serializing but paranoid. */
 }
 
@@ -129,11 +134,11 @@ static int membarrier_global_expedited(void)
return 0;
 }
 
-static int membarrier_private_expedited(int flags)
+static int membarrier_private_expedited(int flags, int cpu_id)
 {
-   int cpu;
-   cpumask_var_t tmpmask;
struct mm_struct *mm = current->mm;
+   cpumask_var_t tmpmask;
+   int cpu;
 
if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
@@ -174,6 +179,10 @@ static int membarrier_private_expedited(int flags)
 */
if (cpu == raw_smp_processor_id())
continue;
+
+   if (cpu_id >= 0 && cpu != cpu_id)
+   continue;
+
p = rcu_dereference(cpu_rq(cpu)->curr);
if (p && p->mm == mm)
__cpumask_set_cpu(cpu, tmpmask);
@@ -181,7 +190,7 @@ static int membarrier_private_expedited(int flags)
rcu_read_unlock();
 
preempt_disable();
-   smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+   smp_call_function_many(tmpmask, ipi_mb, &flags, 1);
preempt_enable();
 
free_cpumask_var(tmpmask);
@@ -362,11 +371,13 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
return membarrier_register_global_expedited();
case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
-   return membarrier_private_expedited(0);
+   return membarrier_private_expedited(0, -1);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
return membarrier_register_private_expedited(0);
case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
-   return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
+   return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, -1);
+   case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
+   return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, flags);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
default:
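
For reference, a minimal, hypothetical userspace sketch of how the command
under discussion might be invoked. The command name and its (1 << 7) value are
assumptions taken from the diff above, not from an upstream uapi header, and
the membarrier() wrapper is illustrative; note that in the diff the syscall's
flags argument doubles as the target cpu id for this command.

/*
 * Hypothetical usage sketch, not part of the patch: ask the kernel to
 * restart any rseq critical section currently running on @cpu in a
 * thread of this process.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ
#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ	(1 << 7)	/* assumed value */
#endif

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	int cpu = 3;	/* target CPU; in the diff above it travels via @flags */

	/*
	 * The existing PRIVATE_EXPEDITED commands require a prior REGISTER
	 * step; whether this one does too is left to v3, so it is elided.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, cpu))
		perror("membarrier");
	return 0;
}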