Introduce the percpu-op.h API. It uses rseq internally as the fast-path
when invoked from the right CPU, and falls back on the cpu_opv slow-path
when called from the wrong CPU or when rseq fails.

This allows acting on per-cpu data from various CPUs transparently from
user-space: cpu_opv takes care of migrating the thread to the requested
CPU. This facilitates use-cases such as rebalancing memory across
per-cpu memory pools, or migrating tasks for a user-space scheduler. It
also keeps debugger single-stepping working: when single-stepping
repeatedly aborts the rseq fast-path, the cpu_opv slow-path still
guarantees progress.

Usage from user-space, e.g. for a counter increment:

    int cpu, ret;

    cpu = percpu_current_cpu();
    ret = percpu_addv(&data->c[cpu].count, 1, cpu);
    if (unlikely(ret)) {
        perror("percpu_addv");
        return -1;
    }
    return 0;
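
The same pattern extends to compare-and-store, e.g. pushing onto a
per-cpu free list. A minimal sketch, assuming hypothetical
percpu_list/percpu_list_node types (not part of this patch) and
CPU_SETSIZE from <sched.h>:

    struct percpu_list_node {
            struct percpu_list_node *next;
    };

    struct percpu_list {
            struct { struct percpu_list_node *head; } c[CPU_SETSIZE];
    };

    static int percpu_list_push(struct percpu_list *list,
                                struct percpu_list_node *node)
    {
            intptr_t head;
            int cpu, ret;

            do {
                    cpu = percpu_current_cpu();
                    /* A real implementation would load the head with
                     * single-copy atomicity (e.g. RSEQ_READ_ONCE). */
                    head = (intptr_t)list->c[cpu].head;
                    node->next = (struct percpu_list_node *)head;
                    ret = percpu_cmpeqv_storev((intptr_t *)&list->c[cpu].head,
                                               head, (intptr_t)node, cpu);
                    /* Positive return: comparison failed because the
                     * head moved under us; retry. */
            } while (ret > 0);
            return ret;     /* 0 on success, negative on error. */
    }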

Signed-off-by: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
CC: Shuah Khan <sh...@kernel.org>
CC: Russell King <li...@arm.linux.org.uk>
CC: Catalin Marinas <catalin.mari...@arm.com>
CC: Will Deacon <will.dea...@arm.com>
CC: Thomas Gleixner <t...@linutronix.de>
CC: Paul Turner <p...@google.com>
CC: Peter Zijlstra <pet...@infradead.org>
CC: Andy Lutomirski <l...@amacapital.net>
CC: Andi Kleen <a...@firstfloor.org>
CC: Dave Watson <davejwat...@fb.com>
CC: Chris Lameter <c...@linux.com>
CC: Ingo Molnar <mi...@redhat.com>
CC: "H. Peter Anvin" <h...@zytor.com>
CC: Ben Maurer <bmau...@fb.com>
CC: Steven Rostedt <rost...@goodmis.org>
CC: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
CC: Josh Triplett <j...@joshtriplett.org>
CC: Linus Torvalds <torva...@linux-foundation.org>
CC: Andrew Morton <a...@linux-foundation.org>
CC: Boqun Feng <boqun.f...@gmail.com>
CC: linux-kselft...@vger.kernel.org
CC: linux-...@vger.kernel.org
---
 tools/testing/selftests/cpu-opv/percpu-op.h | 151 ++++++++++++++++++++++++++++
 1 file changed, 151 insertions(+)
 create mode 100644 tools/testing/selftests/cpu-opv/percpu-op.h

diff --git a/tools/testing/selftests/cpu-opv/percpu-op.h b/tools/testing/selftests/cpu-opv/percpu-op.h
new file mode 100644
index 000000000000..918171e585d7
--- /dev/null
+++ b/tools/testing/selftests/cpu-opv/percpu-op.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * percpu-op.h
+ *
+ * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
+ */
+
+#ifndef PERCPU_OP_H
+#define PERCPU_OP_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "rseq.h"
+#include "cpu-op.h"
+
+static inline uint32_t percpu_current_cpu(void)
+{
+       return rseq_current_cpu();
+}
+
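+/*
+ * Each helper below first attempts the rseq fast-path. A positive
+ * return from rseq means a comparison failed; it is reported to the
+ * caller as-is. A negative return means the fast-path could not
+ * complete, so the cpu_opv slow-path is used instead.
+ */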
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv,
+                        int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_storev(v, expect, newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_storev(v, expect, newv, cpu);
+       }
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpnev_storeoffp_load(v, expectnot, voffp, load, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpnev_storeoffp_load(v, expectnot, voffp,
+                                                   load, cpu);
+       }
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_addv(intptr_t *v, intptr_t count, int cpu)
+{
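+       /*
+        * rseq_addv has no comparison step, so any non-zero return
+        * means the fast-path could not complete: fall back on cpu_opv.
+        */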
+       if (rseq_unlikely(rseq_addv(v, count, cpu)))
+               return cpu_op_addv(v, count, cpu);
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev_storev(intptr_t *v, intptr_t expect,
+                               intptr_t *v2, intptr_t newv2,
+                               intptr_t newv, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2,
+                                          newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_storev_storev(v, expect, v2, newv2,
+                                                  newv, cpu);
+       }
+       return 0;
+}
+
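+/*
+ * The _release variants provide store-release semantics on the final
+ * store to @v, publishing the prior store to @v2.
+ */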
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_storev_storev_release(intptr_t *v, intptr_t expect,
+                                       intptr_t *v2, intptr_t newv2,
+                                       intptr_t newv, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_trystorev_storev_release(v, expect, v2, newv2,
+                                                  newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_storev_storev_release(v, expect, v2, newv2,
+                                                          newv, cpu);
+       }
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                               intptr_t *v2, intptr_t expect2,
+                               intptr_t newv, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_cmpeqv_storev(v, expect, v2, expect2, newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_cmpeqv_storev(v, expect, v2, expect2,
+                                                  newv, cpu);
+       }
+       return 0;
+}
+
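+/*
+ * The memcpy variants copy @len bytes from @src to @dst before storing
+ * @newv to @v, as part of the same per-cpu operation.
+ */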
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_memcpy_storev(intptr_t *v, intptr_t expect,
+                               void *dst, void *src, size_t len,
+                               intptr_t newv, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
+                                          newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_memcpy_storev(v, expect, dst, src, len,
+                                                  newv, cpu);
+       }
+       return 0;
+}
+
+static inline __attribute__((always_inline))
+int percpu_cmpeqv_memcpy_storev_release(intptr_t *v, intptr_t expect,
+                                       void *dst, void *src, size_t len,
+                                       intptr_t newv, int cpu)
+{
+       int ret;
+
+       ret = rseq_cmpeqv_trymemcpy_storev_release(v, expect, dst, src, len,
+                                                  newv, cpu);
+       if (rseq_unlikely(ret)) {
+               if (ret > 0)
+                       return ret;
+               return cpu_op_cmpeqv_memcpy_storev_release(v, expect, dst, src,
+                                                          len, newv, cpu);
+       }
+       return 0;
+}
+
+#endif  /* PERCPU_OP_H */
-- 
2.11.0
