This patch ports the task isolation functionality to the arm 32-bit
architecture. Task isolation needs an additional thread flag, which
requires changing the entry assembly code to accept a bit field larger
than one byte. The constants _TIF_SYSCALL_WORK and _TIF_WORK_MASK are now
loaded from the literal pool. The rest of the patch is straightforward and
reflects what is done on other architectures.

Signed-off-by: Francis Giraldeau <francis.girald...@gmail.com>
---
 arch/arm/Kconfig                                   |  1 +
 arch/arm/include/asm/thread_info.h                 |  8 ++++++--
 arch/arm/kernel/entry-common.S                     | 15 ++++++++++-----
 arch/arm/kernel/ptrace.c                           | 10 ++++++++++
 arch/arm/kernel/signal.c                           | 12 +++++++++++-
 arch/arm/kernel/smp.c                              |  4 ++++
 arch/arm/mm/fault.c                                |  9 ++++++++-
 tools/testing/selftests/task_isolation/isolation.c | 14 ++++++++++----
 8 files changed, 60 insertions(+), 13 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 018ee76..0b147e4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -40,6 +40,7 @@ config ARM
        select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
        select HAVE_ARCH_MMAP_RND_BITS if MMU
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
+       select HAVE_ARCH_TASK_ISOLATION
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARM_SMCCC if CPU_V7
        select HAVE_CBPF_JIT
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d..c83ce56 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -145,6 +145,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SECCOMP            7       /* seccomp syscall filtering active */
 
 #define TIF_NOHZ               12      /* in adaptive nohz mode */
+#define TIF_TASK_ISOLATION     13      /* task isolation active */
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20
@@ -158,16 +159,19 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
+#define _TIF_TASK_ISOLATION    (1 << TIF_TASK_ISOLATION)
 
 /* Checks for any syscall work in entry-common.S */
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+                          _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+                          _TIF_TASK_ISOLATION)
 
 /*
  * Change these and you break ASM code in entry-common.S
  */
 #define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-                                _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+                                _TIF_TASK_ISOLATION)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 10c3283..dd8c45b 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -36,7 +36,8 @@ ret_fast_syscall:
  UNWIND(.cantunwind    )
        disable_irq_notrace                     @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       ldr     r2, =_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       tst     r1, r2
        bne     fast_work_pending
 
        /* perform architecture specific actions before user return */
@@ -62,7 +63,8 @@ ret_fast_syscall:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        disable_irq_notrace                     @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       ldr     r2, =_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       tst     r1, r2
        beq     no_work_pending
  UNWIND(.fnend         )
 ENDPROC(ret_fast_syscall)
@@ -70,7 +72,8 @@ ENDPROC(ret_fast_syscall)
        /* Slower path - fall through to work_pending */
 #endif
 
-       tst     r1, #_TIF_SYSCALL_WORK
+       ldr     r2, =_TIF_SYSCALL_WORK
+       tst     r1, r2
        bne     __sys_trace_return_nosave
 slow_work_pending:
        mov     r0, sp                          @ 'regs'
@@ -94,7 +97,8 @@ ret_slow_syscall:
        disable_irq_notrace                     @ disable interrupts
 ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
-       tst     r1, #_TIF_WORK_MASK
+       ldr     r2, =_TIF_WORK_MASK
+       tst     r1, r2
        bne     slow_work_pending
 no_work_pending:
        asm_trace_hardirqs_on save = 0
@@ -220,7 +224,8 @@ local_restart:
        ldr     r10, [tsk, #TI_FLAGS]           @ check for syscall tracing
        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args
 
-       tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
+       ldr     r11, =_TIF_SYSCALL_WORK         @ are we tracing syscalls?
+       tst     r10, r11
        bne     __sys_trace
 
        cmp     scno, #NR_syscalls              @ check upper syscall limit
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ce131ed..f793ccc 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -26,6 +26,7 @@
 #include <linux/audit.h>
 #include <linux/tracehook.h>
 #include <linux/unistd.h>
+#include <linux/isolation.h>
 
 #include <asm/pgtable.h>
 #include <asm/traps.h>
@@ -935,6 +936,15 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
 
+       /*
+        * In task isolation mode, we may prevent the syscall from
+        * running, and if so we also deliver a signal to the process.
+        */
+       if (test_thread_flag(TIF_TASK_ISOLATION)) {
+               if (task_isolation_syscall(scno) == -1)
+                       return -1;
+       }
+
        /* Do seccomp after ptrace; syscall may have changed. */
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
        if (secure_computing(NULL) == -1)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f214..527a9e6 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/uprobes.h>
+#include <linux/isolation.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -595,9 +596,18 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
                        }
+
+                       if (thread_flags & _TIF_TASK_ISOLATION)
+                               task_isolation_prepare();
                }
                local_irq_disable();
-               thread_flags = current_thread_info()->flags;
+               thread_flags = READ_ONCE(current_thread_info()->flags);
+
+               /* Clear task isolation from cached_flags manually. */
+               if ((thread_flags & _TIF_TASK_ISOLATION) &&
+                   task_isolation_ready())
+                       thread_flags &= ~_TIF_TASK_ISOLATION;
+
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8615216..b653a71 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/isolation.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -523,6 +524,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
+       task_isolation_debug_cpumask(mask, "wakeup IPI");
        smp_cross_call(mask, IPI_WAKEUP);
 }
 
@@ -542,6 +544,7 @@ void arch_irq_work_raise(void)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
+       task_isolation_debug_cpumask(mask, "timer IPI");
        smp_cross_call(mask, IPI_TIMER);
 }
 #endif
@@ -664,6 +667,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
+       task_isolation_debug(cpu, "reschedule IPI");
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3a2e678..02e7eb1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
+#include <linux/isolation.h>
 
 #include <asm/exception.h>
 #include <asm/pgtable.h>
@@ -348,8 +349,14 @@ retry:
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
         */
-       if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+       if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
+                             VM_FAULT_BADACCESS)))) {
+               /* No signal was generated, but notify task-isolation tasks. */
+               if (user_mode(regs))
+                       task_isolation_quiet_exception("page fault at %#lx",
+                                                      addr);
                return 0;
+       }
 
        /*
         * If we are in kernel mode at this point, we
diff --git a/tools/testing/selftests/task_isolation/isolation.c b/tools/testing/selftests/task_isolation/isolation.c
index d78de28..f2796a0 100644
--- a/tools/testing/selftests/task_isolation/isolation.c
+++ b/tools/testing/selftests/task_isolation/isolation.c
@@ -59,6 +59,7 @@
 #include <sys/mman.h>
 #include <sys/time.h>
 #include <sys/prctl.h>
+#include <sys/types.h>
 #include "../kselftest.h"
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -480,7 +481,7 @@ fail:
 #include <arch/spr_def.h>
 #endif
 
-static inline unsigned long get_cycle_count(void)
+static inline u_int64_t get_cycle_count(void)
 {
 #ifdef __x86_64__
        unsigned int lower, upper;
@@ -494,6 +495,11 @@ static inline unsigned long get_cycle_count(void)
 
        asm volatile("mrs %0, cntvct_el0" : "=r" (vtick));
        return vtick;
+#elif defined(__arm__)
+       u_int64_t cval;
+
+       asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+       return cval;
 #else
 #error Unsupported architecture
 #endif
@@ -542,7 +548,7 @@ void jitter_handler(int sig)
 
 void test_jitter(unsigned long waitticks)
 {
-       unsigned long start, last, elapsed;
+       u_int64_t start, last, elapsed;
        int rc;
 
        printf("testing task isolation jitter for %ld ticks\n", waitticks);
@@ -560,8 +566,8 @@ void test_jitter(unsigned long waitticks)
 
        last = start = get_cycle_count();
        do {
-               unsigned long next = get_cycle_count();
-               unsigned long delta = next - last;
+               u_int64_t next = get_cycle_count();
+               u_int64_t delta = next - last;
 
                elapsed = next - start;
                if (__builtin_expect(delta > HISTSIZE, 0)) {
-- 
2.7.4

Reply via email to