Add a per-thread core scheduling interface which allows a thread to share a
core with another thread, or have a core exclusively for itself.

ChromeOS uses core scheduling to securely enable hyperthreading.  This cuts
the keypress latency in Google Docs from 150ms to 50ms while improving the
camera streaming frame rate by ~3%.
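For illustration only (not part of this patch), a userspace sketch of how a
thread might use the new interface.  PR_SCHED_CORE_SHARE and the pid == 0
reset behaviour (which mirrors the CAP_SYS_ADMIN check in
sched_core_share_pid() below) come from this patch; the partner-pid handling
and the local fallback define are assumptions:

  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/prctl.h>
  #include <sys/types.h>

  #ifndef PR_SCHED_CORE_SHARE
  #define PR_SCHED_CORE_SHARE 59	/* value added by this patch */
  #endif

  int main(int argc, char **argv)
  {
  	if (argc < 2) {
  		fprintf(stderr, "usage: %s <partner-pid>\n", argv[0]);
  		return 1;
  	}

  	/* Hypothetical partner task passed on the command line. */
  	pid_t partner = (pid_t)atoi(argv[1]);

  	/* Ask the scheduler to let us share a core with 'partner'. */
  	if (prctl(PR_SCHED_CORE_SHARE, partner, 0, 0, 0))
  		perror("PR_SCHED_CORE_SHARE");

  	/* ... run work trusted to share an SMT sibling with 'partner' ... */

  	/* pid == 0 resets our cookie (needs CAP_SYS_ADMIN if one is set). */
  	if (prctl(PR_SCHED_CORE_SHARE, 0, 0, 0, 0))
  		perror("PR_SCHED_CORE_SHARE reset");

  	return 0;
  }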

Tested-by: Julien Desfossez <jdesfos...@digitalocean.com>
Reviewed-by: Aubrey Li <aubrey.in...@gmail.com>
Co-developed-by: Chris Hyser <chris.hy...@oracle.com>
Signed-off-by: Chris Hyser <chris.hy...@oracle.com>
Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org>
---
 include/linux/sched.h            |  1 +
 include/uapi/linux/prctl.h       |  3 ++
 kernel/sched/core.c              | 51 +++++++++++++++++++++++++++++---
 kernel/sys.c                     |  3 ++
 tools/include/uapi/linux/prctl.h |  3 ++
 5 files changed, 57 insertions(+), 4 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index c6a3b0fa952b..79d76c78cc8e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2083,6 +2083,7 @@ void sched_core_unsafe_enter(void);
 void sched_core_unsafe_exit(void);
 bool sched_core_wait_till_safe(unsigned long ti_check);
 bool sched_core_kernel_protected(void);
+int sched_core_share_pid(pid_t pid);
 #else
 #define sched_core_unsafe_enter(ignore) do { } while (0)
 #define sched_core_unsafe_exit(ignore) do { } while (0)
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c334e6a02e5f..217b0482aea1 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -248,4 +248,7 @@ struct prctl_mm_map {
 #define PR_SET_IO_FLUSHER              57
 #define PR_GET_IO_FLUSHER              58
 
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE_SHARE            59
+
 #endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7ccca355623a..a95898c75bdf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -310,6 +310,7 @@ static int __sched_core_stopper(void *data)
 }
 
 static DEFINE_MUTEX(sched_core_mutex);
+static DEFINE_MUTEX(sched_core_tasks_mutex);
 static int sched_core_count;
 
 static void __sched_core_enable(void)
@@ -4037,8 +4038,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        RB_CLEAR_NODE(&p->core_node);
 
        /*
-        * Tag child via per-task cookie only if parent is tagged via per-task
-        * cookie. This is independent of, but can be additive to the CGroup tagging.
+        * If parent is tagged via per-task cookie, tag the child (either with
+        * the parent's cookie, or a new one). The final cookie is calculated
+        * by concatenating the per-task cookie with that of the CGroup.
         */
        if (current->core_task_cookie) {
 
@@ -9855,7 +9857,7 @@ static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2
        unsigned long cookie;
        int ret = -ENOMEM;
 
-       mutex_lock(&sched_core_mutex);
+       mutex_lock(&sched_core_tasks_mutex);
 
        /*
         * NOTE: sched_core_get() is done by sched_core_alloc_task_cookie() or
@@ -9954,10 +9956,51 @@ static int sched_core_share_tasks(struct task_struct *t1, struct task_struct *t2
 
        ret = 0;
 out_unlock:
-       mutex_unlock(&sched_core_mutex);
+       mutex_unlock(&sched_core_tasks_mutex);
        return ret;
 }
 
+/* Called from prctl interface: PR_SCHED_CORE_SHARE */
+int sched_core_share_pid(pid_t pid)
+{
+       struct task_struct *task;
+       int err;
+
+       if (pid == 0) { /* Reset current task's cookie. */
+               /* Resetting a cookie requires privileges. */
+               if (current->core_task_cookie)
+                       if (!capable(CAP_SYS_ADMIN))
+                               return -EPERM;
+               task = NULL;
+       } else {
+               rcu_read_lock();
+               task = find_task_by_vpid(pid);
+               if (!task) {
+                       rcu_read_unlock();
+                       return -ESRCH;
+               }
+
+               get_task_struct(task);
+
+               /*
+                * Check if this process has the right to modify the specified
+                * process. Use the regular "ptrace_may_access()" checks.
+                */
+               if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
+                       rcu_read_unlock();
+                       err = -EPERM;
+                       goto out_put;
+               }
+               rcu_read_unlock();
+       }
+
+       err = sched_core_share_tasks(current, task);
+out_put:
+       if (task)
+               put_task_struct(task);
+       return err;
+}
+
 /* CGroup interface */
 static u64 cpu_core_tag_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
 {
diff --git a/kernel/sys.c b/kernel/sys.c
index a730c03ee607..61a3c98e36de 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2530,6 +2530,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
                error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
                break;
+       case PR_SCHED_CORE_SHARE:
+               error = sched_core_share_pid(arg2);
+               break;
        default:
                error = -EINVAL;
                break;
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 7f0827705c9a..4c45b7dcd92d 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -247,4 +247,7 @@ struct prctl_mm_map {
 #define PR_SET_IO_FLUSHER              57
 #define PR_GET_IO_FLUSHER              58
 
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE_SHARE            59
+
 #endif /* _LINUX_PRCTL_H */
-- 
2.29.2.299.gdc1121823c-goog
