Because we have disabled RT priority inheritance for
the regular binder domain, the following can happen:

1) thread A (prio 98) calls into thread B
2) because RT prio inheritance is disabled, thread B
   runs at the lowest nice (prio 100) instead
3) thread B calls back into A; A will run at prio 100
   for the duration of the transaction
4) When thread A is done with the call from B, we will
   try to restore the prio back to 98. But, we fail
   because the process doesn't hold CAP_SYS_NICE,
   nor does it have RLIMIT_RTPRIO set.

While the proper fix going forward will be to
correctly grant CAP_SYS_NICE or set RLIMIT_RTPRIO,
for now it seems reasonable to not check permissions
on the restore path.

Signed-off-by: Martijn Coenen <m...@android.com>
---
 drivers/android/binder.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 017693dd4ec1..0c0ecd78b9a7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1109,9 +1109,10 @@ static int to_kernel_prio(int policy, int user_priority)
 }
 
 /**
- * binder_set_priority() - sets the scheduler priority of a task
+ * binder_do_set_priority() - sets the scheduler priority of a task
  * @task:      task to set priority on
  * @desired:   desired priority to run at
+ * @verify:    verify whether @task is allowed to run at @desired prio
  *
  * The scheduler policy of tasks is changed explicitly, because we want to
  * support a few distinct features:
@@ -1145,8 +1146,9 @@ static int to_kernel_prio(int policy, int user_priority)
  *    temporarily runs at its original priority.
  * 5) rt_mutex does not currently support PI for CFS tasks.
  */
-static void binder_set_priority(struct task_struct *task,
-                               struct binder_priority desired)
+static void binder_do_set_priority(struct task_struct *task,
+                                  struct binder_priority desired,
+                                  bool verify)
 {
        int priority; /* user-space prio value */
        bool has_cap_nice;
@@ -1170,7 +1172,7 @@ static void binder_set_priority(struct task_struct *task,
 
        priority = to_userspace_prio(policy, desired.prio);
 
-       if (is_rt_policy(policy) && !has_cap_nice) {
+       if (verify && is_rt_policy(policy) && !has_cap_nice) {
                long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
 
                if (max_rtprio == 0) {
@@ -1182,7 +1184,7 @@ static void binder_set_priority(struct task_struct *task,
                }
        }
 
-       if (is_fair_policy(policy) && !has_cap_nice) {
+       if (verify && is_fair_policy(policy) && !has_cap_nice) {
                long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
 
                if (min_nice > MAX_NICE) {
@@ -1215,6 +1217,18 @@ static void binder_set_priority(struct task_struct *task,
                set_user_nice(task, priority);
 }
 
+static void binder_set_priority(struct task_struct *task,
+                               struct binder_priority desired)
+{
+       binder_do_set_priority(task, desired, /* verify = */ true);
+}
+
+static void binder_restore_priority(struct task_struct *task,
+                                   struct binder_priority desired)
+{
+       binder_do_set_priority(task, desired, /* verify = */ false);
+}
+
 static void binder_transaction_priority(struct task_struct *task,
                                        struct binder_transaction *t,
                                        struct binder_priority node_prio,
@@ -3251,7 +3265,7 @@ static void binder_transaction(struct binder_proc *proc,
                binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
                binder_inner_proc_unlock(target_proc);
                wake_up_interruptible_sync(&target_thread->wait);
-               binder_set_priority(current, in_reply_to->saved_priority);
+               binder_restore_priority(current, in_reply_to->saved_priority);
                binder_free_transaction(in_reply_to);
        } else if (!(t->flags & TF_ONE_WAY)) {
                BUG_ON(t->buffer->async_transaction != 0);
@@ -3340,7 +3354,7 @@ static void binder_transaction(struct binder_proc *proc,
 
        BUG_ON(thread->return_error.cmd != BR_OK);
        if (in_reply_to) {
-               binder_set_priority(current, in_reply_to->saved_priority);
+               binder_restore_priority(current, in_reply_to->saved_priority);
                thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
                binder_enqueue_work(thread->proc,
                                    &thread->return_error.work,
@@ -3940,7 +3954,7 @@ static int binder_thread_read(struct binder_proc *proc,
                        wait_event_interruptible(binder_user_error_wait,
                                                 binder_stop_on_user_error < 2);
                }
-               binder_set_priority(current, proc->default_priority);
+               binder_restore_priority(current, proc->default_priority);
        }
 
        if (non_block) {
-- 
2.14.1.581.gf28d330327-goog

_______________________________________________
devel mailing list
de...@linuxdriverproject.org
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel

Reply via email to