The new /sys interface looks like this:
        # cat /sys/devices/virtual/workqueue/writeback/sched_attr
        policy=0 prio=0 nice=0

        # echo "policy=0 prio=0 nice=-1" > /sys/devices/virtual/workqueue/writeback/sched_attr
        # cat /sys/devices/virtual/workqueue/writeback/sched_attr
        policy=0 prio=0 nice=-1
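
For illustration only (not part of this patch), a minimal userspace
sketch that consumes this interface could look as follows; the sysfs
path and the "policy=%u prio=%u nice=%d" format are the ones shown
above, and writing the file requires CAP_SYS_NICE:

	/* Sketch: lower the writeback workqueue's nice level by one. */
	#include <stdio.h>

	#define WQ_SCHED_ATTR \
		"/sys/devices/virtual/workqueue/writeback/sched_attr"

	int main(void)
	{
		unsigned int policy, prio;
		int nice;
		FILE *f;

		/* Read the workqueue's current policy/priority/nice. */
		f = fopen(WQ_SCHED_ATTR, "r");
		if (!f || fscanf(f, "policy=%u prio=%u nice=%d",
				 &policy, &prio, &nice) != 3) {
			perror(WQ_SCHED_ATTR);
			return 1;
		}
		fclose(f);

		/* Write it back with a lower nice value (needs CAP_SYS_NICE). */
		f = fopen(WQ_SCHED_ATTR, "w");
		if (!f || fprintf(f, "policy=0 prio=0 nice=%d\n", nice - 1) < 0) {
			perror(WQ_SCHED_ATTR);
			return 1;
		}
		fclose(f);
		return 0;
	}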

Also, the ability to specify more than just a nice value for a workqueue
(i.e. the full sched_attr: policy, priority and nice) may be useful for
a wide variety of applications.

Signed-off-by: Wen Yang <wen.yan...@zte.com.cn>
Signed-off-by: Jiang Biao <jiang.bi...@zte.com.cn>
Signed-off-by: Tan Hu <tan...@zte.com.cn>
Suggested-by: Tejun Heo <t...@kernel.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Lai Jiangshan <jiangshan...@gmail.com>
Cc: kernel test robot <xiaolong...@intel.com>
Cc: linux-kernel@vger.kernel.org
---
 include/linux/workqueue.h |   5 --
 kernel/workqueue.c        | 130 ++++++++++++++++++++++++++++------------------
 2 files changed, 79 insertions(+), 56 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9faaade..d9d0f36 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -128,11 +128,6 @@ struct delayed_work {
  */
 struct workqueue_attrs {
        /**
-        * @nice: nice level
-        */
-       int nice;
-
-       /**
         * @sched_attr: kworker's scheduling parameters
         */
        struct sched_attr sched_attr;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e1613d0..8c5aba5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1773,7 +1773,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 
        if (pool->cpu >= 0)
                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
-                        pool->attrs->nice < 0  ? "H" : "");
+                        pool->attrs->sched_attr.sched_nice < 0  ? "H" : "");
        else
                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
 
@@ -1782,7 +1782,7 @@ static struct worker *create_worker(struct worker_pool *pool)
        if (IS_ERR(worker->task))
                goto fail;
 
-       set_user_nice(worker->task, pool->attrs->nice);
+       set_user_nice(worker->task, pool->attrs->sched_attr.sched_nice);
        kthread_bind_mask(worker->task, pool->attrs->cpumask);
 
        /* successful, attach the worker to the pool */
@@ -3179,7 +3179,6 @@ static void copy_sched_attr(struct sched_attr *to,
 static void copy_workqueue_attrs(struct workqueue_attrs *to,
                                 const struct workqueue_attrs *from)
 {
-       to->nice = from->nice;
        copy_sched_attr(&to->sched_attr, &from->sched_attr);
        cpumask_copy(to->cpumask, from->cpumask);
        /*
@@ -3195,17 +3194,29 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
 {
        u32 hash = 0;
 
-       hash = jhash_1word(attrs->nice, hash);
+       hash = jhash_1word(attrs->sched_attr.sched_nice, hash);
        hash = jhash(cpumask_bits(attrs->cpumask),
                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
        return hash;
 }
 
+static bool sched_attr_equal(const struct sched_attr *a,
+               const struct sched_attr *b)
+{
+       if (a->sched_policy != b->sched_policy)
+               return false;
+       if (a->sched_priority != b->sched_priority)
+               return false;
+       if (a->sched_nice != b->sched_nice)
+               return false;
+       return true;
+}
+
 /* content equality test */
 static bool wqattrs_equal(const struct workqueue_attrs *a,
                          const struct workqueue_attrs *b)
 {
-       if (a->nice != b->nice)
+       if (a->sched_attr.sched_nice != b->sched_attr.sched_nice)
                return false;
        if (!cpumask_equal(a->cpumask, b->cpumask))
                return false;
@@ -3259,8 +3270,6 @@ static void rcu_free_wq(struct rcu_head *rcu)
 
        if (!(wq->flags & WQ_UNBOUND))
                free_percpu(wq->cpu_pwqs);
-       else
-               free_workqueue_attrs(wq->unbound_attrs);
        free_workqueue_attrs(wq->attrs);
        kfree(wq->rescuer);
        kfree(wq);
@@ -4353,7 +4362,8 @@ static void pr_cont_pool_info(struct worker_pool *pool)
        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
        if (pool->node != NUMA_NO_NODE)
                pr_cont(" node=%d", pool->node);
-       pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
+       pr_cont(" flags=0x%x nice=%d", pool->flags,
+                       pool->attrs->sched_attr.sched_nice);
 }
 
 static void pr_cont_work(bool comma, struct work_struct *work)
@@ -5074,7 +5084,64 @@ static ssize_t sched_attr_show(struct device *dev,
        return written;
 }
 
-static DEVICE_ATTR_RO(sched_attr);
+static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq);
+
+static int wq_set_unbound_sched_attr(struct workqueue_struct *wq,
+               const struct sched_attr *new)
+{
+       struct workqueue_attrs *attrs;
+       int ret = -ENOMEM;
+
+       apply_wqattrs_lock();
+       attrs = wq_sysfs_prep_attrs(wq);
+       if (!attrs)
+               goto out_unlock;
+       copy_sched_attr(&attrs->sched_attr, new);
+       ret = apply_workqueue_attrs_locked(wq, attrs);
+
+out_unlock:
+       apply_wqattrs_unlock();
+       free_workqueue_attrs(attrs);
+       return ret;
+}
+
+static ssize_t sched_attr_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       struct sched_attr new = {
+               .size = sizeof(struct sched_attr),
+               .sched_policy = SCHED_NORMAL,
+               .sched_flags = 0,
+               .sched_priority = 0,
+       };
+       int ret = 0;
+
+       if (!capable(CAP_SYS_NICE))
+               return -EPERM;
+
+       if (sscanf(buf, "policy=0 prio=0 nice=%d",
+                               &new.sched_nice) != 1)
+               return -EINVAL;
+
+       pr_debug("set wq's sched_attr: policy=%u prio=%u nice=%d\n",
+                       new.sched_policy,
+                       new.sched_priority,
+                       new.sched_nice);
+       mutex_lock(&wq->mutex);
+       if (sched_attr_equal(&wq->attrs->sched_attr, &new)) {
+               mutex_unlock(&wq->mutex);
+               return count;
+       }
+       mutex_unlock(&wq->mutex);
+
+       if (wq->flags & WQ_UNBOUND)
+               ret = wq_set_unbound_sched_attr(wq, &new);
+       else
+               ret = -EPERM;
+       return ret ?: count;
+}
+static DEVICE_ATTR_RW(sched_attr);
 
 static struct attribute *wq_sysfs_attrs[] = {
        &dev_attr_per_cpu.attr,
@@ -5104,19 +5171,6 @@ static ssize_t wq_pool_ids_show(struct device *dev,
        return written;
 }
 
-static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
-                           char *buf)
-{
-       struct workqueue_struct *wq = dev_to_wq(dev);
-       int written;
-
-       mutex_lock(&wq->mutex);
-       written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->attrs->nice);
-       mutex_unlock(&wq->mutex);
-
-       return written;
-}
-
 /* prepare workqueue_attrs for sysfs store operations */
 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
 {
@@ -5132,31 +5186,6 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
        return attrs;
 }
 
-static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
-{
-       struct workqueue_struct *wq = dev_to_wq(dev);
-       struct workqueue_attrs *attrs;
-       int ret = -ENOMEM;
-
-       apply_wqattrs_lock();
-
-       attrs = wq_sysfs_prep_attrs(wq);
-       if (!attrs)
-               goto out_unlock;
-
-       if (sscanf(buf, "%d", &attrs->nice) == 1 &&
-           attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
-               ret = apply_workqueue_attrs_locked(wq, attrs);
-       else
-               ret = -EINVAL;
-
-out_unlock:
-       apply_wqattrs_unlock();
-       free_workqueue_attrs(attrs);
-       return ret ?: count;
-}
-
 static ssize_t wq_cpumask_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
@@ -5235,7 +5264,6 @@ static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
 
 static struct device_attribute wq_sysfs_unbound_attrs[] = {
        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
-       __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
        __ATTR_NULL,
@@ -5610,7 +5638,7 @@ int __init workqueue_init_early(void)
                        BUG_ON(init_worker_pool(pool));
                        pool->cpu = cpu;
                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
-                       pool->attrs->nice = std_nice[i++];
+                       pool->attrs->sched_attr.sched_nice = std_nice[i++];
                        pool->node = cpu_to_node(cpu);
 
                        /* alloc pool ID */
@@ -5625,7 +5653,7 @@ int __init workqueue_init_early(void)
                struct workqueue_attrs *attrs;
 
                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
-               attrs->nice = std_nice[i];
+               attrs->sched_attr.sched_nice = std_nice[i];
                unbound_std_wq_attrs[i] = attrs;
 
                /*
@@ -5634,7 +5662,7 @@ int __init workqueue_init_early(void)
                 * Turn off NUMA so that dfl_pwq is used for all nodes.
                 */
                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
-               attrs->nice = std_nice[i];
+               attrs->sched_attr.sched_nice = std_nice[i];
                attrs->no_numa = true;
                ordered_wq_attrs[i] = attrs;
        }
-- 
1.8.3.1
