On Thu, 8 Sep 2016, Fenghua Yu wrote:
>  void rdtgroup_exit(struct task_struct *tsk)
>  {
> +     if (tsk->rdtgroup) {
> +             atomic_dec(&tsk->rdtgroup->refcount);
> +             tsk->rdtgroup = NULL;

The changelog still talks about this list stuff while this patch seems to
remove it and solely rely on the tsk->rdtgroup pointer, which is a sensible
thing to do.

Writing sensible changelogs would make the life of reviewers too easy,
right?

> +     }
> +}
>  
> -     if (!list_empty(&tsk->rg_list)) {
> -             struct rdtgroup *rdtgrp = tsk->rdtgroup;
> +static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
> +{
> +     struct task_struct *p;
> +     struct rdtgroup *this = r;
>  
> -             list_del_init(&tsk->rg_list);
> -             tsk->rdtgroup = NULL;
> -             atomic_dec(&rdtgrp->refcount);
> +
> +     if (r == root_rdtgrp)
> +             return;

Why does the root_rdtgroup have a tasks file in the first place?

>  static void rdtgroup_destroy_locked(struct rdtgroup *rdtgrp)
> @@ -781,8 +811,6 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
>               goto out_unlock;
>       }
>  
> -     INIT_LIST_HEAD(&rdtgrp->pset.tasks);
> -
>       cpumask_clear(&rdtgrp->cpu_mask);
>  
>       rdtgrp->root = root;
> @@ -843,7 +871,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
>       if (!rdtgrp)
>               return -ENODEV;
>  
> -     if (!list_empty(&rdtgrp->pset.tasks)) {
> +     if (atomic_read(&rdtgrp->refcount)) {

So you rely on rdtgrp->refcount completely now.

>  /*
>   * Forcibly remove all of subdirectories under root.
> @@ -1088,22 +1094,16 @@ void rdtgroup_fork(struct task_struct *child)
>  {
>       struct rdtgroup *rdtgrp;
>  
> -     INIT_LIST_HEAD(&child->rg_list);
> +     child->rdtgroup = NULL;
>       if (!rdtgroup_mounted)
>               return;
>  
> -     mutex_lock(&rdtgroup_mutex);
> -
>       rdtgrp = current->rdtgroup;
>       if (!rdtgrp)
> -             goto out;
> +             return;
>  
> -     list_add_tail(&child->rg_list, &rdtgrp->pset.tasks);
>       child->rdtgroup = rdtgrp;
>       atomic_inc(&rdtgrp->refcount);

This lacks any form of documentation of WHY this is correct and works. I asked
you last time to document the locking and serialization rules ...

>       cpumask_copy(&root->rdtgrp.cpu_mask, cpu_online_mask);

Can't remember if I told you already, but this is racy against hotplug.
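
Purely for illustration, a minimal sketch of one way to close that window,
assuming this runs in a context which may sleep, is to take the hotplug read
lock around the snapshot:

	/*
	 * Sketch only: hold off CPU hotplug so CPUs cannot come or go
	 * while the online mask is snapshotted. A hotplug notifier is
	 * still needed to keep cpu_mask current afterwards.
	 */
	get_online_cpus();
	cpumask_copy(&root->rdtgrp.cpu_mask, cpu_online_mask);
	put_online_cpus();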

> +static int _rdtgroup_move_task(struct task_struct *tsk, struct rdtgroup *rdtgrp)
> +{
> +     if (tsk->rdtgroup)
> +             atomic_dec(&tsk->rdtgroup->refcount);
> +
> +     if (rdtgrp == root_rdtgrp)
> +             tsk->rdtgroup = NULL;
> +     else
> +             tsk->rdtgroup = rdtgrp;
> +
> +     atomic_inc(&rdtgrp->refcount);
> +
> +     return 0;
> +}
> +
> +static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
> +                           bool threadgroup, struct kernfs_open_file *of)
> +{
> +     struct task_struct *tsk;
> +     int ret;
> +
> +     rcu_read_lock();
> +     if (pid) {
> +             tsk = find_task_by_vpid(pid);
> +             if (!tsk) {
> +                     ret = -ESRCH;
> +                     goto out_unlock_rcu;
> +             }
> +     } else {
> +             tsk = current;
> +     }
> +
> +     if (threadgroup)
> +             tsk = tsk->group_leader;
> +
> +     get_task_struct(tsk);
> +     rcu_read_unlock();
> +
> +     ret = rdtgroup_procs_write_permission(tsk, of);
> +     if (!ret)
> +             _rdtgroup_move_task(tsk, rdtgrp);
> +
> +     put_task_struct(tsk);
> +     goto out_unlock_threadgroup;
> +
> +out_unlock_rcu:
> +     rcu_read_unlock();
> +out_unlock_threadgroup:
> +     return ret;
> +}
> +
> +ssize_t _rdtgroup_procs_write(struct rdtgroup *rdtgrp,
> +                        struct kernfs_open_file *of, char *buf,
> +                        size_t nbytes, loff_t off, bool threadgroup)

Globally visible? And what is the leading underscore for?

> +{
> +     pid_t pid;
> +     int ret;
> +
> +     if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
> +             return -EINVAL;

Why do you evaluate the buffer inside the lock-held region?

This function split is completely bogus and artificial.
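
For illustration only, a sketch of what a single, unsplit handler could look
like with the buffer parsed before the lock is taken (assuming
rdtgroup_move_task() keeps the signature shown above):

	static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes, loff_t off)
	{
		struct rdtgroup *rdtgrp;
		pid_t pid;
		int ret;

		/* Parse the user buffer before taking any locks. */
		if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
			return -EINVAL;

		rdtgrp = rdtgroup_kn_lock_live(of->kn);
		if (!rdtgrp)
			return -ENODEV;

		ret = rdtgroup_move_task(pid, rdtgrp, false, of);

		rdtgroup_kn_unlock(of->kn);
		return ret ?: nbytes;
	}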

> +
> +     ret = rdtgroup_move_task(pid, rdtgrp, threadgroup, of);
> +
> +     return ret ?: nbytes;
> +}
> +
> +static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
> +                               char *buf, size_t nbytes, loff_t off)
> +{
> +     struct rdtgroup *rdtgrp;
> +     int ret;
> +
> +     rdtgrp = rdtgroup_kn_lock_live(of->kn);
> +     if (!rdtgrp)
> +             return -ENODEV;
> +
> +     ret = _rdtgroup_procs_write(rdtgrp, of, buf, nbytes, off, false);
> +
> +     rdtgroup_kn_unlock(of->kn);
> +     return ret;
> +}

More sigh.

     tglx
