Action API was changed to work with actions and action_idr in a
concurrency-safe manner. However, tcf_del_walker() still uses actions
without first taking a reference or idrinfo->lock, and deletes them
directly, disregarding possible concurrent deletes.
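
For reference, the pre-patch flush loop looks roughly like this
(reconstructed from the removed lines in the tcf_del_walker() hunk below;
no reference is taken and idrinfo->lock is not held while each action is
released):

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}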

Add a tc_action_wq workqueue to the action API. Implement
tcf_idr_release_unsafe(), which assumes external synchronization by the
caller and defers the blocking part of action cleanup to the tc_action_wq
workqueue. Extend tcf_action_cleanup() with an 'async' argument to
indicate that the function should free the action asynchronously.
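
The async path uses the standard embedded work_struct pattern. A minimal
standalone sketch of the same mechanism (foo, foo_wq and foo_release() are
placeholder names for illustration only; the real code is in the
act_api.c hunks below):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct foo {
		struct work_struct work;
		/* ... payload ... */
	};

	/* created with alloc_ordered_workqueue() at init time */
	static struct workqueue_struct *foo_wq;

	static void foo_free_work(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, work);

		/* runs in process context, so blocking cleanup is allowed here */
		kfree(f);
	}

	/* called from a context that must not block, e.g. with a spinlock held */
	static void foo_release(struct foo *f)
	{
		INIT_WORK(&f->work, foo_free_work);
		queue_work(foo_wq, &f->work);
	}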

Change tcf_del_walker() to take idrinfo->lock while iterating over
actions and to use the new tcf_idr_release_unsafe() to release them while
holding the lock.
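
After this change the flush loop in tcf_del_walker() has the following
shape (this mirrors the hunk below; tcf_idr_release_unsafe() never blocks
under the lock because the blocking cleanup is deferred to the workqueue):

	spin_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, id) {
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure_locked;
		}
	}
	spin_unlock(&idrinfo->lock);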

Signed-off-by: Vlad Buslov <vla...@mellanox.com>
---
 include/net/act_api.h |  1 +
 net/sched/act_api.c   | 73 ++++++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 65 insertions(+), 9 deletions(-)

diff --git a/include/net/act_api.h b/include/net/act_api.h
index c6f195b3c706..4c5117bc4afb 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -38,6 +38,7 @@ struct tc_action {
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
        struct tcf_chain        *goto_chain;
+       struct work_struct      work;
 };
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 6f118d62c731..4ad9062c34b3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -90,13 +90,38 @@ static void free_tcf(struct tc_action *p)
        kfree(p);
 }
 
-static void tcf_action_cleanup(struct tc_action *p)
+static void tcf_action_free(struct tc_action *p)
+{
+       gen_kill_estimator(&p->tcfa_rate_est);
+       free_tcf(p);
+}
+
+static void tcf_action_free_work(struct work_struct *work)
+{
+       struct tc_action *p = container_of(work,
+                                          struct tc_action,
+                                          work);
+
+       tcf_action_free(p);
+}
+
+static struct workqueue_struct *tc_action_wq;
+
+static bool tcf_action_queue_work(struct work_struct *work, work_func_t func)
+{
+       INIT_WORK(work, func);
+       return queue_work(tc_action_wq, work);
+}
+
+static void tcf_action_cleanup(struct tc_action *p, bool async)
 {
        if (p->ops->cleanup)
                p->ops->cleanup(p);
 
-       gen_kill_estimator(&p->tcfa_rate_est);
-       free_tcf(p);
+       if (async)
+               tcf_action_queue_work(&p->work, tcf_action_free_work);
+       else
+               tcf_action_free(p);
 }
 
 static int __tcf_action_put(struct tc_action *p, bool bind)
@@ -109,7 +134,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
                idr_remove(&idrinfo->action_idr, p->tcfa_index);
                spin_unlock(&idrinfo->lock);
 
-               tcf_action_cleanup(p);
+               tcf_action_cleanup(p, false);
                return 1;
        }
 
@@ -147,6 +172,24 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 }
 EXPORT_SYMBOL(__tcf_idr_release);
 
+/* Release idr without obtaining idrinfo->lock. Caller must prevent any
+ * concurrent modifications of idrinfo->action_idr!
+ */
+
+static int tcf_idr_release_unsafe(struct tc_action *p)
+{
+       if (atomic_read(&p->tcfa_bindcnt) > 0)
+               return -EPERM;
+
+       if (refcount_dec_and_test(&p->tcfa_refcnt)) {
+               idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
+               tcf_action_cleanup(p, true);
+               return ACT_P_DELETED;
+       }
+
+       return 0;
+}
+
 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 {
        struct tc_cookie *act_cookie;
@@ -262,20 +305,25 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;
 
+       spin_lock(&idrinfo->lock);
        idr_for_each_entry_ul(idr, p, id) {
-               ret = __tcf_idr_release(p, false, true);
+               ret = tcf_idr_release_unsafe(p);
                if (ret == ACT_P_DELETED) {
                        module_put(ops->owner);
                        n_i++;
                } else if (ret < 0) {
-                       goto nla_put_failure;
+                       goto nla_put_failure_locked;
                }
        }
+       spin_unlock(&idrinfo->lock);
+
        if (nla_put_u32(skb, TCA_FCNT, n_i))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
        return n_i;
+nla_put_failure_locked:
+       spin_unlock(&idrinfo->lock);
 nla_put_failure:
        nla_nest_cancel(skb, nest);
        return ret;
@@ -341,7 +389,7 @@ static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
                                                p->tcfa_index));
                        spin_unlock(&idrinfo->lock);
 
-                       tcf_action_cleanup(p);
+                       tcf_action_cleanup(p, false);
                        module_put(owner);
                        return 0;
                }
@@ -1713,16 +1761,23 @@ static int __init tc_action_init(void)
 {
        int err;
 
+       tc_action_wq = alloc_ordered_workqueue("tc_action_workqueue", 0);
+       if (!tc_action_wq)
+               return -ENOMEM;
+
        err = register_pernet_subsys(&tcf_action_net_ops);
        if (err)
-               return err;
+               goto err_register_pernet_subsys;
 
        rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
                      0);
+       return err;
 
-       return 0;
+err_register_pernet_subsys:
+       destroy_workqueue(tc_action_wq);
+       return err;
 }
 
 subsys_initcall(tc_action_init);
-- 
2.7.5
