<<<snip>>>
> diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
> new file mode 100644
> index 0000000..29c7802
> --- /dev/null
> +++ b/net/core/lwtunnel.c
> @@ -0,0 +1,162 @@
> +/*
> + * lwtunnel  Infrastructure for light weight tunnels like mpls
> + *
> + *
> + *           This program is free software; you can redistribute it and/or
> + *           modify it under the terms of the GNU General Public License
> + *           as published by the Free Software Foundation; either version
> + *           2 of the License, or (at your option) any later version.
> + *
> + */
> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
> +
> +#include <linux/capability.h>
> +#include <linux/module.h>
> +#include <linux/types.h>
> +#include <linux/kernel.h>
> +#include <linux/slab.h>
> +#include <linux/uaccess.h>
> +#include <linux/skbuff.h>
> +#include <linux/netdevice.h>
> +#include <linux/in.h>
> +#include <linux/init.h>
> +#include <linux/err.h>
> +
> +#include <net/lwtunnel.h>
> +#include <net/rtnetlink.h>
> +
> +struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
> +{
> +     struct lwtunnel_state *lws;
> +
> +     return kzalloc(sizeof(*lws) + hdr_len, GFP_KERNEL);

This seems to be called under rcu_read_lock(), so GFP_ATOMIC would have to
be used instead of GFP_KERNEL. (Judging by patch 3/3's mpls_build_state()
and lwtunnel_build_state().)

> +}
> +EXPORT_SYMBOL(lwtunnel_state_alloc);
> +
> +const struct lwtunnel_encap_ops __rcu *
> +             lwtun_encaps[MAX_LWTUNNEL_ENCAP_OPS] __read_mostly;
> +
> +int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
> +                        unsigned int num)
> +{
> +     if (num >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return -ERANGE;
> +
> +     return !cmpxchg((const struct lwtunnel_encap_ops **)
> +                     &lwtun_encaps[num],
> +                     NULL, ops) ? 0 : -1;
> +}
> +EXPORT_SYMBOL(lwtunnel_encap_add_ops);
> +
> +int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
> +                        unsigned int num)
> +{
> +     int ret;
> +
> +     if (num >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return -ERANGE;
> +
> +     ret = (cmpxchg((const struct lwtunnel_encap_ops **)
> +                    &lwtun_encaps[num],
> +                    ops, NULL) == ops) ? 0 : -1;
> +
> +     synchronize_net();
> +
> +     return ret;
> +}
> +EXPORT_SYMBOL(lwtunnel_encap_del_ops);
> +
> +int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
> +                      struct nlattr *encap, struct lwtunnel_state **lws)
> +{
> +     const struct lwtunnel_encap_ops *ops;
> +     int ret = -EINVAL;
> +
> +     if (encap_type == LWTUNNEL_ENCAP_NONE ||
> +         encap_type >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return ret;
> +
> +     ret = -EOPNOTSUPP;
> +     rcu_read_lock();
> +     ops = rcu_dereference(lwtun_encaps[encap_type]);
> +     if (likely(ops && ops->build_state))
> +             ret = ops->build_state(dev, encap, lws);
> +     rcu_read_unlock();
> +
> +     return ret;
> +}
> +EXPORT_SYMBOL(lwtunnel_build_state);
> +
> +int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
> +{
> +     const struct lwtunnel_encap_ops *ops;
> +     struct nlattr *nest;
> +     int ret = -EINVAL;
> +
> +     if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
> +         lwtstate->type >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return 0;
> +
> +     ret = -EOPNOTSUPP;
> +     nest = nla_nest_start(skb, RTA_ENCAP);
> +     rcu_read_lock();
> +     ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
> +     if (likely(ops && ops->fill_encap))
> +             ret = ops->fill_encap(skb, lwtstate);
> +     rcu_read_unlock();
> +
> +     if (ret)
> +             goto errout;
> +
> +     nla_nest_end(skb, nest);
> +
> +     return 0;
> +
> +errout:
> +     nla_nest_cancel(skb, nest);
> +
> +     return ret;
> +}
> +EXPORT_SYMBOL(lwtunnel_fill_encap);
> +
> +int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
> +{
> +     const struct lwtunnel_encap_ops *ops;
> +     int ret = 0;
> +
> +     if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
> +         lwtstate->type >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return 0;
> +
> +     rcu_read_lock();
> +     ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
> +     if (likely(ops && ops->get_encap_size))
> +             ret = nla_total_size(ops->get_encap_size(lwtstate));
> +     rcu_read_unlock();
> +
> +     return ret;
> +}
> +EXPORT_SYMBOL(lwtunnel_get_encap_size);
> +
> +int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
> +{
> +     const struct lwtunnel_encap_ops *ops;
> +     struct lwtunnel_state *lwtstate = lwtunnel_skb_lwstate(skb);
> +     int ret = 0;
> +
> +     if (!lwtstate)
> +             return -EINVAL;
> +
> +     if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
> +         lwtstate->type >= MAX_LWTUNNEL_ENCAP_OPS)
> +             return 0;
> +
> +     rcu_read_lock();
> +     ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
> +     if (likely(ops && ops->output))
> +             ret = ops->output(sk, skb);
> +     rcu_read_unlock();
> +
> +     return ret;
> +}
> +EXPORT_SYMBOL(lwtunnel_output);
> -- 
> 1.7.10.4
> 
> --
> To unsubscribe from this list: send the line "unsubscribe netdev" in
> the body of a message to majord...@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majord...@vger.kernel.org
Reply via email to