Hi Jon,

Please see my comments below.

On 03/27/2016 04:05 AM, Jon Maloy wrote:
> When sending packets through the bearer layer we always test for the
> presence of the bearer before we send the packets. However, we do
> not free the buffers when no bearer is available.
> We have now discovered that we lose exactly one buffer in the function
> tipc_bearer_xmit_skb() every time a bearer is enabled.
> 

In my opinion, now that we have found an skb leak, we should first identify its
root cause before we propose any fix. Otherwise, I am afraid the commit may
still not completely solve the issue.
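
For reference, this is roughly how I read the pre-patch tipc_bearer_xmit_skb()
path from the hunk below. It is a sketch reconstructed from the diff context,
not the exact upstream source (the local declarations in particular are my
reconstruction); the point is that the skb is silently dropped when the bearer
lookup fails:

        /*
         * Sketch of the pre-patch logic, reconstructed from the diff
         * context below: when no bearer is found, the skb is neither
         * transmitted nor freed, so it leaks.
         */
        void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
                                  struct sk_buff *skb,
                                  struct tipc_media_addr *dest)
        {
                struct tipc_net *tn = net_generic(net, tipc_net_id);
                struct tipc_bearer *b;

                rcu_read_lock();
                b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
                if (likely(b))
                        b->media->send_msg(net, skb, b, dest);
                /* missing else branch: kfree_skb(skb) is never called */
                rcu_read_unlock();
        }

The hunk below adds the missing kfree_skb(), which stops the leak, but it does
not explain why we reach this point with no bearer registered in the first
place.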

Regards,
Ying

> In this commit, we release the buffers or buffer chains in all three
> send functions if no bearer can be found.
> 
> Signed-off-by: Jon Maloy <jon.ma...@ericsson.com>
> ---
>  net/tipc/bearer.c | 45 ++++++++++++++++++++++-----------------------
>  1 file changed, 22 insertions(+), 23 deletions(-)
> 
> diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
> index 27a5406..2c66013 100644
> --- a/net/tipc/bearer.c
> +++ b/net/tipc/bearer.c
> @@ -450,6 +450,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
>       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
>       if (likely(b))
>               b->media->send_msg(net, skb, b, dest);
> +     else
> +             kfree_skb(skb);
>       rcu_read_unlock();
>  }
>  
> @@ -468,11 +470,11 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
>  
>       rcu_read_lock();
>       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
> -     if (likely(b)) {
> -             skb_queue_walk_safe(xmitq, skb, tmp) {
> -                     __skb_dequeue(xmitq);
> -                     b->media->send_msg(net, skb, b, dst);
> -             }
> +     if (unlikely(!b))
> +             __skb_queue_purge(xmitq);
> +     skb_queue_walk_safe(xmitq, skb, tmp) {
> +             __skb_dequeue(xmitq);
> +             b->media->send_msg(net, skb, b, dst);
>       }
>       rcu_read_unlock();
>  }
> @@ -490,14 +492,14 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
>  
>       rcu_read_lock();
>       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
> -     if (likely(b)) {
> -             skb_queue_walk_safe(xmitq, skb, tmp) {
> -                     hdr = buf_msg(skb);
> -                     msg_set_non_seq(hdr, 1);
> -                     msg_set_mc_netid(hdr, net_id);
> -                     __skb_dequeue(xmitq);
> -                     b->media->send_msg(net, skb, b, &b->bcast_addr);
> -             }
> +     if (unlikely(!b))
> +             __skb_queue_purge(xmitq);
> +     skb_queue_walk_safe(xmitq, skb, tmp) {
> +             hdr = buf_msg(skb);
> +             msg_set_non_seq(hdr, 1);
> +             msg_set_mc_netid(hdr, net_id);
> +             __skb_dequeue(xmitq);
> +             b->media->send_msg(net, skb, b, &b->bcast_addr);
>       }
>       rcu_read_unlock();
>  }
> @@ -513,24 +515,21 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
>   * ignores packets sent using interface multicast, and traffic sent to other
>   * nodes (which can happen if interface is running in promiscuous mode).
>   */
> -static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
> +static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
>                          struct packet_type *pt, struct net_device *orig_dev)
>  {
>       struct tipc_bearer *b;
>  
>       rcu_read_lock();
>       b = rcu_dereference_rtnl(dev->tipc_ptr);
> -     if (likely(b)) {
> -             if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
> -                     buf->next = NULL;
> -                     tipc_rcv(dev_net(dev), buf, b);
> -                     rcu_read_unlock();
> -                     return NET_RX_SUCCESS;
> -             }
> +     if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) {
> +             skb->next = NULL;
> +             tipc_rcv(dev_net(dev), skb, b);
> +             rcu_read_unlock();
> +             return NET_RX_SUCCESS;
>       }
>       rcu_read_unlock();
> -
> -     kfree_skb(buf);
> +     kfree_skb(skb);
>       return NET_RX_DROP;
>  }
>  
> 

