Packets ingressing on a LAG that egress on the CPU port, which are not
classified as management, will have a FORWARD tag that does not
contain the normal source device/port tuple. Instead the trunk bit
will be set, and the port field holds the LAG id.

Since the exact source port information is not available in the tag,
frames are injected directly on the LAG interface and thus never
pass through any DSA port interface on ingress.

Management frames (TO_CPU) are not affected and will pass through the
DSA port interface as usual.

Signed-off-by: Tobias Waldekranz <tob...@waldekranz.com>
---
 net/dsa/dsa.c      | 23 +++++++++++++----------
 net/dsa/tag_edsa.c | 12 +++++++++++-
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 2131bf2b3a67..b84e5f0be049 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -220,7 +220,6 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct 
net_device *dev,
        }
 
        skb = nskb;
-       p = netdev_priv(skb->dev);
        skb_push(skb, ETH_HLEN);
        skb->pkt_type = PACKET_HOST;
        skb->protocol = eth_type_trans(skb, skb->dev);
@@ -234,17 +233,21 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct 
net_device *dev,
                skb = nskb;
        }
 
-       s = this_cpu_ptr(p->stats64);
-       u64_stats_update_begin(&s->syncp);
-       s->rx_packets++;
-       s->rx_bytes += skb->len;
-       u64_stats_update_end(&s->syncp);
+       if (dsa_slave_dev_check(skb->dev)) {
+               p = netdev_priv(skb->dev);
+               s = this_cpu_ptr(p->stats64);
+               u64_stats_update_begin(&s->syncp);
+               s->rx_packets++;
+               s->rx_bytes += skb->len;
+               u64_stats_update_end(&s->syncp);
 
-       if (dsa_skb_defer_rx_timestamp(p, skb))
-               return 0;
-
-       gro_cells_receive(&p->gcells, skb);
+               if (dsa_skb_defer_rx_timestamp(p, skb))
+                       return 0;
 
+               gro_cells_receive(&p->gcells, skb);
+       } else {
+               netif_rx(skb);
+       }
        return 0;
 }
 
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 120614240319..800b02f04394 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -86,6 +86,7 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct 
net_device *dev)
 static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                                struct packet_type *pt)
 {
+       bool trunk = false;
        u8 *edsa_header;
        int frame_type;
        int code;
@@ -120,6 +121,7 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct 
net_device *dev,
                break;
 
        case FRAME_TYPE_FORWARD:
+               trunk = !!(edsa_header[1] & 7);
                skb->offload_fwd_mark = 1;
                break;
 
@@ -133,7 +135,15 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, 
struct net_device *dev,
        source_device = edsa_header[0] & 0x1f;
        source_port = (edsa_header[1] >> 3) & 0x1f;
 
-       skb->dev = dsa_master_find_slave(dev, source_device, source_port);
+       if (trunk) {
+               struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+               skb->dev = dsa_lag_dev_by_id(cpu_dp->dst, source_port);
+       } else {
+               skb->dev = dsa_master_find_slave(dev, source_device,
+                                                source_port);
+       }
+
        if (!skb->dev)
                return NULL;
 
-- 
2.17.1

Reply via email to