Module Name: src Committed By: msaitoh Date: Thu Mar 19 14:22:23 UTC 2015
Modified Files: src/sys/dev/pci/ixgbe: ixgbe.c ixv.c Log Message: Sync ixg(4) up to FreeBSD r230572. - Fix in the interrupt handler to make sure the stack TX queue is processed. (FreeBSD r222588) - The maximum read size of incoming packets is done in 1024-byte increments. The current code was rounding down the maximum frame size instead of rounding up, resulting in a read size of 1024 bytes, in the non-jumbo frame case, and splitting the packets across multiple mbufs. (FreeBSD r225045) - Consequently the above problem exposed another issue, which occurred when packets were split across multiple mbufs and all of the mbufs in the chain had the M_PKTHDR flag set. (FreeBSD r225045) - Use the correct constant for conversion between interrupt rate and EITR values (the previous values were off by a factor of 2) (FreeBSD r230572) - Make dev.ix.N.queueM.interrupt_rate a RW sysctl variable. Changing individual values affects the queue immediately, and propagates to all interfaces at the next reinit. (FreeBSD r230572) - Add dev.ix.N.queueM.irqs rdonly sysctl, to export the actual interrupt counts. (FreeBSD r230572) - Some netmap related changes. To generate a diff of this commit: cvs rdiff -u -r1.21 -r1.22 src/sys/dev/pci/ixgbe/ixgbe.c cvs rdiff -u -r1.3 -r1.4 src/sys/dev/pci/ixgbe/ixv.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/dev/pci/ixgbe/ixgbe.c diff -u src/sys/dev/pci/ixgbe/ixgbe.c:1.21 src/sys/dev/pci/ixgbe/ixgbe.c:1.22 --- src/sys/dev/pci/ixgbe/ixgbe.c:1.21 Tue Feb 24 14:49:28 2015 +++ src/sys/dev/pci/ixgbe/ixgbe.c Thu Mar 19 14:22:23 2015 @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2011, Intel Corporation + Copyright (c) 2001-2013, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -59,9 +59,10 @@ * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.51 2011/04/25 23:34:21 jfv Exp $*/ -/*$NetBSD: ixgbe.c,v 1.21 2015/02/24 14:49:28 msaitoh Exp $*/ +/*$NetBSD: ixgbe.c,v 1.22 2015/03/19 14:22:23 msaitoh Exp $*/ #include "opt_inet.h" +#include "opt_inet6.h" #include "ixgbe.h" @@ -73,7 +74,7 @@ int ixgbe_display_debug_stat /********************************************************************* * Driver version *********************************************************************/ -char ixgbe_driver_version[] = "2.3.10"; +char ixgbe_driver_version[] = "2.3.11"; /********************************************************************* * PCI Device ID Table @@ -269,7 +270,7 @@ static int ixgbe_enable_aim = TRUE; #define TUNABLE_INT(__x, __y) TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim); -static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY); +static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate); /* How many packets rxeof tries to clean at a time */ @@ -302,7 +303,7 @@ TUNABLE_INT("hw.ixgbe.enable_msix", &ixg * it can be a performance win in some workloads, but * in others it actually hurts, its off by default. 
*/ -static bool ixgbe_header_split = FALSE; +static int ixgbe_header_split = FALSE; TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split); #if defined(NETBSD_MSI_OR_MSIX) @@ -352,6 +353,18 @@ static int atr_sample_rate = 20; static int fdir_pballoc = 1; #endif +#ifdef DEV_NETMAP +/* + * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to + * be a reference on how to implement netmap support in a driver. + * Additional comments are in ixgbe_netmap.h . + * + * <dev/netma/ixgbe_netmap.h> contains functions for netmap support + * that extend the standard driver. + */ +#include <dev/netmap/ixgbe_netmap.h> +#endif /* DEV_NETMAP */ + /********************************************************************* * Device identification routine * @@ -649,6 +662,9 @@ ixgbe_attach(device_t parent, device_t d ixgbe_add_hw_stats(adapter); +#ifdef DEV_NETMAP + ixgbe_netmap_attach(adapter); +#endif /* DEV_NETMAP */ INIT_DEBUGOUT("ixgbe_attach: end"); return; err_late: @@ -719,6 +735,9 @@ ixgbe_detach(device_t dev, int flags) ether_ifdetach(adapter->ifp); callout_halt(&adapter->timer, NULL); +#ifdef DEV_NETMAP + netmap_detach(adapter->ifp); +#endif /* DEV_NETMAP */ ixgbe_free_pci_resources(adapter); #if 0 /* XXX the NetBSD port is probably missing something here */ bus_generic_detach(dev); @@ -1291,6 +1310,31 @@ ixgbe_init_locked(struct adapter *adapte msec_delay(1); } wmb(); +#ifdef DEV_NETMAP + /* + * In netmap mode, we must preserve the buffers made + * available to userspace before the if_init() + * (this is true by default on the TX side, because + * init makes all buffers available to userspace). + * + * netmap_reset() and the device specific routines + * (e.g. ixgbe_setup_receive_rings()) map these + * buffers at the end of the NIC ring, so here we + * must set the RDT (tail) register to make sure + * they are not overwritten. 
+ * + * In this driver the NIC ring starts at RDH = 0, + * RDT points to the last slot available for reception (?), + * so RDT = num_rx_desc - 1 means the whole ring is available. + */ + if (ifp->if_capenable & IFCAP_NETMAP) { + struct netmap_adapter *na = NA(adapter->ifp); + struct netmap_kring *kring = &na->rx_rings[i]; + int t = na->num_rx_desc - 1 - kring->nr_hwavail; + + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t); + } else +#endif /* DEV_NETMAP */ IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1); } @@ -1536,7 +1580,7 @@ ixgbe_legacy_irq(void *arg) #if defined(NETBSD_MSI_OR_MSIX) /********************************************************************* * - * MSI Queue Interrupt Service routine + * MSIX Queue Interrupt Service routine * **********************************************************************/ void @@ -1555,6 +1599,17 @@ ixgbe_msix_que(void *arg) IXGBE_TX_LOCK(txr); more_tx = ixgbe_txeof(txr); + /* + ** Make certain that if the stack + ** has anything queued the task gets + ** scheduled to handle it. + */ +#if __FreeBSD_version < 800000 + if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) +#else + if (!drbr_empty(adapter->ifp, txr->br)) +#endif + more_tx = 1; IXGBE_TX_UNLOCK(txr); /* Do AIM now? */ @@ -1891,11 +1946,7 @@ ixgbe_xmit(struct tx_ring *txr, struct m txr->next_avail_desc = i; txbuf->m_head = m_head; - /* We exchange the maps instead of copying because otherwise - * we end up with many pointers to the same map and we free - * one map twice in ixgbe_free_transmit_structures(). Who - * knows what other problems this caused. 
--dyoung - */ + /* Swap the dma map between the first and last descriptor */ txr->tx_buffers[first].map = txbuf->map; txbuf->map = map; bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, @@ -2497,7 +2548,9 @@ ixgbe_setup_msix(struct adapter *adapter msi: msgs = pci_msi_count(dev); if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0) - device_printf(adapter->dev,"Using MSI interrupt\n"); + device_printf(adapter->dev,"Using an MSI interrupt\n"); + else + device_printf(adapter->dev,"Using a Legacy interrupt\n"); return (msgs); #endif } @@ -2635,7 +2688,6 @@ ixgbe_setup_interface(device_t dev, stru ifp = adapter->ifp = &ec->ec_if; strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); - ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 1000000000; ifp->if_init = ixgbe_init; ifp->if_stop = ixgbe_ifstop; @@ -2665,19 +2717,20 @@ ixgbe_setup_interface(device_t dev, stru ifp->if_capenable = 0; ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM; - ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; ec->ec_capabilities |= ETHERCAP_JUMBO_MTU; + ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING + | ETHERCAP_VLAN_MTU; ec->ec_capenable = ec->ec_capabilities; /* Don't enable LRO by default */ ifp->if_capabilities |= IFCAP_LRO; /* - ** Dont turn this on by default, if vlans are + ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If - ** using vlans directly on the em driver you can + ** using vlans directly on the ixgbe driver you can ** enable this and get full hardware tag filtering. 
*/ ec->ec_capabilities |= ETHERCAP_VLAN_HWFILTER; @@ -3052,9 +3105,20 @@ ixgbe_setup_transmit_ring(struct tx_ring struct adapter *adapter = txr->adapter; struct ixgbe_tx_buf *txbuf; int i; +#ifdef DEV_NETMAP + struct netmap_adapter *na = NA(adapter->ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ /* Clear the old ring contents */ IXGBE_TX_LOCK(txr); +#ifdef DEV_NETMAP + /* + * (under lock): if in netmap mode, do some consistency + * checks and set slot to entry 0 of the netmap ring. + */ + slot = netmap_reset(na, NR_TX, txr->me, 0); +#endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ @@ -3072,6 +3136,25 @@ ixgbe_setup_transmit_ring(struct tx_ring m_freem(txbuf->m_head); txbuf->m_head = NULL; } +#ifdef DEV_NETMAP + /* + * In netmap mode, set the map for the packet buffer. + * NOTE: Some drivers (not this one) also need to set + * the physical buffer address in the NIC ring. + * Slots in the netmap ring (indexed by "si") are + * kring->nkr_hwofs positions "ahead" wrt the + * corresponding slot in the NIC ring. In some drivers + * (not here) nkr_hwofs can be negative. When computing + * si = i + kring->nkr_hwofs make sure to handle wraparounds. + */ + if (slot) { + int si = i + na->tx_rings[txr->me].nkr_hwofs; + + if (si >= na->num_tx_desc) + si -= na->num_tx_desc; + netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si)); + } +#endif /* DEV_NETMAP */ /* Clear the EOP index */ txbuf->eop_index = -1; } @@ -3549,6 +3632,48 @@ ixgbe_txeof(struct tx_ring *txr) KASSERT(mutex_owned(&txr->tx_mtx)); +#ifdef DEV_NETMAP + if (ifp->if_capenable & IFCAP_NETMAP) { + struct netmap_adapter *na = NA(ifp); + struct netmap_kring *kring = &na->tx_rings[txr->me]; + + tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD); + /* + * In netmap mode, all the work is done in the context + * of the client thread. 
Interrupt handlers only wake up + * clients, which may be sleeping on individual rings + * or on a global resource for all rings. + * To implement tx interrupt mitigation, we wake up the client + * thread roughly every half ring, even if the NIC interrupts + * more frequently. This is implemented as follows: + * - ixgbe_txsync() sets kring->nr_kflags with the index of + * the slot that should wake up the thread (nkr_num_slots + * means the user thread should not be woken up); + * - the driver ignores tx interrupts unless netmap_mitigate=0 + * or the slot has the DD bit set. + * + * When the driver has separate locks, we need to + * release and re-acquire txlock to avoid deadlocks. + * XXX see if we can find a better way. + */ + if (!netmap_mitigate || + (kring->nr_kflags < kring->nkr_num_slots && + tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) { + kring->nr_kflags = kring->nkr_num_slots; + selwakeuppri(&na->tx_rings[txr->me].si, PI_NET); + IXGBE_TX_UNLOCK(txr); + IXGBE_CORE_LOCK(adapter); + selwakeuppri(&na->tx_rings[na->num_queues + 1].si, PI_NET); + IXGBE_CORE_UNLOCK(adapter); + IXGBE_TX_LOCK(txr); + } + return FALSE; + } +#endif /* DEV_NETMAP */ + if (txr->tx_avail == adapter->num_tx_desc) { txr->queue_status = IXGBE_QUEUE_IDLE; return false; @@ -3928,12 +4053,20 @@ ixgbe_setup_receive_ring(struct rx_ring struct lro_ctrl *lro = &rxr->lro; #endif /* LRO */ int rsize, error = 0; +#ifdef DEV_NETMAP + struct netmap_adapter *na = NA(rxr->adapter->ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ adapter = rxr->adapter; ifp = adapter->ifp; /* Clear the ring contents */ IXGBE_RX_LOCK(rxr); +#ifdef DEV_NETMAP + /* same as in ixgbe_setup_transmit_ring() */ + slot = netmap_reset(na, NR_RX, rxr->me, 0); +#endif /* DEV_NETMAP */ rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); @@ -3960,6 +4093,28 @@ ixgbe_setup_receive_ring(struct rx_ring struct mbuf *mh, *mp; rxbuf = 
&rxr->rx_buffers[j]; +#ifdef DEV_NETMAP + /* + * In netmap mode, fill the map and set the buffer + * address in the NIC ring, considering the offset + * between the netmap and NIC rings (see comment in + * ixgbe_setup_transmit_ring() ). No need to allocate + * an mbuf, so end the block with a continue; + */ + if (slot) { + int sj = j + na->rx_rings[rxr->me].nkr_hwofs; + uint64_t paddr; + void *addr; + + if (sj >= na->num_rx_desc) + sj -= na->num_rx_desc; + addr = PNMB(slot + sj, &paddr); + netmap_load_map(rxr->ptag, rxbuf->pmap, addr); + /* Update descriptor */ + rxr->rx_base[j].read.pkt_addr = htole64(paddr); + continue; + } +#endif /* DEV_NETMAP */ /* ** Don't allocate mbufs if not ** doing header split, its wasteful @@ -4091,6 +4246,8 @@ fail: **********************************************************************/ #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 +#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1) + static void ixgbe_initialize_receive_units(struct adapter *adapter) { @@ -4125,7 +4282,7 @@ ixgbe_initialize_receive_units(struct ad hlreg &= ~IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); - bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; for (i = 0; i < adapter->num_queues; i++, rxr++) { u64 rdba = rxr->rxdma.dma_paddr; @@ -4410,6 +4567,25 @@ ixgbe_rxeof(struct ix_queue *que, int co IXGBE_RX_LOCK(rxr); +#ifdef DEV_NETMAP + if (ifp->if_capenable & IFCAP_NETMAP) { + /* + * Same as the txeof routine: only wakeup clients on intr. + * NKR_PENDINTR in nr_kflags is used to implement interrupt + * mitigation (ixgbe_rxsync() will not look for new packets + * unless NKR_PENDINTR is set). 
+ */ + struct netmap_adapter *na = NA(ifp); + + na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR; + selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET); + IXGBE_RX_UNLOCK(rxr); + IXGBE_CORE_LOCK(adapter); + selwakeuppri(&na->rx_rings[na->num_queues + 1].si, PI_NET); + IXGBE_CORE_UNLOCK(adapter); + return (FALSE); + } +#endif /* DEV_NETMAP */ for (i = rxr->next_to_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; u32 rsc, ptype; @@ -4573,9 +4749,10 @@ ixgbe_rxeof(struct ix_queue *que, int co sendmp = rbuf->fmp; rbuf->m_pack = rbuf->fmp = NULL; - if (sendmp != NULL) /* secondary frag */ + if (sendmp != NULL) { /* secondary frag */ + mp->m_flags &= ~M_PKTHDR; sendmp->m_pkthdr.len += mp->m_len; - else { + } else { /* first desc of a non-ps chain */ sendmp = mp; sendmp->m_flags |= M_PKTHDR; @@ -4975,7 +5152,7 @@ ixgbe_configure_ivars(struct adapter *ad u32 newitr; if (ixgbe_max_interrupt_rate > 0) - newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8; + newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; else newitr = 0; @@ -5338,6 +5515,7 @@ ixgbe_sysctl_rdt_handler(SYSCTLFN_ARGS) static int ixgbe_sysctl_interrupt_rate_handler(SYSCTLFN_ARGS) { + int error; struct sysctlnode node; struct ix_queue *que; uint32_t reg, usec, rate; @@ -5349,11 +5527,23 @@ ixgbe_sysctl_interrupt_rate_handler(SYSC reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); usec = ((reg & 0x0FF8) >> 3); if (usec > 0) - rate = 1000000 / usec; + rate = 500000 / usec; else rate = 0; node.sysctl_data = &rate; - return sysctl_lookup(SYSCTLFN_CALL(&node)); + error = sysctl_lookup(SYSCTLFN_CALL(&node)); + if (error) + return error; + reg &= ~0xfff; /* default, no limitation */ + ixgbe_max_interrupt_rate = 0; + if (rate > 0 && rate < 500000) { + if (rate < 1000) + rate = 1000; + ixgbe_max_interrupt_rate = rate; + reg |= ((4000000/rate) & 0xff8 ); + } + IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); + return 0; } const struct sysctlnode * @@ -5462,13 +5652,20 @@ 
ixgbe_add_hw_stats(struct adapter *adapt break; if (sysctl_createv(log, 0, &rnode, &cnode, - CTLFLAG_READONLY, CTLTYPE_INT, + CTLFLAG_READWRITE, CTLTYPE_INT, "interrupt_rate", SYSCTL_DESCR("Interrupt Rate"), ixgbe_sysctl_interrupt_rate_handler, 0, (void *)&adapter->queues[i], 0, CTL_CREATE, CTL_EOL) != 0) break; if (sysctl_createv(log, 0, &rnode, &cnode, + CTLFLAG_READONLY, CTLTYPE_QUAD, + "irqs", SYSCTL_DESCR("irqs on this queue"), + NULL, 0, &(adapter->queues[i].irqs), + 0, CTL_CREATE, CTL_EOL) != 0) + break; + + if (sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READONLY, CTLTYPE_INT, "txd_head", SYSCTL_DESCR("Transmit Descriptor Head"), ixgbe_sysctl_tdh_handler, 0, (void *)txr, @@ -5769,7 +5966,7 @@ static int ixgbe_set_advertise(SYSCTLFN_ARGS) { struct sysctlnode node; - int t, error; + int t, error = 0; struct adapter *adapter; struct ixgbe_hw *hw; ixgbe_link_speed speed, last; Index: src/sys/dev/pci/ixgbe/ixv.c diff -u src/sys/dev/pci/ixgbe/ixv.c:1.3 src/sys/dev/pci/ixgbe/ixv.c:1.4 --- src/sys/dev/pci/ixgbe/ixv.c:1.3 Tue Mar 10 09:26:49 2015 +++ src/sys/dev/pci/ixgbe/ixv.c Thu Mar 19 14:22:23 2015 @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2001-2010, Intel Corporation + Copyright (c) 2001-2011, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,16 +31,17 @@ ******************************************************************************/ /*$FreeBSD: src/sys/dev/ixgbe/ixv.c,v 1.2 2011/03/23 13:10:15 jhb Exp $*/ -/*$NetBSD: ixv.c,v 1.3 2015/03/10 09:26:49 msaitoh Exp $*/ +/*$NetBSD: ixv.c,v 1.4 2015/03/19 14:22:23 msaitoh Exp $*/ #include "opt_inet.h" +#include "opt_inet6.h" #include "ixv.h" /********************************************************************* * Driver version *********************************************************************/ -char ixv_driver_version[] = "1.0.0"; +char ixv_driver_version[] = "1.0.1"; /********************************************************************* * PCI Device ID Table @@ -223,7 +224,7 @@ TUNABLE_INT("hw.ixv.flow_control", &ixv_ * it can be a performance win in some workloads, but * in others it actually hurts, its off by default. */ -static bool ixv_header_split = FALSE; +static int ixv_header_split = FALSE; TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); /* @@ -254,7 +255,7 @@ static int ixv_total_ports; * ixv_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. * - * return 0 on success, positive on failure + * return 1 on success, 0 on failure *********************************************************************/ static int @@ -1355,11 +1356,8 @@ ixv_xmit(struct tx_ring *txr, struct mbu txr->next_avail_desc = i; txbuf->m_head = m_head; - /* We exchange the maps instead of copying because otherwise - * we end up with many pointers to the same map and we free - * one map twice in ixgbe_free_transmit_structures(). Who - * knows what other problems this caused. 
--dyoung - */ + /* Swap the dma map between the first and last descriptor */ + txr->tx_buffers[first].map = txbuf->map; txbuf->map = map; bus_dmamap_sync(txr->txtag->dt_dmat, map, 0, m_head->m_pkthdr.len, BUS_DMASYNC_PREWRITE); @@ -1918,7 +1916,6 @@ ixv_setup_interface(device_t dev, struct ifp = adapter->ifp = &ec->ec_if; strlcpy(ifp->if_xname, device_xname(dev), IFNAMSIZ); - ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 1000000000; ifp->if_init = ixv_init; ifp->if_stop = ixv_ifstop; @@ -1949,8 +1946,9 @@ ixv_setup_interface(device_t dev, struct ifp->if_capenable = 0; ec->ec_capabilities |= ETHERCAP_VLAN_HWCSUM; - ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU; ec->ec_capabilities |= ETHERCAP_JUMBO_MTU; + ec->ec_capabilities |= ETHERCAP_VLAN_HWTAGGING + | ETHERCAP_VLAN_MTU; ec->ec_capenable = ec->ec_capabilities; /* Don't enable LRO by default */