Module Name:    src
Committed By:   msaitoh
Date:           Fri Dec  2 10:21:43 UTC 2016

Modified Files:
        src/sys/dev/pci/ixgbe: ix_txrx.c ixgbe.c ixgbe.h ixgbe_mbx.h ixgbe_vf.c
            ixv.c

Log Message:
Update ixg(4) and ixv(4) up to FreeBSD r283881:
- SRIOV support (not enabled because NetBSD doesn't support it).


To generate a diff of this commit:
cvs rdiff -u -r1.4 -r1.5 src/sys/dev/pci/ixgbe/ix_txrx.c
cvs rdiff -u -r1.44 -r1.45 src/sys/dev/pci/ixgbe/ixgbe.c
cvs rdiff -u -r1.12 -r1.13 src/sys/dev/pci/ixgbe/ixgbe.h
cvs rdiff -u -r1.6 -r1.7 src/sys/dev/pci/ixgbe/ixgbe_mbx.h
cvs rdiff -u -r1.8 -r1.9 src/sys/dev/pci/ixgbe/ixgbe_vf.c
cvs rdiff -u -r1.22 -r1.23 src/sys/dev/pci/ixgbe/ixv.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/ixgbe/ix_txrx.c
diff -u src/sys/dev/pci/ixgbe/ix_txrx.c:1.4 src/sys/dev/pci/ixgbe/ix_txrx.c:1.5
--- src/sys/dev/pci/ixgbe/ix_txrx.c:1.4	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ix_txrx.c	Fri Dec  2 10:21:43 2016
@@ -58,8 +58,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 282289 2015-04-30 22:53:27Z erj $*/
-/*$NetBSD: ix_txrx.c,v 1.4 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ix_txrx.c 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ix_txrx.c,v 1.5 2016/12/02 10:21:43 msaitoh Exp $*/
 
 #include "ixgbe.h"
 
@@ -614,7 +614,6 @@ ixgbe_setup_transmit_ring(struct tx_ring
 {
 	struct adapter *adapter = txr->adapter;
 	struct ixgbe_tx_buf *txbuf;
-	int i;
 #ifdef DEV_NETMAP
 	struct netmap_adapter *na = NA(adapter->ifp);
 	struct netmap_slot *slot;
@@ -637,7 +636,7 @@ ixgbe_setup_transmit_ring(struct tx_ring
 
 	/* Free any existing tx buffers. */
         txbuf = txr->tx_buffers;
-	for (i = 0; i < txr->num_desc; i++, txbuf++) {
+	for (int i = 0; i < txr->num_desc; i++, txbuf++) {
 		if (txbuf->m_head != NULL) {
 			bus_dmamap_sync(txr->txtag->dt_dmat, txbuf->map,
 			    0, txbuf->m_head->m_pkthdr.len,
@@ -659,7 +658,8 @@ ixgbe_setup_transmit_ring(struct tx_ring
 		 */
 		if (slot) {
 			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
-			netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
+			netmap_load_map(na, txr->txtag,
+			    txbuf->map, NMB(na, slot + si));
 		}
 #endif /* DEV_NETMAP */
 		/* Clear the EOP descriptor pointer */
@@ -812,8 +812,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr, 
 	if ((mtag = VLAN_OUTPUT_TAG(ec, mp)) != NULL) {
 		vtag = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
-	}
-	else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
+	} else if (!IXGBE_IS_X550VF(adapter) && (offload == FALSE))
 		return (0);
 
 	/*
@@ -1415,7 +1414,7 @@ ixgbe_allocate_receive_buffers(struct rx
 	struct	adapter 	*adapter = rxr->adapter;
 	device_t 		dev = adapter->dev;
 	struct ixgbe_rx_buf 	*rxbuf;
-	int             	i, bsize, error;
+	int             	bsize, error;
 
 	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
 	if (!(rxr->rx_buffers =
@@ -1437,7 +1436,7 @@ ixgbe_allocate_receive_buffers(struct rx
 		goto fail;
 	}
 
-	for (i = 0; i < rxr->num_desc; i++, rxbuf++) {
+	for (int i = 0; i < rxr->num_desc; i++, rxbuf++) {
 		rxbuf = &rxr->rx_buffers[i];
 		error = ixgbe_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
 		if (error) {
@@ -1459,9 +1458,8 @@ static void     
 ixgbe_free_receive_ring(struct rx_ring *rxr)
 { 
 	struct ixgbe_rx_buf       *rxbuf;
-	int i;
 
-	for (i = 0; i < rxr->num_desc; i++) {
+	for (int i = 0; i < rxr->num_desc; i++) {
 		rxbuf = &rxr->rx_buffers[i];
 		if (rxbuf->buf != NULL) {
 			bus_dmamap_sync(rxr->ptag->dt_dmat, rxbuf->pmap,
@@ -2231,6 +2229,9 @@ ixgbe_allocate_queues(struct adapter *ad
 	struct rx_ring	*rxr;
 	int rsize, tsize, error = IXGBE_SUCCESS;
 	int txconf = 0, rxconf = 0;
+#ifdef PCI_IOV
+	enum ixgbe_iov_mode iov_mode;
+#endif
 
         /* First allocate the top level queue structs */
         if (!(adapter->queues =
@@ -2263,6 +2264,12 @@ ixgbe_allocate_queues(struct adapter *ad
 	tsize = roundup2(adapter->num_tx_desc *
 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
 
+#ifdef PCI_IOV
+	iov_mode = ixgbe_get_iov_mode(adapter);
+	adapter->pool = ixgbe_max_vfs(iov_mode);
+#else
+	adapter->pool = 0;
+#endif
 	/*
 	 * Now set up the TX queues, txconf is needed to handle the
 	 * possibility that things fail midcourse and we need to
@@ -2272,7 +2279,11 @@ ixgbe_allocate_queues(struct adapter *ad
 		/* Set up some basics */
 		txr = &adapter->tx_rings[i];
 		txr->adapter = adapter;
+#ifdef PCI_IOV
+		txr->me = ixgbe_pf_que_index(iov_mode, i);
+#else
 		txr->me = i;
+#endif
 		txr->num_desc = adapter->num_tx_desc;
 
 		/* Initialize the TX side lock */
@@ -2319,7 +2330,11 @@ ixgbe_allocate_queues(struct adapter *ad
 		rxr = &adapter->rx_rings[i];
 		/* Set up some basics */
 		rxr->adapter = adapter;
+#ifdef PCI_IOV
+		rxr->me = ixgbe_pf_que_index(iov_mode, i);
+#else
 		rxr->me = i;
+#endif
 		rxr->num_desc = adapter->num_rx_desc;
 
 		/* Initialize the RX side lock */

Index: src/sys/dev/pci/ixgbe/ixgbe.c
diff -u src/sys/dev/pci/ixgbe/ixgbe.c:1.44 src/sys/dev/pci/ixgbe/ixgbe.c:1.45
--- src/sys/dev/pci/ixgbe/ixgbe.c:1.44	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ixgbe.c	Fri Dec  2 10:21:43 2016
@@ -58,8 +58,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 282299 2015-05-01 12:10:36Z bz $*/
-/*$NetBSD: ixgbe.c,v 1.44 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/if_ix.c 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ixgbe.c,v 1.45 2016/12/02 10:21:43 msaitoh Exp $*/
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -77,7 +77,7 @@ int             ixgbe_display_debug_stat
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixgbe_driver_version[] = "2.8.3";
+char ixgbe_driver_version[] = "3.1.0";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -164,6 +164,7 @@ static int	ixgbe_setup_msix(struct adapt
 static void	ixgbe_free_pci_resources(struct adapter *);
 static void	ixgbe_local_timer(void *);
 static int	ixgbe_setup_interface(device_t, struct adapter *);
+static void	ixgbe_config_gpie(struct adapter *);
 static void	ixgbe_config_dmac(struct adapter *);
 static void	ixgbe_config_delay_values(struct adapter *);
 static void	ixgbe_config_link(struct adapter *);
@@ -235,6 +236,18 @@ static ixgbe_vendor_info_t *ixgbe_lookup
 static void	ixgbe_reinit_fdir(void *, int);
 #endif
 
+#ifdef PCI_IOV
+static void	ixgbe_ping_all_vfs(struct adapter *);
+static void	ixgbe_handle_mbx(void *, int);
+static int	ixgbe_init_iov(device_t, u16, const nvlist_t *);
+static void	ixgbe_uninit_iov(device_t);
+static int	ixgbe_add_vf(device_t, u16, const nvlist_t *);
+static void	ixgbe_initialize_iov(struct adapter *);
+static void	ixgbe_recalculate_max_frame(struct adapter *);
+static void	ixgbe_init_vf(struct adapter *, struct ixgbe_vf *);
+#endif /* PCI_IOV */
+
+
 /*********************************************************************
  *  FreeBSD Device Interface Entry Points
  *********************************************************************/
@@ -486,7 +499,7 @@ ixgbe_attach(device_t parent, device_t d
 	}
 
 	/* Allocate multicast array memory. */
-	adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+	adapter->mta = malloc(sizeof(*adapter->mta) *
 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
 	if (adapter->mta == NULL) {
 		aprint_error_dev(dev, "Cannot allocate multicast setup array\n");
@@ -568,6 +581,28 @@ ixgbe_attach(device_t parent, device_t d
 	/* Set an initial default flow control value */
 	adapter->fc = ixgbe_fc_full;
 
+#ifdef PCI_IOV
+	if ((hw->mac.type != ixgbe_mac_82598EB) && (adapter->msix > 1)) {
+		nvlist_t *pf_schema, *vf_schema;
+
+		hw->mbx.ops.init_params(hw);
+		pf_schema = pci_iov_schema_alloc_node();
+		vf_schema = pci_iov_schema_alloc_node();
+		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+		    IOV_SCHEMA_HASDEFAULT, TRUE);
+		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+		error = pci_iov_attach(dev, pf_schema, vf_schema);
+		if (error != 0) {
+			device_printf(dev,
+			    "Error %d setting up SR-IOV\n", error);
+		}
+	}
+#endif /* PCI_IOV */
+
 	/* Check for certain supported features */
 	ixgbe_check_wol_support(adapter);
 	ixgbe_check_eee_support(adapter);
@@ -642,6 +677,13 @@ ixgbe_detach(device_t dev, int flags)
 	}
 #endif
 
+#ifdef PCI_IOV
+	if (pci_iov_detach(dev) != 0) {
+		device_printf(dev, "SR-IOV in use; detach first.\n");
+		return (EBUSY);
+	}
+#endif /* PCI_IOV */
+
 	/* Stop the adapter */
 	IXGBE_CORE_LOCK(adapter);
 	ixgbe_setup_low_power_mode(adapter);
@@ -658,6 +700,9 @@ ixgbe_detach(device_t dev, int flags)
 	softint_disestablish(adapter->link_si);
 	softint_disestablish(adapter->mod_si);
 	softint_disestablish(adapter->msf_si);
+#ifdef PCI_IOV
+	softint_disestablish(adapter->mbx_si);
+#endif
 	softint_disestablish(adapter->phy_si);
 #ifdef IXGBE_FDIR
 	softint_disestablish(adapter->fdir_si);
@@ -991,6 +1036,9 @@ ixgbe_ioctl(struct ifnet * ifp, u_long c
 		else if (command == SIOCSIFCAP || command == SIOCSIFMTU) {
 			IXGBE_CORE_LOCK(adapter);
 			ixgbe_init_locked(adapter);
+#ifdef PCI_IOV
+			ixgbe_recalculate_max_frame(adapter);
+#endif
 			IXGBE_CORE_UNLOCK(adapter);
 		} else if (command == SIOCADDMULTI || command == SIOCDELMULTI) {
 			/*
@@ -1027,13 +1075,19 @@ ixgbe_init_locked(struct adapter *adapte
 	struct ifnet   *ifp = adapter->ifp;
 	device_t 	dev = adapter->dev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32		k, txdctl, mhadd, gpie;
+	struct tx_ring  *txr;
+	struct rx_ring  *rxr;
+	u32		txdctl, mhadd;
 	u32		rxdctl, rxctrl;
+#ifdef PCI_IOV
+	enum ixgbe_iov_mode mode;
+#endif
 
 	/* XXX check IFF_UP and IFF_RUNNING, power-saving state! */
 
 	KASSERT(mutex_owned(&adapter->core_mtx));
 	INIT_DEBUGOUT("ixgbe_init_locked: begin");
+
 	hw->adapter_stopped = FALSE;
 	ixgbe_stop_adapter(hw);
         callout_stop(&adapter->timer);
@@ -1042,13 +1096,22 @@ ixgbe_init_locked(struct adapter *adapte
 	adapter->max_frame_size =
 		ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 
+#ifdef PCI_IOV
+	mode = ixgbe_get_iov_mode(adapter);
+	adapter->pool = ixgbe_max_vfs(mode);
+	/* Queue indices may change with IOV mode */
+	for (int i = 0; i < adapter->num_queues; i++) {
+		adapter->rx_rings[i].me = ixgbe_pf_que_index(mode, i);
+		adapter->tx_rings[i].me = ixgbe_pf_que_index(mode, i);
+	}
+#endif
         /* reprogram the RAR[0] in case user changed it. */
-        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV);
 
 	/* Get the latest mac address, User can use a LAA */
-	memcpy(hw->mac.addr, CLLADDR(adapter->ifp->if_sadl),
+	memcpy(hw->mac.addr, CLLADDR(ifp->if_sadl),
 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
-	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+	ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1);
 	hw->addr_ctrl.rar_used_count = 1;
 
 	/* Prepare transmit descriptors and buffers */
@@ -1059,6 +1122,9 @@ ixgbe_init_locked(struct adapter *adapte
 	}
 
 	ixgbe_init_hw(hw);
+#ifdef PCI_IOV
+	ixgbe_initialize_iov(adapter);
+#endif
 	ixgbe_initialize_transmit_units(adapter);
 
 	/* Setup Multicast table */
@@ -1068,14 +1134,10 @@ ixgbe_init_locked(struct adapter *adapte
 	** Determine the correct mbuf pool
 	** for doing jumbo frames
 	*/
-	if (adapter->max_frame_size <= 2048)
+	if (adapter->max_frame_size <= MCLBYTES)
 		adapter->rx_mbuf_sz = MCLBYTES;
-	else if (adapter->max_frame_size <= 4096)
-		adapter->rx_mbuf_sz = MJUMPAGESIZE;
-	else if (adapter->max_frame_size <= 9216)
-		adapter->rx_mbuf_sz = MJUM9BYTES;
 	else
-		adapter->rx_mbuf_sz = MJUM16BYTES;
+		adapter->rx_mbuf_sz = MJUMPAGESIZE;
 
 	/* Prepare receive descriptors and buffers */
 	if (ixgbe_setup_receive_structures(adapter)) {
@@ -1087,31 +1149,8 @@ ixgbe_init_locked(struct adapter *adapte
 	/* Configure RX settings */
 	ixgbe_initialize_receive_units(adapter);
 
-	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
-
-	/* Enable Fan Failure Interrupt */
-	gpie |= IXGBE_SDP1_GPIEN_BY_MAC(hw);
-
-	/* Add for Module detection */
-	if (hw->mac.type == ixgbe_mac_82599EB)
-		gpie |= IXGBE_SDP2_GPIEN;
-
-	/*
-	 * Thermal Failure Detection (X540)
-	 * Link Detection (X552)
-	 */
-	if (hw->mac.type == ixgbe_mac_X540 ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
-		gpie |= IXGBE_SDP0_GPIEN_X540;
-
-	if (adapter->msix > 1) {
-		/* Enable Enhanced MSIX mode */
-		gpie |= IXGBE_GPIE_MSIX_MODE;
-		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
-		    IXGBE_GPIE_OCD;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	/* Enable SDP & MSIX interrupts based on adapter */
+	ixgbe_config_gpie(adapter);
 
 	/* Set MTU size */
 	if (ifp->if_mtu > ETHERMTU) {
@@ -1124,7 +1163,8 @@ ixgbe_init_locked(struct adapter *adapte
 	
 	/* Now enable all the queues */
 	for (int i = 0; i < adapter->num_queues; i++) {
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+		txr = &adapter->tx_rings[i];
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
 		txdctl |= IXGBE_TXDCTL_ENABLE;
 		/* Set WTHRESH to 8, burst writeback */
 		txdctl |= (8 << 16);
@@ -1136,11 +1176,12 @@ ixgbe_init_locked(struct adapter *adapte
 		 * Prefetching enables tx line rate even with 1 queue.
 		 */
 		txdctl |= (32 << 0) | (1 << 8);
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
 	}
 
-	for (int i = 0; i < adapter->num_queues; i++) {
-		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+	for (int i = 0, j = 0; i < adapter->num_queues; i++) {
+		rxr = &adapter->rx_rings[i];
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
 		if (hw->mac.type == ixgbe_mac_82598EB) {
 			/*
 			** PTHRESH = 21
@@ -1151,12 +1192,9 @@ ixgbe_init_locked(struct adapter *adapte
 			rxdctl |= 0x080420;
 		}
 		rxdctl |= IXGBE_RXDCTL_ENABLE;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
-		/* XXX I don't trust this loop, and I don't trust the
-		 * XXX memory barrier.  What is this meant to do? --dyoung
-		 */
-		for (k = 0; k < 10; k++) {
-			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl);
+		for (; j < 10; j++) {
+			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) &
 			    IXGBE_RXDCTL_ENABLE)
 				break;
 			else
@@ -1185,10 +1223,10 @@ ixgbe_init_locked(struct adapter *adapte
 			struct netmap_kring *kring = &na->rx_rings[i];
 			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
 
-			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
+			IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), t);
 		} else
 #endif /* DEV_NETMAP */
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(rxr->me), adapter->num_rx_desc - 1);
 	}
 
 	/* Enable Receive engine */
@@ -1227,9 +1265,9 @@ ixgbe_init_locked(struct adapter *adapte
 #endif
 
 	/*
-	** Check on any SFP devices that
-	** need to be kick-started
-	*/
+	 * Check on any SFP devices that
+	 * need to be kick-started
+	 */
 	if (hw->phy.type == ixgbe_phy_none) {
 		int err = hw->phy.ops.identify(hw);
 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -1243,8 +1281,7 @@ ixgbe_init_locked(struct adapter *adapte
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR);
 
 	/* Configure Energy Efficient Ethernet for supported devices */
-	if (adapter->eee_support)
-		ixgbe_setup_eee(hw, adapter->eee_enabled);
+	ixgbe_setup_eee(hw, adapter->eee_enabled);
 
 	/* Config/Enable Link */
 	ixgbe_config_link(adapter);
@@ -1264,6 +1301,15 @@ ixgbe_init_locked(struct adapter *adapte
 	/* And now turn on interrupts */
 	ixgbe_enable_intr(adapter);
 
+#ifdef PCI_IOV
+	/* Enable the use of the MBX by the VF's */
+	{
+		u32 reg = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+		reg |= IXGBE_CTRL_EXT_PFRSTD;
+		IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, reg);
+	}
+#endif
+
 	/* Now inform the stack we're ready */
 	ifp->if_flags |= IFF_RUNNING;
 
@@ -1282,6 +1328,51 @@ ixgbe_init(struct ifnet *ifp)
 }
 
 static void
+ixgbe_config_gpie(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 gpie;
+
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+	/* Fan Failure Interrupt */
+	if (hw->device_id == IXGBE_DEV_ID_82598AT)
+		gpie |= IXGBE_SDP1_GPIEN;
+
+	/*
+	 * Module detection (SDP2)
+	 * Media ready (SDP1)
+	 */
+	if (hw->mac.type == ixgbe_mac_82599EB) {
+		gpie |= IXGBE_SDP2_GPIEN;
+		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
+			gpie |= IXGBE_SDP1_GPIEN;
+	}
+
+	/*
+	 * Thermal Failure Detection (X540)
+	 * Link Detection (X557)
+	 */
+	if (hw->mac.type == ixgbe_mac_X540 ||
+	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
+		gpie |= IXGBE_SDP0_GPIEN_X540;
+
+	if (adapter->msix > 1) {
+		/* Enable Enhanced MSIX mode */
+		gpie |= IXGBE_GPIE_MSIX_MODE;
+		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+		    IXGBE_GPIE_OCD;
+	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	return;
+}
+
+/*
+ * Requires adapter->max_frame_size to be set.
+ */
+static void
 ixgbe_config_delay_values(struct adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1451,8 +1542,8 @@ ixgbe_legacy_irq(void *arg)
 	}
 
 	/* Check for fan failure */
-	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-	    (reg_eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) {
+	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
 		    "REPLACE IMMEDIATELY!!\n");
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
@@ -1495,6 +1586,7 @@ ixgbe_msix_que(void *arg)
 	bool		more;
 	u32		newitr = 0;
 
+
 	/* Protect against spurious interrupts */
 	if ((ifp->if_flags & IFF_RUNNING) == 0)
 		return 0;
@@ -1623,6 +1715,10 @@ ixgbe_msix_link(void *arg)
 			device_printf(adapter->dev, "System shutdown required!\n");
 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
 		}
+#ifdef PCI_IOV
+		if (reg_eicr & IXGBE_EICR_MAILBOX)
+			taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
+#endif
 	}
 
 	/* Pluggable optics-related interrupt */
@@ -1688,7 +1784,7 @@ ixgbe_media_status(struct ifnet * ifp, s
 	}
 
 	ifmr->ifm_status |= IFM_ACTIVE;
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer;
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
@@ -1913,20 +2009,19 @@ ixgbe_set_promisc(struct adapter *adapte
 static void
 ixgbe_set_multi(struct adapter *adapter)
 {
-	struct ether_multi *enm;
-	struct ether_multistep step;
-	u32	fctrl;
-	u8	*mta;
-	u8	*update_ptr;
-	int	mcnt = 0;
-	struct ethercom *ec = &adapter->osdep.ec;
-	struct ifnet   *ifp = adapter->ifp;
+	u32			fctrl;
+	u8			*update_ptr;
+	struct ixgbe_mc_addr	*mta;
+	int			mcnt = 0;
+	struct ifnet		*ifp = adapter->ifp;
+	struct ethercom		*ec = &adapter->osdep.ec;
+	struct ether_multi	*enm;
+	struct ether_multistep	step;
 
 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
 
 	mta = adapter->mta;
-	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
-	    MAX_NUM_MULTICAST_ADDRESSES);
+	bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
 
 	ifp->if_flags &= ~IFF_ALLMULTI;
 	ETHER_FIRST_MULTI(step, ec, enm);
@@ -1938,8 +2033,8 @@ ixgbe_set_multi(struct adapter *adapter)
 			break;
 		}
 		bcopy(enm->enm_addrlo,
-		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
-		    IXGBE_ETH_LENGTH_OF_ADDRESS);
+		    mta[mcnt].addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+		mta[mcnt].vmdq = adapter->pool;
 		mcnt++;
 		ETHER_NEXT_MULTI(step, enm);
 	}
@@ -1955,7 +2050,7 @@ ixgbe_set_multi(struct adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
 	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
-		update_ptr = mta;
+		update_ptr = (u8 *)mta;
 		ixgbe_update_mc_addr_list(&adapter->hw,
 		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
 	}
@@ -1971,13 +2066,13 @@ ixgbe_set_multi(struct adapter *adapter)
 static u8 *
 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
 {
-	u8 *addr = *update_ptr;
-	u8 *newptr;
-	*vmdq = 0;
-
-	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
-	*update_ptr = newptr;
-	return addr;
+	struct ixgbe_mc_addr *mta;
+
+	mta = (struct ixgbe_mc_addr *)*update_ptr;
+	*vmdq = mta->vmdq;
+
+	*update_ptr = (u8*)(mta + 1);
+	return (mta->addr);
 }
 
 
@@ -2068,6 +2163,7 @@ ixgbe_local_timer(void *arg)
 	IXGBE_CORE_UNLOCK(adapter);
 }
 
+
 /*
 ** Note: this routine updates the OS on the link state
 **	the real check of the hardware only happens with
@@ -2091,6 +2187,9 @@ ixgbe_update_link_status(struct adapter 
 			/* Update DMA coalescing config */
 			ixgbe_config_dmac(adapter);
 			if_link_state_change(ifp, LINK_STATE_UP);
+#ifdef PCI_IOV
+			ixgbe_ping_all_vfs(adapter);
+#endif
 		}
 	} else { /* Link down */
 		if (adapter->link_active == TRUE) {
@@ -2098,6 +2197,9 @@ ixgbe_update_link_status(struct adapter 
 				device_printf(dev,"Link is Down\n");
 			if_link_state_change(ifp, LINK_STATE_DOWN);
 			adapter->link_active = FALSE;
+#ifdef PCI_IOV
+			ixgbe_ping_all_vfs(adapter);
+#endif
 		}
 	}
 
@@ -2214,7 +2316,7 @@ ixgbe_setup_optics(struct adapter *adapt
 	struct ixgbe_hw *hw = &adapter->hw;
 	int		layer;
 
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
 		adapter->optics = IFM_10G_T;
@@ -2466,15 +2568,17 @@ ixgbe_allocate_msix(struct adapter *adap
 		aprint_normal_dev(dev, "for TX/RX, interrupting at %s",
 		    intrstr);
 		if (error == 0) {
+#ifdef IXGBE_DEBUG
 #ifdef	RSS
 			aprintf_normal(
 			    ", bound RSS bucket %d to CPU %d\n",
 			    i, cpu_id);
 #else
-			aprint_verbose(
+			aprint_normal(
 			    ", bound queue %d to cpu %d\n",
 			    i, cpu_id);
 #endif
+#endif /* IXGBE_DEBUG */
 		} else
 			aprint_normal("\n");
 #ifndef IXGBE_LEGACY_TX
@@ -2528,6 +2632,9 @@ ixgbe_allocate_msix(struct adapter *adap
 	    softint_establish(SOFTINT_NET, ixgbe_handle_mod, adapter);
 	adapter->msf_si =
 	    softint_establish(SOFTINT_NET, ixgbe_handle_msf, adapter);
+#ifdef PCI_IOV
+	TASK_INIT(&adapter->mbx_task, 0, ixgbe_handle_mbx, adapter);
+#endif
 	adapter->phy_si =
 	    softint_establish(SOFTINT_NET, ixgbe_handle_phy, adapter);
 #ifdef IXGBE_FDIR
@@ -2788,7 +2895,7 @@ ixgbe_add_media_types(struct adapter *ad
 	device_t dev = adapter->dev;
 	int layer;
 
-	layer = ixgbe_get_supported_physical_layer(hw);
+	layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
 
 	/* Media types with matching NetBSD media defines */
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
@@ -2908,19 +3015,20 @@ ixgbe_initialize_transmit_units(struct a
 	for (int i = 0; i < adapter->num_queues; i++, txr++) {
 		u64	tdba = txr->txdma.dma_paddr;
 		u32	txctrl = 0;
+		int	j = txr->me;
 
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
 		       (tdba & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j),
 		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
 
 		/* Setup the HW Tx Head and Tail descriptor pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
 
 		/* Cache the tail address */
-		txr->tail = IXGBE_TDT(txr->me);
+		txr->tail = IXGBE_TDT(j);
 
 		/* Set the processing limit */
 		txr->process_limit = ixgbe_tx_process_limit;
@@ -2928,23 +3036,23 @@ ixgbe_initialize_transmit_units(struct a
 		/* Disable Head Writeback */
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
 			break;
 		case ixgbe_mac_82599EB:
 		case ixgbe_mac_X540:
 		default:
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
 			break;
                 }
 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
 			break;
 		case ixgbe_mac_82599EB:
 		case ixgbe_mac_X540:
 		default:
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
 			break;
 		}
 
@@ -2952,6 +3060,9 @@ ixgbe_initialize_transmit_units(struct a
 
 	if (hw->mac.type != ixgbe_mac_82598EB) {
 		u32 dmatxctl, rttdcs;
+#ifdef PCI_IOV
+		enum ixgbe_iov_mode mode = ixgbe_get_iov_mode(adapter);
+#endif
 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		dmatxctl |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
@@ -2959,7 +3070,11 @@ ixgbe_initialize_transmit_units(struct a
 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+#ifdef PCI_IOV
+		IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(mode));
+#else
 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+#endif
 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 	}
@@ -2971,17 +3086,14 @@ static void
 ixgbe_initialise_rss_mapping(struct adapter *adapter)
 {
 	struct ixgbe_hw	*hw = &adapter->hw;
-	uint32_t reta;
-	int i, j, queue_id, table_size;
-	int index_mult;
-	uint32_t rss_key[10];
-	uint32_t mrqc;
+	u32 reta = 0, mrqc, rss_key[10];
+	int queue_id, table_size, index_mult;
 #ifdef	RSS
-	uint32_t rss_hash_config;
+	u32 rss_hash_config;
+#endif
+#ifdef PCI_IOV
+	enum ixgbe_iov_mode mode;
 #endif
-
-	/* Setup RSS */
-	reta = 0;
 
 #ifdef	RSS
 	/* Fetch the configured RSS key */
@@ -3007,7 +3119,7 @@ ixgbe_initialise_rss_mapping(struct adap
 	}
 
 	/* Set up the redirection table */
-	for (i = 0, j = 0; i < table_size; i++, j++) {
+	for (int i = 0, j = 0; i < table_size; i++, j++) {
 		if (j == adapter->num_queues) j = 0;
 #ifdef	RSS
 		/*
@@ -3036,7 +3148,7 @@ ixgbe_initialise_rss_mapping(struct adap
 	}
 
 	/* Now fill our hash function seeds */
-	for (i = 0; i < 10; i++)
+	for (int i = 0; i < 10; i++)
 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
 
 	/* Perform hash on these packet types */
@@ -3074,19 +3186,16 @@ ixgbe_initialise_rss_mapping(struct adap
 	mrqc = IXGBE_MRQC_RSSEN
 	     | IXGBE_MRQC_RSS_FIELD_IPV4
 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-#if 0
-	     | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
-#endif
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
 	     | IXGBE_MRQC_RSS_FIELD_IPV6
 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
-#if 0
-	     | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
-	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP
-#endif
 	;
 #endif /* RSS */
+#ifdef PCI_IOV
+	mode = ixgbe_get_iov_mode(adapter);
+	mrqc |= ixgbe_get_mrqc(mode);
+#endif
 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
@@ -3146,16 +3255,17 @@ ixgbe_initialize_receive_units(struct ad
 
 	for (i = 0; i < adapter->num_queues; i++, rxr++) {
 		u64 rdba = rxr->rxdma.dma_paddr;
+		int j = rxr->me;
 
 		/* Setup the Base and Length of the Rx Descriptor Ring */
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
 			       (rdba & 0x00000000ffffffffULL));
-		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
-		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
+		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j),
 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 
 		/* Set up the SRRCTL register */
-		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j));
 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 		srrctl |= bufsz;
@@ -3167,18 +3277,16 @@ ixgbe_initialize_receive_units(struct ad
 		 * so we do not need to clear the bit, but do it just in case
 		 * this code is moved elsewhere.
 		 */
-		if (adapter->num_queues > 1 &&
-		    adapter->hw.fc.requested_mode == ixgbe_fc_none) {
+		if (adapter->num_queues > 1 && adapter->fc == ixgbe_fc_none)
 			srrctl |= IXGBE_SRRCTL_DROP_EN;
-		} else {
+		else
 			srrctl &= ~IXGBE_SRRCTL_DROP_EN;
-		}
 
-		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
 
 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
 
 		/* Set the processing limit */
 		rxr->process_limit = ixgbe_rx_process_limit;
@@ -3294,9 +3402,9 @@ ixgbe_setup_vlan_hw_support(struct adapt
 		rxr = &adapter->rx_rings[i];
 		/* On 82599 the VLAN enable is per/queue in RXDCTL */
 		if (hw->mac.type != ixgbe_mac_82598EB) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
 			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
 		}
 		rxr->vtag_strip = TRUE;
 	}
@@ -3346,6 +3454,9 @@ ixgbe_enable_intr(struct adapter *adapte
 #ifdef IXGBE_FDIR
 			mask |= IXGBE_EIMS_FLOW_DIR;
 #endif
+#ifdef PCI_IOV
+			mask |= IXGBE_EIMS_MAILBOX;
+#endif
 			break;
 		case ixgbe_mac_X540:
 			/* Detect if Thermal Sensor is enabled */
@@ -3369,6 +3480,9 @@ ixgbe_enable_intr(struct adapter *adapte
 #ifdef IXGBE_FDIR
 			mask |= IXGBE_EIMS_FLOW_DIR;
 #endif
+#ifdef PCI_IOV
+			mask |= IXGBE_EIMS_MAILBOX;
+#endif
 		/* falls through */
 		default:
 			break;
@@ -3382,6 +3496,9 @@ ixgbe_enable_intr(struct adapter *adapte
 		/* Don't autoclear Link */
 		mask &= ~IXGBE_EIMS_OTHER;
 		mask &= ~IXGBE_EIMS_LSC;
+#ifdef PCI_IOV
+		mask &= ~IXGBE_EIMS_MAILBOX;
+#endif
 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 	}
 
@@ -3578,8 +3695,8 @@ ixgbe_set_ivar(struct adapter *adapter, 
 static void
 ixgbe_configure_ivars(struct adapter *adapter)
 {
-	struct  ix_queue *que = adapter->queues;
-	u32 newitr;
+	struct  ix_queue	*que = adapter->queues;
+	u32			newitr;
 
 	if (ixgbe_max_interrupt_rate > 0)
 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
@@ -3593,10 +3710,12 @@ ixgbe_configure_ivars(struct adapter *ad
 	}
 
         for (int i = 0; i < adapter->num_queues; i++, que++) {
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+		struct tx_ring *txr = &adapter->tx_rings[i];
 		/* First the RX queue entry */
-                ixgbe_set_ivar(adapter, i, que->msix, 0);
+                ixgbe_set_ivar(adapter, rxr->me, que->msix, 0);
 		/* ... and the TX */
-		ixgbe_set_ivar(adapter, i, que->msix, 1);
+		ixgbe_set_ivar(adapter, txr->me, que->msix, 1);
 		/* Set an Initial EITR value */
                 IXGBE_WRITE_REG(&adapter->hw,
                     IXGBE_EITR(que->msix), newitr);
@@ -3610,7 +3729,8 @@ ixgbe_configure_ivars(struct adapter *ad
 ** ixgbe_sfp_probe - called in the local timer to
 ** determine if a port had optics inserted.
 */  
-static bool ixgbe_sfp_probe(struct adapter *adapter)
+static bool
+ixgbe_sfp_probe(struct adapter *adapter)
 {
 	struct ixgbe_hw	*hw = &adapter->hw;
 	device_t	dev = adapter->dev;
@@ -3670,6 +3790,7 @@ ixgbe_handle_mod(void *context)
 		    "Unsupported SFP+ module type was detected.\n");
 		return;
 	}
+
 	err = hw->mac.ops.setup_sfp(hw);
 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
 		device_printf(dev,
@@ -3794,9 +3915,7 @@ ixgbe_check_eee_support(struct adapter *
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
-	adapter->eee_support = adapter->eee_enabled =
-	    (hw->device_id == IXGBE_DEV_ID_X550T ||
-	        hw->device_id == IXGBE_DEV_ID_X550EM_X_KR);
+	adapter->eee_enabled = !!(hw->mac.ops.setup_eee);
 }
 
 /*
@@ -4187,8 +4306,7 @@ ixgbe_add_device_sysctls(struct adapter 
 			aprint_error_dev(dev, "could not create sysctl\n");
 
 	/* for X550T and X550EM backplane devices */
-	if (hw->device_id == IXGBE_DEV_ID_X550T ||
-	    hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) {
+	if (hw->mac.ops.setup_eee) {
 		const struct sysctlnode *eee_node;
 
 		if (sysctl_createv(log, 0, &rnode, &eee_node,
@@ -4911,6 +5029,7 @@ ixgbe_sysctl_eee_enable(SYSCTLFN_ARGS)
 {
 	struct sysctlnode node = *rnode;
 	struct adapter *adapter = (struct adapter *)node.sysctl_data;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct ifnet *ifp = adapter->ifp;
 	int new_eee_enabled, error = 0;
 
@@ -4922,7 +5041,7 @@ ixgbe_sysctl_eee_enable(SYSCTLFN_ARGS)
 	if (new_eee_enabled == adapter->eee_enabled)
 		return (0);
 
-	if (new_eee_enabled > 0 && !adapter->eee_support)
+	if (new_eee_enabled > 0 && !hw->mac.ops.setup_eee)
 		return (ENODEV);
 	else
 		adapter->eee_enabled = !!(new_eee_enabled);
@@ -5045,10 +5164,19 @@ ixgbe_enable_rx_drop(struct adapter *ada
         struct ixgbe_hw *hw = &adapter->hw;
 
 	for (int i = 0; i < adapter->num_queues; i++) {
-        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
         	srrctl |= IXGBE_SRRCTL_DROP_EN;
-        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
 	}
+#ifdef PCI_IOV
+	/* enable drop for each vf */
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
+		    IXGBE_QDE_ENABLE));
+	}
+#endif
 }
 
 static void
@@ -5057,12 +5185,738 @@ ixgbe_disable_rx_drop(struct adapter *ad
         struct ixgbe_hw *hw = &adapter->hw;
 
 	for (int i = 0; i < adapter->num_queues; i++) {
-        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+		struct rx_ring *rxr = &adapter->rx_rings[i];
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me));
         	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
-        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl);
+	}
+#ifdef PCI_IOV
+	/* disable drop for each vf */
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+		    (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT)));
+	}
+#endif
+}
+
+#ifdef PCI_IOV
+
+/*
+** Support functions for SRIOV/VF management
+*/
+
+static void
+ixgbe_ping_all_vfs(struct adapter *adapter)
+{
+	struct ixgbe_vf *vf;
+
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		vf = &adapter->vfs[i];
+		if (vf->flags & IXGBE_VF_ACTIVE)
+			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
+	}
+}
+
+
+static void
+ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
+    uint16_t tag)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vmolr, vmvir;
+
+	hw = &adapter->hw;
+
+	vf->vlan_tag = tag;
+	
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
+
+	/* Do not receive packets that pass inexact filters. */
+	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
+
+	/* Disable Multicast Promiscuous Mode. */
+	vmolr &= ~IXGBE_VMOLR_MPE;
+
+	/* Accept broadcasts. */
+	vmolr |= IXGBE_VMOLR_BAM;
+
+	if (tag == 0) {
+		/* Accept non-vlan tagged traffic. */
+		//vmolr |= IXGBE_VMOLR_AUPE;
+
+		/* Allow VM to tag outgoing traffic; no default tag. */
+		vmvir = 0;
+	} else {
+		/* Require vlan-tagged traffic. */
+		vmolr &= ~IXGBE_VMOLR_AUPE;
+
+		/* Tag all traffic with provided vlan tag. */
+		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
+}
+
+
+static boolean_t
+ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+
+	/*
+	 * Frame size compatibility between PF and VF is only a problem on
+	 * 82599-based cards.  X540 and later support any combination of jumbo
+	 * frames on PFs and VFs.
+	 */
+	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
+		return (TRUE);
+
+	switch (vf->api_ver) {
+	case IXGBE_API_VER_1_0:
+	case IXGBE_API_VER_UNKNOWN:
+		/*
+		 * On legacy (1.0 and older) VF versions, we don't support jumbo
+		 * frames on either the PF or the VF.
+		 */
+		if (adapter->max_frame_size > ETHER_MAX_LEN ||
+		    vf->max_frame_size > ETHER_MAX_LEN)
+		    return (FALSE);
+
+		return (TRUE);
+
+		break;
+	case IXGBE_API_VER_1_1:
+	default:
+		/*
+		 * 1.1 or later VF versions always work if they aren't using
+		 * jumbo frames.
+		 */
+		if (vf->max_frame_size <= ETHER_MAX_LEN)
+			return (TRUE);
+
+		/*
+		 * Jumbo frames only work with VFs if the PF is also using jumbo
+		 * frames.
+		 */
+		if (adapter->max_frame_size <= ETHER_MAX_LEN)
+			return (TRUE);
+
+		return (FALSE);
+	
+	}
+}
+
+
+static void
+ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
+
+	// XXX clear multicast addresses
+
+	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
+
+	vf->api_ver = IXGBE_API_VER_UNKNOWN;
+}
+
+
+static void
+ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vf_index, vfte;
+
+	hw = &adapter->hw;
+
+	vf_index = IXGBE_VF_INDEX(vf->pool);
+	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
+	vfte |= IXGBE_VF_BIT(vf->pool);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
+}
+
+
+static void
+ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vf_index, vfre;
+
+	hw = &adapter->hw;
+	
+	vf_index = IXGBE_VF_INDEX(vf->pool);
+	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
+	if (ixgbe_vf_frame_size_compatible(adapter, vf))
+		vfre |= IXGBE_VF_BIT(vf->pool);
+	else
+		vfre &= ~IXGBE_VF_BIT(vf->pool);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
+}
+
+
+static void
+ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
+{
+	struct ixgbe_hw *hw;
+	uint32_t ack;
+	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
+
+	hw = &adapter->hw;
+
+	ixgbe_process_vf_reset(adapter, vf);
+
+	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
+		ixgbe_set_rar(&adapter->hw, vf->rar_index,
+		    vf->ether_addr, vf->pool, TRUE);
+		ack = IXGBE_VT_MSGTYPE_ACK;
+	} else
+		ack = IXGBE_VT_MSGTYPE_NACK;
+
+	ixgbe_vf_enable_transmit(adapter, vf);
+	ixgbe_vf_enable_receive(adapter, vf);
+
+	vf->flags |= IXGBE_VF_CTS;
+
+	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
+	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
+	resp[3] = hw->mac.mc_filter_type;
+	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
+}
+
+
+static void
+ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
+{
+	uint8_t *mac;
+
+	mac = (uint8_t*)&msg[1];
+
+	/* Check that the VF has permission to change the MAC address. */
+	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+
+	if (ixgbe_validate_mac_addr(mac) != 0) {
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+
+	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
+
+	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, 
+	    vf->pool, TRUE);
+
+	ixgbe_send_vf_ack(adapter, vf, msg[0]);
+}
+
+
+/*
+** VF multicast addresses are set by using the appropriate bit in
+** 1 of 128 32 bit addresses (4096 possible).
+*/
+static void
+ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
+{
+	u16	*list = (u16*)&msg[1];
+	int	entries;
+	u32	vmolr, vec_bit, vec_reg, mta_reg;
+
+	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+	entries = min(entries, IXGBE_MAX_VF_MC);
+
+	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
+
+	vf->num_mc_hashes = entries;
+
+	/* Set the appropriate MTA bit */
+	for (int i = 0; i < entries; i++) {
+		vf->mc_hash[i] = list[i];
+		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
+                vec_bit = vf->mc_hash[i] & 0x1F;
+                mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
+                mta_reg |= (1 << vec_bit);
+                IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
+        }
+
+	vmolr |= IXGBE_VMOLR_ROMPE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
+	ixgbe_send_vf_ack(adapter, vf, msg[0]);
+	return;
+}
+
+
+static void
+ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
+{
+	struct ixgbe_hw *hw;
+	int enable;
+	uint16_t tag;
+
+	hw = &adapter->hw;
+	enable = IXGBE_VT_MSGINFO(msg[0]);
+	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
+
+	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+
+	/* It is illegal to enable vlan tag 0. */
+	if (tag == 0 && enable != 0){
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+	
+	ixgbe_set_vfta(hw, tag, vf->pool, enable);
+	ixgbe_send_vf_ack(adapter, vf, msg[0]);
+}
+
+
+static void
+ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vf_max_size, pf_max_size, mhadd;
+
+	hw = &adapter->hw;
+	vf_max_size = msg[1];
+
+	if (vf_max_size < ETHER_CRC_LEN) {
+		/* We intentionally ACK invalid LPE requests. */
+		ixgbe_send_vf_ack(adapter, vf, msg[0]);
+		return;
+	}
+
+	vf_max_size -= ETHER_CRC_LEN;
+
+	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
+		/* We intentionally ACK invalid LPE requests. */
+		ixgbe_send_vf_ack(adapter, vf, msg[0]);
+		return;
+	}
+
+	vf->max_frame_size = vf_max_size;
+	ixgbe_update_max_frame(adapter, vf->max_frame_size);
+
+	/*
+	 * We might have to disable reception to this VF if the frame size is
+	 * not compatible with the config on the PF.
+	 */
+	ixgbe_vf_enable_receive(adapter, vf);
+
+	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
+
+	if (pf_max_size < adapter->max_frame_size) {
+		mhadd &= ~IXGBE_MHADD_MFS_MASK;
+		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+	}
+
+	ixgbe_send_vf_ack(adapter, vf, msg[0]);
+}
+
+
+static void
+ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
+    uint32_t *msg)
+{
+	//XXX implement this
+	ixgbe_send_vf_nack(adapter, vf, msg[0]);
+}
+
+
+static void
+ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
+    uint32_t *msg)
+{
+
+	switch (msg[0]) {
+	case IXGBE_API_VER_1_0:
+	case IXGBE_API_VER_1_1:
+		vf->api_ver = msg[0];
+		ixgbe_send_vf_ack(adapter, vf, msg[0]);
+		break;
+	default:
+		vf->api_ver = IXGBE_API_VER_UNKNOWN;
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		break;
+	}
+}
+
+
+static void
+ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf,
+    uint32_t *msg)
+{
+	struct ixgbe_hw *hw;
+	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
+	int num_queues;
+
+	hw = &adapter->hw;
+
+	/* GET_QUEUES is not supported on pre-1.1 APIs. */
+	switch (msg[0]) {
+	case IXGBE_API_VER_1_0:
+	case IXGBE_API_VER_UNKNOWN:
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+
+	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | 
+	    IXGBE_VT_MSGTYPE_CTS;
+
+	num_queues = ixgbe_vf_queues(ixgbe_get_iov_mode(adapter));
+	resp[IXGBE_VF_TX_QUEUES] = num_queues;
+	resp[IXGBE_VF_RX_QUEUES] = num_queues;
+	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
+	resp[IXGBE_VF_DEF_QUEUE] = 0;
+
+	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
+}
+
+
+static void
+ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
+	int error;
+
+	hw = &adapter->hw;
+
+	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
+
+	if (error != 0)
+		return;
+
+	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
+	    adapter->ifp->if_xname, msg[0], vf->pool);
+	if (msg[0] == IXGBE_VF_RESET) {
+		ixgbe_vf_reset_msg(adapter, vf, msg);
+		return;
+	}
+
+	if (!(vf->flags & IXGBE_VF_CTS)) {
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+		return;
+	}
+
+	switch (msg[0] & IXGBE_VT_MSG_MASK) {
+	case IXGBE_VF_SET_MAC_ADDR:
+		ixgbe_vf_set_mac(adapter, vf, msg);
+		break;
+	case IXGBE_VF_SET_MULTICAST:
+		ixgbe_vf_set_mc_addr(adapter, vf, msg);
+		break;
+	case IXGBE_VF_SET_VLAN:
+		ixgbe_vf_set_vlan(adapter, vf, msg);
+		break;
+	case IXGBE_VF_SET_LPE:
+		ixgbe_vf_set_lpe(adapter, vf, msg);
+		break;
+	case IXGBE_VF_SET_MACVLAN:
+		ixgbe_vf_set_macvlan(adapter, vf, msg);
+		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		ixgbe_vf_api_negotiate(adapter, vf, msg);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		ixgbe_vf_get_queues(adapter, vf, msg);
+		break;
+	default:
+		ixgbe_send_vf_nack(adapter, vf, msg[0]);
+	}
+}
+
+
+/*
+ * Tasklet for handling VF -> PF mailbox messages.
+ */
+static void
+ixgbe_handle_mbx(void *context, int pending)
+{
+	struct adapter *adapter;
+	struct ixgbe_hw *hw;
+	struct ixgbe_vf *vf;
+	int i;
+
+	adapter = context;
+	hw = &adapter->hw;
+
+	IXGBE_CORE_LOCK(adapter);
+	for (i = 0; i < adapter->num_vfs; i++) {
+		vf = &adapter->vfs[i];
+
+		if (vf->flags & IXGBE_VF_ACTIVE) {
+			if (ixgbe_check_for_rst(hw, vf->pool) == 0)
+				ixgbe_process_vf_reset(adapter, vf);
+
+			if (ixgbe_check_for_msg(hw, vf->pool) == 0)
+				ixgbe_process_vf_msg(adapter, vf);
+
+			if (ixgbe_check_for_ack(hw, vf->pool) == 0)
+				ixgbe_process_vf_ack(adapter, vf);
+		}
+	}
+	IXGBE_CORE_UNLOCK(adapter);
+}
+
+
+static int
+ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
+{
+	struct adapter *adapter;
+	enum ixgbe_iov_mode mode;
+
+	adapter = device_get_softc(dev);
+	adapter->num_vfs = num_vfs;
+	mode = ixgbe_get_iov_mode(adapter);
+
+	if (num_vfs > ixgbe_max_vfs(mode)) {
+		adapter->num_vfs = 0;
+		return (ENOSPC);
+	}
+
+	IXGBE_CORE_LOCK(adapter);
+
+	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE, 
+	    M_NOWAIT | M_ZERO);
+
+	if (adapter->vfs == NULL) {
+		adapter->num_vfs = 0;
+		IXGBE_CORE_UNLOCK(adapter);
+		return (ENOMEM);
+	}
+
+	ixgbe_init_locked(adapter);
+
+	IXGBE_CORE_UNLOCK(adapter);
+
+	return (0);
+}
+
+
+static void
+ixgbe_uninit_iov(device_t dev)
+{
+	struct ixgbe_hw *hw;
+	struct adapter *adapter;
+	uint32_t pf_reg, vf_reg;
+
+	adapter = device_get_softc(dev);
+	hw = &adapter->hw;
+
+	IXGBE_CORE_LOCK(adapter);
+
+	/* Enable rx/tx for the PF and disable it for all VFs. */
+	pf_reg = IXGBE_VF_INDEX(adapter->pool);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg),
+	    IXGBE_VF_BIT(adapter->pool));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg),
+	    IXGBE_VF_BIT(adapter->pool));
+
+	if (pf_reg == 0)
+		vf_reg = 1;
+	else
+		vf_reg = 0;
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
+
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
+
+	free(adapter->vfs, M_IXGBE);
+	adapter->vfs = NULL;
+	adapter->num_vfs = 0;
+
+	IXGBE_CORE_UNLOCK(adapter);
+}
+
+
+static void
+ixgbe_initialize_iov(struct adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
+	enum ixgbe_iov_mode mode;
+	int i;
+
+	mode = ixgbe_get_iov_mode(adapter);
+	if (mode == IXGBE_NO_VM)
+		return;
+
+	IXGBE_CORE_LOCK_ASSERT(adapter);
+
+	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+
+	switch (mode) {
+	case IXGBE_64_VM:
+		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+		break;
+	case IXGBE_32_VM:
+		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+		break;
+	default:
+		panic("Unexpected SR-IOV mode %d", mode);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+	mtqc = IXGBE_MTQC_VT_ENA;
+	switch (mode) {
+	case IXGBE_64_VM:
+		mtqc |= IXGBE_MTQC_64VF;
+		break;
+	case IXGBE_32_VM:
+		mtqc |= IXGBE_MTQC_32VF;
+		break;
+	default:
+		panic("Unexpected SR-IOV mode %d", mode);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+	
+
+	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
+	switch (mode) {
+	case IXGBE_64_VM:
+		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+		break;
+	case IXGBE_32_VM:
+		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
+		break;
+	default:
+		panic("Unexpected SR-IOV mode %d", mode);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+	
+
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	gcr_ext &= ~IXGBE_GPIE_VTMODE_MASK;
+	switch (mode) {
+	case IXGBE_64_VM:
+		gpie |= IXGBE_GPIE_VTMODE_64;
+		break;
+	case IXGBE_32_VM:
+		gpie |= IXGBE_GPIE_VTMODE_32;
+		break;
+	default:
+		panic("Unexpected SR-IOV mode %d", mode);
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+	/* Enable rx/tx for the PF. */
+	vf_reg = IXGBE_VF_INDEX(adapter->pool);
+	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 
+	    IXGBE_VF_BIT(adapter->pool));
+	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 
+	    IXGBE_VF_BIT(adapter->pool));
+
+	/* Allow VM-to-VM communication. */
+	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
+	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
+	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
+
+	for (i = 0; i < adapter->num_vfs; i++)
+		ixgbe_init_vf(adapter, &adapter->vfs[i]);
+}
+
+
+/*
+** Check the max frame setting of all active VF's
+*/
+static void
+ixgbe_recalculate_max_frame(struct adapter *adapter)
+{
+	struct ixgbe_vf *vf;
+
+	IXGBE_CORE_LOCK_ASSERT(adapter);
+
+	for (int i = 0; i < adapter->num_vfs; i++) {
+		vf = &adapter->vfs[i];
+		if (vf->flags & IXGBE_VF_ACTIVE)
+			ixgbe_update_max_frame(adapter, vf->max_frame_size);
 	}
 }
 
+
+static void
+ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	struct ixgbe_hw *hw;
+	uint32_t vf_index, pfmbimr;
+
+	IXGBE_CORE_LOCK_ASSERT(adapter);
+
+	hw = &adapter->hw;
+
+	if (!(vf->flags & IXGBE_VF_ACTIVE))
+		return;
+
+	vf_index = IXGBE_VF_INDEX(vf->pool);
+	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
+	pfmbimr |= IXGBE_VF_BIT(vf->pool);
+	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
+
+	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
+
+	// XXX multicast addresses
+
+	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
+		ixgbe_set_rar(&adapter->hw, vf->rar_index,
+		    vf->ether_addr, vf->pool, TRUE);
+	}
+
+	ixgbe_vf_enable_transmit(adapter, vf);
+	ixgbe_vf_enable_receive(adapter, vf);
+	
+	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
+}
+
+static int
+ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
+{
+	struct adapter *adapter;
+	struct ixgbe_vf *vf;
+	const void *mac;
+
+	adapter = device_get_softc(dev);
+
+	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
+	    vfnum, adapter->num_vfs));
+
+	IXGBE_CORE_LOCK(adapter);
+	vf = &adapter->vfs[vfnum];
+	vf->pool= vfnum;
+
+	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
+	vf->rar_index = vfnum + 1;
+	vf->default_vlan = 0;
+	vf->max_frame_size = ETHER_MAX_LEN;
+	ixgbe_update_max_frame(adapter, vf->max_frame_size);
+
+	if (nvlist_exists_binary(config, "mac-addr")) {
+		mac = nvlist_get_binary(config, "mac-addr", NULL);
+		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
+		if (nvlist_get_bool(config, "allow-set-mac"))
+			vf->flags |= IXGBE_VF_CAP_MAC;
+	} else
+		/*
+		 * If the administrator has not specified a MAC address then
+		 * we must allow the VF to choose one.
+		 */
+		vf->flags |= IXGBE_VF_CAP_MAC;
+
+	vf->flags = IXGBE_VF_ACTIVE;
+
+	ixgbe_init_vf(adapter, vf);
+	IXGBE_CORE_UNLOCK(adapter);
+
+	return (0);
+}
+#endif /* PCI_IOV */
 static void
 ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
 {

Index: src/sys/dev/pci/ixgbe/ixgbe.h
diff -u src/sys/dev/pci/ixgbe/ixgbe.h:1.12 src/sys/dev/pci/ixgbe/ixgbe.h:1.13
--- src/sys/dev/pci/ixgbe/ixgbe.h:1.12	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ixgbe.h	Fri Dec  2 10:21:43 2016
@@ -58,8 +58,8 @@
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 282289 2015-04-30 22:53:27Z erj $*/
-/*$NetBSD: ixgbe.h,v 1.12 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe.h 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ixgbe.h,v 1.13 2016/12/02 10:21:43 msaitoh Exp $*/
 
 
 #ifndef _IXGBE_H_
@@ -108,6 +108,11 @@
 #include <sys/cpu.h>
 #include <sys/interrupt.h>
 
+#ifdef PCI_IOV
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#endif
+
 #include "ixgbe_netbsd.h"
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
@@ -264,6 +269,29 @@
 	 (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \
 	 (_adapter->hw.mac.type == ixgbe_mac_82599_vf))
 
+#ifdef PCI_IOV
+#define IXGBE_VF_INDEX(vmdq)  ((vmdq) / 32)
+#define IXGBE_VF_BIT(vmdq)    (1 << ((vmdq) % 32))
+
+#define IXGBE_VT_MSG_MASK	0xFFFF
+
+#define IXGBE_VT_MSGINFO(msg)	\
+	(((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_GET_QUEUES_RESP_LEN	5
+
+#define IXGBE_API_VER_1_0	0		
+#define IXGBE_API_VER_2_0	1	/* Solaris API.  Not supported. */
+#define IXGBE_API_VER_1_1	2
+#define IXGBE_API_VER_UNKNOWN	UINT16_MAX
+
+enum ixgbe_iov_mode {
+	IXGBE_64_VM,
+	IXGBE_32_VM,
+	IXGBE_NO_VM
+};
+#endif /* PCI_IOV */
+
 
 /*
  *****************************************************************************
@@ -290,6 +318,7 @@ struct ixgbe_i2c_req {
         u8 data[8];
 };
 
+
 struct ixgbe_tx_buf {
 	union ixgbe_adv_tx_desc	*eop;
 	struct mbuf	*m_head;
@@ -317,6 +346,11 @@ struct ixgbe_dma_alloc {
 	bus_size_t		dma_size;
 };
 
+struct ixgbe_mc_addr {
+	u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+	u32 vmdq;
+};
+
 /*
 ** Driver queue struct: this is the interrupt container
 **  for the associated tx and rx ring.
@@ -417,6 +451,28 @@ struct rx_ring {
 #endif
 };
 
+#ifdef PCI_IOV
+#define IXGBE_VF_CTS		(1 << 0) /* VF is clear to send. */
+#define IXGBE_VF_CAP_MAC	(1 << 1) /* VF is permitted to change MAC. */
+#define IXGBE_VF_CAP_VLAN	(1 << 2) /* VF is permitted to join vlans. */
+#define IXGBE_VF_ACTIVE		(1 << 3) /* VF is active. */
+
+#define IXGBE_MAX_VF_MC 30  /* Max number of multicast entries */
+
+struct ixgbe_vf {
+	u_int		pool;
+	u_int		rar_index;
+	u_int		max_frame_size;
+	uint32_t	flags;
+	uint8_t		ether_addr[ETHER_ADDR_LEN];
+	uint16_t	mc_hash[IXGBE_MAX_VF_MC];
+	uint16_t	num_mc_hashes;
+	uint16_t	default_vlan;
+	uint16_t	vlan_tag;
+	uint16_t	api_ver;
+};
+#endif /* PCI_IOV */
+
 /* Our adapter structure */
 struct adapter {
 	struct ifnet		*ifp;
@@ -464,8 +520,8 @@ struct adapter {
 	bool			link_up;
 	u32 			vector;
 	u16			dmac;
-	bool			eee_support;
 	bool			eee_enabled;
+	u32			phy_layer;
 
 	/* Power management-related */
 	bool			wol_support;
@@ -479,6 +535,9 @@ struct adapter {
 	void			*link_si;  /* Link tasklet */
 	void			*mod_si;   /* SFP tasklet */
 	void			*msf_si;   /* Multispeed Fiber */
+#ifdef PCI_IOV
+	void			*mbx_si;   /* VF -> PF mailbox interrupt */
+#endif /* PCI_IOV */
 #ifdef IXGBE_FDIR
 	int			fdir_reinit;
 	void			*fdir_si;
@@ -509,8 +568,12 @@ struct adapter {
 	u32			num_rx_desc;
 
 	/* Multicast array memory */
-	u8			*mta;
-
+	struct ixgbe_mc_addr	*mta;
+	int			num_vfs;
+	int			pool;
+#ifdef PCI_IOV
+	struct ixgbe_vf		*vfs;
+#endif
 
 	/* Misc stats maintained by the driver */
 	struct evcnt   		dropped_pkts;
@@ -707,4 +770,150 @@ bool	ixgbe_rxeof(struct ix_queue *);
 int	ixgbe_dma_malloc(struct adapter *,
 	    bus_size_t, struct ixgbe_dma_alloc *, int);
 void	ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
+
+#ifdef PCI_IOV
+
+static inline boolean_t
+ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
+{
+	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
+}
+
+static inline void
+ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
+{
+
+	if (vf->flags & IXGBE_VF_CTS)
+		msg |= IXGBE_VT_MSGTYPE_CTS;
+	
+	ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool);
+}
+
+static inline void
+ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
+{
+	msg &= IXGBE_VT_MSG_MASK;
+	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
+}
+
+static inline void
+ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
+{
+	msg &= IXGBE_VT_MSG_MASK;
+	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
+}
+
+static inline void
+ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
+{
+	if (!(vf->flags & IXGBE_VF_CTS))
+		ixgbe_send_vf_nack(adapter, vf, 0);
+}
+
+static inline enum ixgbe_iov_mode
+ixgbe_get_iov_mode(struct adapter *adapter)
+{
+	if (adapter->num_vfs == 0)
+		return (IXGBE_NO_VM);
+	if (adapter->num_queues <= 2)
+		return (IXGBE_64_VM);
+	else if (adapter->num_queues <= 4)
+		return (IXGBE_32_VM);
+	else
+		return (IXGBE_NO_VM);
+}
+
+static inline u16
+ixgbe_max_vfs(enum ixgbe_iov_mode mode)
+{
+	/*
+	 * We return odd numbers below because we
+	 * reserve 1 VM's worth of queues for the PF.
+	 */
+	switch (mode) {
+	case IXGBE_64_VM:
+		return (63);
+	case IXGBE_32_VM:
+		return (31);
+	case IXGBE_NO_VM:
+	default:
+		return (0);
+	}
+}
+
+static inline int
+ixgbe_vf_queues(enum ixgbe_iov_mode mode)
+{
+	switch (mode) {
+	case IXGBE_64_VM:
+		return (2);
+	case IXGBE_32_VM:
+		return (4);
+	case IXGBE_NO_VM:
+	default:
+		return (0);
+	}
+}
+
+static inline int
+ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num)
+{
+	return ((vfnum * ixgbe_vf_queues(mode)) + num);
+}
+
+static inline int
+ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num)
+{
+	return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num));
+}
+
+static inline void
+ixgbe_update_max_frame(struct adapter * adapter, int max_frame)
+{
+	if (adapter->max_frame_size < max_frame)
+		adapter->max_frame_size = max_frame;
+}
+
+static inline u32
+ixgbe_get_mrqc(enum ixgbe_iov_mode mode)
+{
+       u32 mrqc = 0;
+       switch (mode) {
+       case IXGBE_64_VM:
+               mrqc = IXGBE_MRQC_VMDQRSS64EN;
+               break;
+       case IXGBE_32_VM:
+               mrqc = IXGBE_MRQC_VMDQRSS32EN;
+               break;
+        case IXGBE_NO_VM:
+                mrqc = 0;
+                break;
+       default:
+            panic("Unexpected SR-IOV mode %d", mode);
+       }
+        return(mrqc);
+}
+
+
+static inline u32
+ixgbe_get_mtqc(enum ixgbe_iov_mode mode)
+{
+       uint32_t mtqc = 0;
+        switch (mode) {
+        case IXGBE_64_VM:
+               mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
+                break;
+        case IXGBE_32_VM:
+               mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
+                break;
+        case IXGBE_NO_VM:
+                mtqc = IXGBE_MTQC_64Q_1PB;
+                break;
+        default:
+                panic("Unexpected SR-IOV mode %d", mode);
+        }
+        return(mtqc);
+}
+#endif /* PCI_IOV */
+
 #endif /* _IXGBE_H_ */

Index: src/sys/dev/pci/ixgbe/ixgbe_mbx.h
diff -u src/sys/dev/pci/ixgbe/ixgbe_mbx.h:1.6 src/sys/dev/pci/ixgbe/ixgbe_mbx.h:1.7
--- src/sys/dev/pci/ixgbe/ixgbe_mbx.h:1.6	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ixgbe_mbx.h	Fri Dec  2 10:21:43 2016
@@ -30,8 +30,8 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.h 282289 2015-04-30 22:53:27Z erj $*/
-/*$NetBSD: ixgbe_mbx.h,v 1.6 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.h 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ixgbe_mbx.h,v 1.7 2016/12/02 10:21:43 msaitoh Exp $*/
 
 #ifndef _IXGBE_MBX_H_
 #define _IXGBE_MBX_H_
@@ -81,6 +81,21 @@
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET		0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
@@ -107,6 +122,18 @@
 
 #define IXGBE_PF_CONTROL_MSG		0x0100 /* PF control message */
 
+/* mailbox API, version 2.0 VF requests */
+#define IXGBE_VF_API_NEGOTIATE		0x08 /* negotiate API version */
+#define IXGBE_VF_GET_QUEUES		0x09 /* get queue configuration */
+#define IXGBE_VF_ENABLE_MACADDR		0x0A /* enable MAC address */
+#define IXGBE_VF_DISABLE_MACADDR	0x0B /* disable MAC address */
+#define IXGBE_VF_GET_MACADDRS		0x0C /* get all configured MAC addrs */
+#define IXGBE_VF_SET_MCAST_PROMISC	0x0D /* enable multicast promiscuous */
+#define IXGBE_VF_GET_MTU		0x0E /* get bounds on MTU */
+#define IXGBE_VF_SET_MTU		0x0F /* set a specific MTU */
+
+/* mailbox API, version 2.0 PF requests */
+#define IXGBE_PF_TRANSPARENT_VLAN	0x0101 /* enable transparent vlan */
 
 #define IXGBE_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
 #define IXGBE_VF_MBX_INIT_DELAY		500  /* microseconds between retries */

Index: src/sys/dev/pci/ixgbe/ixgbe_vf.c
diff -u src/sys/dev/pci/ixgbe/ixgbe_vf.c:1.8 src/sys/dev/pci/ixgbe/ixgbe_vf.c:1.9
--- src/sys/dev/pci/ixgbe/ixgbe_vf.c:1.8	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ixgbe_vf.c	Fri Dec  2 10:21:43 2016
@@ -30,8 +30,8 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_vf.c 282289 2015-04-30 22:53:27Z erj $*/
-/*$NetBSD: ixgbe_vf.c,v 1.8 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_vf.c 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ixgbe_vf.c,v 1.9 2016/12/02 10:21:43 msaitoh Exp $*/
 
 
 #include "ixgbe_api.h"
@@ -186,6 +186,8 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *h
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	hw->mac.ops.stop_adapter(hw);
 
+	/* reset the api version */
+	hw->api_version = ixgbe_mbox_api_10;
 
 	DEBUGOUT("Issuing a function level reset to MAC\n");
 
@@ -667,6 +669,57 @@ int ixgbevf_negotiate_api_version(struct
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
 		       unsigned int *default_tc)
 {
-	UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc);
-	return IXGBE_SUCCESS;
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUES;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+		/* in case of unknown state assume we cannot tag frames */
+		if (*num_tcs > hw->mac.max_rx_queues)
+			*num_tcs = 1;
+
+		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
+		/* default to queue 0 on out-of-bounds queue number */
+		if (*default_tc >= hw->mac.max_tx_queues)
+			*default_tc = 0;
+	}
+
+	return err;
 }

Index: src/sys/dev/pci/ixgbe/ixv.c
diff -u src/sys/dev/pci/ixgbe/ixv.c:1.22 src/sys/dev/pci/ixgbe/ixv.c:1.23
--- src/sys/dev/pci/ixgbe/ixv.c:1.22	Thu Dec  1 06:56:28 2016
+++ src/sys/dev/pci/ixgbe/ixv.c	Fri Dec  2 10:21:43 2016
@@ -30,8 +30,8 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 282289 2015-04-30 22:53:27Z erj $*/
-/*$NetBSD: ixv.c,v 1.22 2016/12/01 06:56:28 msaitoh Exp $*/
+/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 283881 2015-06-01 17:15:25Z jfv $*/
+/*$NetBSD: ixv.c,v 1.23 2016/12/02 10:21:43 msaitoh Exp $*/
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -42,7 +42,7 @@
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixv_driver_version[] = "1.2.5";
+char ixv_driver_version[] = "1.4.0";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -168,6 +168,11 @@ MODULE_DEPEND(ixv, ether, 1, 1, 1);
 ** TUNEABLE PARAMETERS:
 */
 
+/* Number of Queues - do not exceed MSIX vectors - 1 */
+static int ixv_num_queues = 1;
+#define	TUNABLE_INT(__x, __y)
+TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
+
 /*
 ** AIM: Adaptive Interrupt Moderation
 ** which means that the interrupt rate
@@ -175,7 +180,6 @@ MODULE_DEPEND(ixv, ether, 1, 1, 1);
 ** traffic for that interrupt vector
 */
 static int ixv_enable_aim = FALSE;
-#define	TUNABLE_INT(__x, __y)
 TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
 
 /* How many packets rxeof tries to clean at a time */
@@ -379,6 +383,11 @@ ixv_attach(device_t parent, device_t dev
 
 	ixgbe_reset_hw(hw);
 
+	/* Get the Mailbox API version */
+	device_printf(dev,"MBX API %d negotiation: %d\n",
+	    ixgbe_mbox_api_11,
+	    ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
+
 	error = ixgbe_init_hw(hw);
 	if (error) {
 		aprint_error_dev(dev,"Hardware Initialization Failure\n");
@@ -1457,7 +1466,9 @@ map_err:
 		return ENXIO;
 	}
 
-	adapter->num_queues = 1;
+	/* Pick up the tuneable queues */
+	adapter->num_queues = ixv_num_queues;
+
 	adapter->hw.back = &adapter->osdep;
 
 	/*
@@ -1675,35 +1686,43 @@ ixv_initialize_transmit_units(struct ada
 static void
 ixv_initialize_receive_units(struct adapter *adapter)
 {
-	int i;
 	struct	rx_ring	*rxr = adapter->rx_rings;
 	struct ixgbe_hw	*hw = &adapter->hw;
-	struct ifnet   *ifp = adapter->ifp;
-	u32		bufsz, fctrl, rxcsum, hlreg;
-
-
-	/* Enable broadcasts */
-	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-	fctrl |= IXGBE_FCTRL_BAM;
-	fctrl |= IXGBE_FCTRL_DPF;
-	fctrl |= IXGBE_FCTRL_PMCF;
-	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+	struct ifnet	*ifp = adapter->ifp;
+	u32		bufsz, rxcsum, psrtype;
+	int		max_frame;
 
-	/* Set for Jumbo Frames? */
-	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (ifp->if_mtu > ETHERMTU) {
-		hlreg |= IXGBE_HLREG0_JUMBOEN;
+	if (ifp->if_mtu > ETHERMTU)
 		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-	} else {
-		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+	else
 		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
 
-	for (i = 0; i < adapter->num_queues; i++, rxr++) {
+	psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+	    IXGBE_PSRTYPE_L2HDR;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+	/* Tell PF our expected packet-size */
+	max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
+	ixgbevf_rlpml_set_vf(hw, max_frame);
+
+	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 		u64 rdba = rxr->rxdma.dma_paddr;
 		u32 reg, rxdctl;
 
+		/* Disable the queue */
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+		rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+		for (int j = 0; j < 10; j++) {
+			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+			    IXGBE_RXDCTL_ENABLE)
+				msec_delay(1);
+			else
+				break;
+		}
+		wmb();
 		/* Setup the Base and Length of the Rx Descriptor Ring */
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
 		    (rdba & 0x00000000ffffffffULL));
@@ -1712,6 +1731,10 @@ ixv_initialize_receive_units(struct adap
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
 		    adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
 
+		/* Reset the ring indices */
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
+
 		/* Set up the SRRCTL register */
 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1720,14 +1743,14 @@ ixv_initialize_receive_units(struct adap
 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
 
-		/* Setup the HW Rx Head and Tail Descriptor Pointers */
-		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+		/* Set the Tail Pointer */
 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
 		    adapter->num_rx_desc - 1);
+
 		/* Set the processing limit */
 		rxr->process_limit = ixv_rx_process_limit;
 
-		/* Set Rx Tail register */
+		/* Capture Rx Tail index */
 		rxr->tail = IXGBE_VFRDT(rxr->me);
 
 		/* Do the queue enabling last */
@@ -2092,10 +2115,6 @@ ixv_add_stats_sysctls(struct adapter *ad
 	    xname, "Discarded RX packets");
 	evcnt_attach_dynamic(&txr->total_packets, EVCNT_TYPE_MISC, NULL,
 	    xname, "TX Packets");
-#if 0
-	evcnt_attach_dynamic(&txr->bytes, EVCNT_TYPE_MISC, NULL,
-	    xname, "TX Bytes");
-#endif
 	evcnt_attach_dynamic(&txr->no_desc_avail, EVCNT_TYPE_MISC, NULL,
 	    xname, "# of times not enough descriptors were available during TX");
 	evcnt_attach_dynamic(&txr->tso_tx, EVCNT_TYPE_MISC, NULL,

Reply via email to