My previous patch was partly rejected for OpenBSD -current, because the ixv(4)
code depends on ix(4), which has changed to support TSO/LRO. I rebased my patch
onto OpenBSD -current. See the patch at the end of this e-mail.

Thank you, Paul B. Henson! He tested my patch on Linux Qemu, and now we have
the knowledge needed to operate ixv(4). I will write it down below.

Known Issues and Requirements:

  1. Do not use 'ifconfig lladdr' on ESXi.

     Changing the link-layer address is not permitted on ESXi. If it is
     changed, the interface becomes unusable and the VM needs to be rebooted.

     The link-layer address can be changed through the ESXi user interface.

     On Linux qemu, 'ifconfig lladdr' works.

  2. "pc-q35" emulation is required on Linux Qemu.

     The default chipset emulation "pc-i440fx" doesn't support MSI-X
     interrupts. Ixv(4) requires MSI-X and never works with plain MSI, so
     ixv(4) fails to attach with "pc-i440fx" emulation.

  3. Linux ixgbe driver shows "Unhandled Msg 00000010" in dmesg.

     Older versions of Linux don't support the GET_LINK_STATE message, so
     ixv(4) cannot see when the physical link state changes. I see that
     GET_LINK_STATE support was added by the following commit in Linus's
     repository, but I'm not sure which distros carry this patch.

     
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=366fd1000995d4cf64e1a61a0d78a051550b9841

  4. Make sure the PF (Physical Function) interface is up.

     While the PF interface is down, the PF driver stops working and doesn't
     respond to the VF (Virtual Function), so the ixv interface cannot be
     brought up.

  5. Linux kernel shows all reset messages from VF in dmesg.

     This is not an error case. Ixv(4) sends reset messages when the device is
     attached and when the interface is brought up.

  6. Performance

     Recent OpenBSD ix(4) supports TSO, which also significantly improves
     ixv(4) packet transmission performance. TSO is enabled by default, but
     LRO is not supported by the VF, so there is no way to use LRO in ixv(4).

     Increasing the MTU size gives you a chance to improve performance, but
     make sure all intermediate hops can handle the MTU size.

diff --git a/sys/arch/amd64/conf/GENERIC b/sys/arch/amd64/conf/GENERIC
index c8e4ec8284e..2ad357f9c1b 100644
--- a/sys/arch/amd64/conf/GENERIC
+++ b/sys/arch/amd64/conf/GENERIC
@@ -524,6 +524,7 @@ msk*        at mskc?                        #  each port of 
above
 em*    at pci?                         # Intel Pro/1000 ethernet
 ixgb*  at pci?                         # Intel Pro/10Gb ethernet
 ix*    at pci?                         # Intel 82598EB 10Gb ethernet
+ixv*   at pci?                         # Virtual Function of Intel 82599
 myx*   at pci?                         # Myricom Myri-10G 10Gb ethernet
 oce*   at pci?                         # Emulex OneConnect 10Gb ethernet
 txp*   at pci?                         # 3com 3CR990
diff --git a/sys/dev/pci/files.pci b/sys/dev/pci/files.pci
index 101ed502e76..72c8b485938 100644
--- a/sys/dev/pci/files.pci
+++ b/sys/dev/pci/files.pci
@@ -350,13 +350,19 @@ file      dev/pci/ixgb_hw.c               ixgb
 # Intel 82598 10GbE
 device ix: ether, ifnet, ifmedia, intrmap, stoeplitz
 attach ix at pci
-file   dev/pci/if_ix.c                 ix
-file   dev/pci/ixgbe.c                 ix
-file   dev/pci/ixgbe_82598.c           ix
-file   dev/pci/ixgbe_82599.c           ix
-file   dev/pci/ixgbe_x540.c            ix
-file   dev/pci/ixgbe_x550.c            ix
-file   dev/pci/ixgbe_phy.c             ix
+file   dev/pci/if_ix.c                 ix | ixv
+file   dev/pci/ixgbe.c                 ix | ixv
+file   dev/pci/ixgbe_82598.c           ix | ixv
+file   dev/pci/ixgbe_82599.c           ix | ixv
+file   dev/pci/ixgbe_x540.c            ix | ixv
+file   dev/pci/ixgbe_x550.c            ix | ixv
+file   dev/pci/ixgbe_phy.c             ix | ixv
+
+# Virtual Function of i82599.
+device ixv: ether, ifnet, ifmedia, intrmap, stoeplitz
+attach ixv at pci
+file   dev/pci/if_ixv.c                ixv
+file   dev/pci/ixgbe_vf.c              ixv
 
 # Intel Ethernet 700 Series
 device ixl: ether, ifnet, ifmedia, intrmap, stoeplitz
diff --git a/sys/dev/pci/if_ix.c b/sys/dev/pci/if_ix.c
index 98815b51d62..56d8e305dec 100644
--- a/sys/dev/pci/if_ix.c
+++ b/sys/dev/pci/if_ix.c
@@ -507,7 +507,7 @@ ixgbe_start(struct ifqueue *ifq)
         * hardware that this frame is available to transmit.
         */
        if (post)
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
+               IXGBE_WRITE_REG(&sc->hw, txr->tail,
                    txr->next_avail_desc);
 }
 
@@ -705,7 +705,7 @@ ixgbe_watchdog(struct ifnet * ifp)
        for (i = 0; i < sc->num_queues; i++, txr++) {
                printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, 
i,
                    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
-                   IXGBE_READ_REG(hw, IXGBE_TDT(i)));
+                   IXGBE_READ_REG(hw, sc->tx_rings[i].tail));
                printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
                    i, txr->next_to_clean);
        }
@@ -825,7 +825,7 @@ ixgbe_init(void *arg)
                                msec_delay(1);
                }
                IXGBE_WRITE_FLUSH(&sc->hw);
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
+               IXGBE_WRITE_REG(&sc->hw, rxr[i].tail, rxr->last_desc_filled);
        }
 
        /* Set up VLAN support and filter */
@@ -2360,9 +2360,12 @@ ixgbe_initialize_transmit_units(struct ix_softc *sc)
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
                    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
 
+               /* Set Tx Tail register */
+               txr->tail = IXGBE_TDT(i);
+
                /* Setup the HW Tx Head and Tail descriptor pointers */
                IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+               IXGBE_WRITE_REG(hw, txr->tail, 0);
 
                /* Setup Transmit Descriptor Cmd Settings */
                txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
@@ -2845,7 +2848,7 @@ ixgbe_rxrefill(void *xrxr)
 
        if (ixgbe_rxfill(rxr)) {
                /* Advance the Rx Queue "Tail Pointer" */
-               IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
+               IXGBE_WRITE_REG(&sc->hw, rxr->tail,
                    rxr->last_desc_filled);
        } else if (if_rxr_inuse(&rxr->rx_ring) == 0)
                timeout_add(&rxr->rx_refill, 1);
@@ -2941,6 +2944,9 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
                srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
                IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
 
+               /* Capture Rx Tail index */
+               rxr->tail = IXGBE_RDT(i);
+
                if (ISSET(ifp->if_xflags, IFXF_LRO)) {
                        rdrxctl = IXGBE_READ_REG(&sc->hw, IXGBE_RSCCTL(i));
 
@@ -2953,7 +2959,7 @@ ixgbe_initialize_receive_units(struct ix_softc *sc)
 
                /* Setup the HW Rx Head and Tail Descriptor Pointers */
                IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+               IXGBE_WRITE_REG(hw, rxr->tail, 0);
        }
 
        if (sc->hw.mac.type != ixgbe_mac_82598EB) {
diff --git a/sys/dev/pci/if_ix.h b/sys/dev/pci/if_ix.h
index 41d756110c7..5426cf9bc8a 100644
--- a/sys/dev/pci/if_ix.h
+++ b/sys/dev/pci/if_ix.h
@@ -82,7 +82,14 @@
  */
 #define IXGBE_TX_OP_THRESHOLD  (sc->num_segs + 2)
 
+/* These defines are used in MTU calculations */
 #define IXGBE_MAX_FRAME_SIZE   9216
+#define IXGBE_MTU_HDR         (ETHER_HDR_LEN + ETHER_CRC_LEN)
+#define IXGBE_MTU_HDR_VLAN    (ETHER_HDR_LEN + ETHER_CRC_LEN + \
+                              ETHER_VLAN_ENCAP_LEN)
+#define IXGBE_MAX_MTU         (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR)
+#define IXGBE_MAX_MTU_VLAN    (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN)
+
 
 /* Flow control constants */
 #define IXGBE_FC_PAUSE         0xFFFF
@@ -116,6 +123,8 @@
 #define IXGBE_BR_SIZE                  4096
 #define IXGBE_QUEUE_MIN_FREE           32
 
+#define IXGBE_EITR_DEFAULT              128
+
 /*
  * Interrupt Moderation parameters
  */
@@ -169,6 +178,7 @@ struct tx_ring {
        struct ix_softc         *sc;
        struct ifqueue          *ifq;
        uint32_t                me;
+       uint32_t                tail;
        uint32_t                watchdog_timer;
        union ixgbe_adv_tx_desc *tx_base;
        struct ixgbe_tx_buf     *tx_buffers;
@@ -194,6 +204,7 @@ struct rx_ring {
        struct ix_softc         *sc;
        struct ifiqueue         *ifiq;
        uint32_t                me;
+       uint32_t                tail;
        union ixgbe_adv_rx_desc *rx_base;
        struct ixgbe_dma_alloc  rxdma;
 #if 0
@@ -245,6 +256,7 @@ struct ix_softc {
        uint16_t                num_segs;
        uint32_t                link_speed;
        bool                    link_up;
+       bool                    link_enabled;
        uint32_t                linkvec;
        struct rwlock           sfflock;
 
diff --git a/sys/dev/pci/if_ixv.c b/sys/dev/pci/if_ixv.c
new file mode 100644
index 00000000000..5eaafe8fae3
--- /dev/null
+++ b/sys/dev/pci/if_ixv.c
@@ -0,0 +1,1637 @@
+/*     $OpenBSD$       */
+
+/******************************************************************************
+
+  Copyright (c) 2001-2017, Intel Corporation
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+   3. Neither the name of the Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <dev/pci/if_ix.h>
+#include <dev/pci/ixgbe_type.h>
+#include <dev/pci/ixgbe.h>
+
+/************************************************************************
+ * Driver version
+ ************************************************************************/
+char ixv_driver_version[] = "1.5.32";
+
+/************************************************************************
+ * PCI Device ID Table
+ *
+ *   Used by probe to select devices to load on
+ *   Last field stores an index into ixv_strings
+ *   Last entry must be all 0s
+ *
+ *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ ************************************************************************/
+const struct pci_matchid ixv_devices[] = {
+       {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599VF},
+       {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540_VF},
+       {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550_VF},
+       {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_VF},
+       {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_VF}
+};
+
+/************************************************************************
+ * Function prototypes
+ ************************************************************************/
+static int     ixv_probe(struct device *, void *, void *);
+static void    ixv_identify_hardware(struct ix_softc *sc);
+static void    ixv_attach(struct device *, struct device *, void *);
+static int     ixv_detach(struct device *, int);
+static int     ixv_ioctl(struct ifnet *, u_long, caddr_t);
+static void    ixv_watchdog(struct ifnet *);
+static void    ixv_init(struct ix_softc *);
+static void    ixv_stop(void *);
+static int     ixv_allocate_msix(struct ix_softc *);
+static void    ixv_setup_interface(struct device *, struct ix_softc *);
+static int     ixv_negotiate_api(struct ix_softc *);
+
+static void    ixv_initialize_transmit_units(struct ix_softc *);
+static void    ixv_initialize_receive_units(struct ix_softc *);
+static void    ixv_initialize_rss_mapping(struct ix_softc *);
+
+static void    ixv_enable_intr(struct ix_softc *);
+static void    ixv_disable_intr(struct ix_softc *);
+static void    ixv_set_multi(struct ix_softc *);
+static void    ixv_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
+static void    ixv_configure_ivars(struct ix_softc *);
+static uint8_t *ixv_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
+
+static void    ixv_setup_vlan_support(struct ix_softc *);
+static void    ixv_configure_vlan(struct ifnet *, uint16_t, uint16_t);
+
+/* The MSI-X Interrupt handlers */
+static int     ixv_msix_que(void *);
+static int     ixv_msix_mbx(void *);
+
+/* Share functions between ixv and ix. */
+void    ixgbe_start(struct ifqueue *ifq);
+int    ixgbe_activate(struct device *, int);
+int    ixgbe_allocate_queues(struct ix_softc *);
+int    ixgbe_setup_transmit_structures(struct ix_softc *);
+int    ixgbe_setup_receive_structures(struct ix_softc *);
+void   ixgbe_free_transmit_structures(struct ix_softc *);
+void   ixgbe_free_receive_structures(struct ix_softc *);
+int    ixgbe_txeof(struct tx_ring *);
+int    ixgbe_rxeof(struct rx_ring *);
+void   ixgbe_rxrefill(void *);
+void    ixgbe_update_link_status(struct ix_softc *);
+int     ixgbe_allocate_pci_resources(struct ix_softc *);
+void    ixgbe_free_pci_resources(struct ix_softc *);
+void   ixgbe_media_status(struct ifnet *, struct ifmediareq *);
+int    ixgbe_media_change(struct ifnet *);
+void   ixgbe_add_media_types(struct ix_softc *);
+int    ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
+int    ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
+
+#if NKSTAT > 0
+static void    ixv_kstats(struct ix_softc *);
+static void    ixv_rxq_kstats(struct ix_softc *, struct rx_ring *);
+static void    ixv_txq_kstats(struct ix_softc *, struct tx_ring *);
+static void    ixv_kstats_tick(void *);
+#endif
+
+/*********************************************************************
+ *  OpenBSD Device Interface Entry Points
+ *********************************************************************/
+
+struct cfdriver ixv_cd = {
+       NULL, "ixv", DV_IFNET
+};
+
+const struct cfattach ixv_ca = {
+       sizeof(struct ix_softc), ixv_probe, ixv_attach, ixv_detach,
+       ixgbe_activate
+};
+
+/*
+ * This checks for a zero mac addr, something that will be likely
+ * unless the Admin on the Host has created one.
+ */
+static inline bool
+ixv_check_ether_addr(uint8_t *addr)
+{
+       bool status = TRUE;
+
+       if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
+            addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
+               status = FALSE;
+
+       return (status);
+}
+
+/************************************************************************
+ * ixv_probe - Device identification routine
+ *
+ *   Determines if the driver should be loaded on
+ *   adapter based on its PCI vendor/device ID.
+ *
+ *   return BUS_PROBE_DEFAULT on success, positive on failure
+ ************************************************************************/
+static int
+ixv_probe(struct device *parent, void *match, void *aux)
+{
+       INIT_DEBUGOUT("ixv_probe: begin");
+
+       return (pci_matchbyid((struct pci_attach_args *)aux, ixv_devices,
+           nitems(ixv_devices)));
+}
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixv_identify_hardware(struct ix_softc *sc)
+{
+       struct ixgbe_osdep      *os = &sc->osdep;
+       struct pci_attach_args  *pa = &os->os_pa;
+       uint32_t                 reg;
+
+       /* Save off the information about this board */
+       sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
+       sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
+
+       reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
+       sc->hw.revision_id = PCI_REVISION(reg);
+
+       reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
+       sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
+       sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
+
+       /* Pick up the 82599 and VF settings */
+       if (sc->hw.mac.type != ixgbe_mac_82598EB)
+               sc->hw.phy.smart_speed = ixgbe_smart_speed_on;
+       sc->num_segs = IXGBE_82599_SCATTER;
+}
+
+/************************************************************************
+ * ixv_attach - Device initialization routine
+ *
+ *   Called when the driver is being loaded.
+ *   Identifies the type of hardware, allocates all resources
+ *   and initializes the hardware.
+ *
+ *   return 0 on success, positive on failure
+ ************************************************************************/
+static void
+ixv_attach(struct device *parent, struct device *self, void *aux)
+{
+       struct pci_attach_args  *pa = (struct pci_attach_args *)aux;
+       struct ix_softc         *sc = (struct ix_softc *)self;
+       struct ixgbe_hw *hw;
+       int     error;
+
+       INIT_DEBUGOUT("ixv_attach: begin");
+
+       sc->osdep.os_sc = sc;
+       sc->osdep.os_pa = *pa;
+
+       rw_init(&sc->sfflock, "ixvsff");
+
+       /* Allocate, clear, and link in our adapter structure */
+       sc->dev = *self;
+       sc->hw.back = sc;
+       hw = &sc->hw;
+
+       /* Indicate to RX setup to use Jumbo Clusters */
+       sc->num_tx_desc = DEFAULT_TXD;
+       sc->num_rx_desc = DEFAULT_RXD;
+
+       ixv_identify_hardware(sc);
+
+#if NKSTAT > 0
+       ixv_kstats(sc);
+#endif
+
+       /* Allocate multicast array memory */
+       sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
+           IXGBE_MAX_MULTICAST_ADDRESSES_VF, M_DEVBUF, M_NOWAIT);
+       if (sc->mta == NULL) {
+               printf("Can not allocate multicast setup array\n");
+               return;
+       }
+
+       /* Do base PCI setup - map BAR0 */
+       if (ixgbe_allocate_pci_resources(sc)) {
+               printf("ixgbe_allocate_pci_resources() failed!\n");
+               goto err_out;
+       }
+
+       /* Allocate our TX/RX Queues */
+       if (ixgbe_allocate_queues(sc)) {
+               printf("ixgbe_allocate_queues() failed!\n");
+               goto err_out;
+       }
+
+       /* A subset of set_mac_type */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82599_VF:
+               hw->mac.type = ixgbe_mac_82599_vf;
+               break;
+       case IXGBE_DEV_ID_X540_VF:
+               hw->mac.type = ixgbe_mac_X540_vf;
+               break;
+       case IXGBE_DEV_ID_X550_VF:
+               hw->mac.type = ixgbe_mac_X550_vf;
+               break;
+       case IXGBE_DEV_ID_X550EM_X_VF:
+               hw->mac.type = ixgbe_mac_X550EM_x_vf;
+               break;
+       case IXGBE_DEV_ID_X550EM_A_VF:
+               hw->mac.type = ixgbe_mac_X550EM_a_vf;
+               break;
+       default:
+               /* Shouldn't get here since probe succeeded */
+               printf("Unknown device ID!\n");
+               goto err_out;
+       }
+
+       /* Initialize the shared code */
+       if (ixgbe_init_ops_vf(hw)) {
+               printf("ixgbe_init_ops_vf() failed!\n");
+               goto err_out;
+       }
+
+       /* Setup the mailbox */
+       ixgbe_init_mbx_params_vf(hw);
+
+       hw->mac.max_tx_queues = 4;
+       hw->mac.max_rx_queues = 4;
+
+       /* Set the right number of segments */
+       sc->num_segs = IXGBE_82599_SCATTER;
+
+       error = hw->mac.ops.reset_hw(hw);
+       switch (error) {
+       case 0:
+               break;
+       case IXGBE_ERR_RESET_FAILED:
+               printf("...reset_hw() failure: Reset Failed!\n");
+               goto err_out;
+       default:
+               printf("...reset_hw() failed with error %d\n",
+                   error);
+               goto err_out;
+       }
+
+       error = hw->mac.ops.init_hw(hw);
+       if (error) {
+               printf("...init_hw() failed with error %d\n",
+                   error);
+               goto err_out;
+       }
+
+       /* Negotiate mailbox API version */
+       if (ixv_negotiate_api(sc)) {
+               printf("Mailbox API negotiation failed during attach!\n");
+               goto err_out;
+       }
+
+       /* If no mac address was assigned, make a random one */
+       if (!ixv_check_ether_addr(hw->mac.addr)) {
+               uint8_t addr[ETHER_ADDR_LEN];
+               arc4random_buf(&addr, sizeof(addr));
+               addr[0] &= 0xFE;
+               addr[0] |= 0x02;
+               bcopy(addr, hw->mac.addr, sizeof(addr));
+               bcopy(addr, hw->mac.perm_addr, sizeof(addr));
+       }
+
+       bcopy(hw->mac.addr, sc->arpcom.ac_enaddr,
+           IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+       /* Setup OS specific network interface */
+       ixv_setup_interface(self, sc);
+
+       /* Setup MSI-X */
+       if (ixv_allocate_msix(sc)) {
+               printf("ixv_allocate_msix() failed!\n");
+               goto err_late;
+       }
+
+       /* Check if VF was disabled by PF */
+       if (hw->mac.ops.get_link_state(hw, &sc->link_enabled)) {
+               /* PF is not capable of controlling VF state. Enable the link. 
*/
+               sc->link_enabled = TRUE;
+       }
+
+       /* Set an initial default flow control value */
+       sc->fc = ixgbe_fc_full;
+
+       INIT_DEBUGOUT("ixv_attach: end");
+
+       return;
+
+err_late:
+       ixgbe_free_transmit_structures(sc);
+       ixgbe_free_receive_structures(sc);
+err_out:
+       ixgbe_free_pci_resources(sc);
+       free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
+            IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+} /* ixv_attach */
+
+/************************************************************************
+ * ixv_detach - Device removal routine
+ *
+ *   Called when the driver is being removed.
+ *   Stops the adapter and deallocates all the resources
+ *   that were allocated for driver operation.
+ *
+ *   return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixv_detach(struct device *self, int flags)
+{
+       struct ix_softc *sc = (struct ix_softc *)self;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+
+       INIT_DEBUGOUT("ixv_detach: begin");
+
+       ixv_stop(sc);
+       ether_ifdetach(ifp);
+       if_detach(ifp);
+
+       free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
+            IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+
+       ixgbe_free_pci_resources(sc);
+
+       ixgbe_free_transmit_structures(sc);
+       ixgbe_free_receive_structures(sc);
+
+       return (0);
+} /* ixv_detach */
+
+/*********************************************************************
+ *  Watchdog entry point
+ *
+ **********************************************************************/
+static void
+ixv_watchdog(struct ifnet * ifp)
+{
+       struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
+       struct tx_ring *txr = sc->tx_rings;
+       struct ixgbe_hw *hw = &sc->hw;
+       int             tx_hang = FALSE;
+       int             i;
+
+       /*
+        * The timer is set to 5 every time ixgbe_start() queues a packet.
+        * Anytime all descriptors are clean the timer is set to 0.
+        */
+       for (i = 0; i < sc->num_queues; i++, txr++) {
+               if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
+                       continue;
+               else {
+                       tx_hang = TRUE;
+                       break;
+               }
+       }
+       if (tx_hang == FALSE)
+               return;
+
+
+       printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
+       for (i = 0; i < sc->num_queues; i++, txr++) {
+               printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, 
i,
+                   IXGBE_READ_REG(hw, IXGBE_VFTDH(i)),
+                   IXGBE_READ_REG(hw, txr->tail));
+               printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
+                   i, txr->next_to_clean);
+       }
+       ifp->if_flags &= ~IFF_RUNNING;
+
+       ixv_init(sc);
+}
+
+/************************************************************************
+ * ixv_init - Init entry point
+ *
+ *   Used in two ways: It is used by the stack as an init entry
+ *   point in network interface structure. It is also used
+ *   by the driver as a hw/sw initialization routine to get
+ *   to a consistent state.
+ *
+ *   return 0 on success, positive on failure
+ ************************************************************************/
+void
+ixv_init(struct ix_softc *sc)
+{
+       struct ifnet    *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw *hw = &sc->hw;
+       struct ix_queue *que = sc->queues;
+       uint32_t        mask;
+       int             i, s, error = 0;
+
+       INIT_DEBUGOUT("ixv_init: begin");
+
+       s = splnet();
+
+       hw->adapter_stopped = FALSE;
+       hw->mac.ops.stop_adapter(hw);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Get the latest mac address, User can use a LAA */
+       bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
+           IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
+
+       /* Prepare transmit descriptors and buffers */
+       if (ixgbe_setup_transmit_structures(sc)) {
+               printf("Could not setup transmit structures\n");
+               ixv_stop(sc);
+               splx(s);
+               return;
+       }
+
+       /* Reset VF and renegotiate mailbox API version */
+       hw->mac.ops.reset_hw(hw);
+       error = ixv_negotiate_api(sc);
+       if (error) {
+               printf("Mailbox API negotiation failed in init!\n");
+               splx(s);
+               return;
+       }
+
+       ixv_initialize_transmit_units(sc);
+
+       /* Setup Multicast table */
+       ixv_set_multi(sc);
+
+       /* Use 2k clusters, even for jumbo frames */
+       sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
+
+       /* Prepare receive descriptors and buffers */
+       if (ixgbe_setup_receive_structures(sc)) {
+               printf("Could not setup receive structures\n");
+               ixv_stop(sc);
+               splx(s);
+               return;
+       }
+
+       /* Configure RX settings */
+       ixv_initialize_receive_units(sc);
+
+       /* Set up VLAN offload and filter */
+       ixv_setup_vlan_support(sc);
+
+       /* Set up MSI-X routing */
+       ixv_configure_ivars(sc);
+
+       /* Set up auto-mask */
+       mask = (1 << sc->linkvec);
+       for (i = 0; i < sc->num_queues; i++, que++)
+               mask |= (1 << que->msix);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
+
+       /* Set moderation on the Link interrupt */
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(sc->linkvec),
+                       IXGBE_LINK_ITR);
+
+       /* Config/Enable Link */
+       error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
+       if (error) {
+               /* PF is not capable of controlling VF state. Enable the link. 
*/
+               sc->link_enabled = TRUE;
+       } else if (sc->link_enabled == FALSE)
+               printf("VF is disabled by PF\n");
+
+       hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
+           FALSE);
+
+       /* And now turn on interrupts */
+       ixv_enable_intr(sc);
+
+       /* Now inform the stack we're ready */
+       ifp->if_flags |= IFF_RUNNING;
+       for (i = 0; i < sc->num_queues; i++)
+               ifq_clr_oactive(ifp->if_ifqs[i]);
+
+       splx(s);
+} /* ixv_init */
+
+/*
+ * MSI-X Interrupt Handlers and Tasklets
+ */
+
+static inline void
+ixv_enable_queue(struct ix_softc *sc, uint32_t vector)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t             queue = 1 << vector;
+       uint32_t             mask;
+
+       mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+} /* ixv_enable_queue */
+
+static inline void
+ixv_disable_queue(struct ix_softc *sc, uint32_t vector)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint64_t             queue = (1ULL << vector);
+       uint32_t             mask;
+
+       mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
+} /* ixv_disable_queue */
+
+/************************************************************************
+ * ixv_msix_que - MSI Queue Interrupt Service routine
+ ************************************************************************/
+int
+ixv_msix_que(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct ix_softc *sc = que->sc;
+       struct ifnet    *ifp = &sc->arpcom.ac_if;
+       struct tx_ring  *txr = que->txr;
+       struct rx_ring  *rxr = que->rxr;
+
+       if ((ifp->if_flags & IFF_RUNNING) == 0)
+               return 1;
+
+       ixv_disable_queue(sc, que->msix);
+
+       ixgbe_rxeof(rxr);
+       ixgbe_txeof(txr);
+       ixgbe_rxrefill(rxr);
+
+       /* Reenable this interrupt */
+       ixv_enable_queue(sc, que->msix);
+
+       return 1;
+} /* ixv_msix_que */
+
+
+/************************************************************************
+ * ixv_msix_mbx
+ ************************************************************************/
+static int
+ixv_msix_mbx(void *arg)
+{
+       struct ix_softc  *sc = arg;
+       struct ixgbe_hw *hw = &sc->hw;
+
+       sc->hw.mac.get_link_status = TRUE;
+       KERNEL_LOCK();
+       ixgbe_update_link_status(sc);
+       KERNEL_UNLOCK();
+
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
+
+
+       return 1;
+} /* ixv_msix_mbx */
+
+/************************************************************************
+ * ixv_negotiate_api
+ *
+ *   Negotiate the Mailbox API with the PF;
+ *   start with the most featured API first.
+ ************************************************************************/
+static int
+ixv_negotiate_api(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int             mbx_api[] = { ixgbe_mbox_api_12,
+                                     ixgbe_mbox_api_11,
+                                     ixgbe_mbox_api_10,
+                                     ixgbe_mbox_api_unknown };
+       int             i = 0;
+
+       while (mbx_api[i] != ixgbe_mbox_api_unknown) {
+               if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
+                       return (0);
+               i++;
+       }
+
+       return (EINVAL);
+} /* ixv_negotiate_api */
+
+
+/************************************************************************
+ * ixv_set_multi - Multicast Update
+ *
+ *   Called whenever the multicast address list is updated.  Rebuilds
+ *   the flat MAC array handed to the shared code and asks the PF for
+ *   the most inclusive receive mode the interface needs.
+ ************************************************************************/
+static void
+ixv_set_multi(struct ix_softc *sc)
+{
+       struct ifnet       *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw    *hw = &sc->hw;
+       struct arpcom      *ac = &sc->arpcom;
+       uint8_t            *mta, *update_ptr;
+       struct ether_multi *enm;
+       struct ether_multistep step;
+       int                xcast_mode, mcnt = 0;
+
+       IOCTL_DEBUGOUT("ixv_set_multi: begin");
+
+       mta = sc->mta;
+       bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+             IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+
+       /*
+        * Fall back to ALLMULTI when the filter cannot represent the
+        * list exactly: promiscuous mode, address ranges, or more
+        * addresses than the VF mailbox can pass to the PF.
+        */
+       ifp->if_flags &= ~IFF_ALLMULTI;
+       if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
+           ac->ac_multicnt > IXGBE_MAX_MULTICAST_ADDRESSES_VF) {
+               ifp->if_flags |= IFF_ALLMULTI;
+       } else {
+               /* Gather every multicast address into the flat array. */
+               ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
+               while (enm != NULL) {
+                       bcopy(enm->enm_addrlo,
+                             &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+                             IXGBE_ETH_LENGTH_OF_ADDRESS);
+                       mcnt++;
+
+                       ETHER_NEXT_MULTI(step, enm);
+               }
+
+               update_ptr = mta;
+               hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
+                                               ixv_mc_array_itr, TRUE);
+       }
+
+       /* Request the most inclusive mode we need. */
+       if (ISSET(ifp->if_flags, IFF_PROMISC))
+               xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
+       else if (ISSET(ifp->if_flags, IFF_ALLMULTI))
+               xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+       else if (ISSET(ifp->if_flags, (IFF_BROADCAST | IFF_MULTICAST)))
+               xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+       else
+               xcast_mode = IXGBEVF_XCAST_MODE_NONE;
+
+       hw->mac.ops.update_xcast_mode(hw, xcast_mode);
+} /* ixv_set_multi */
+
+/************************************************************************
+ * ixv_mc_array_itr
+ *
+ *   Iterator callback for the shared multicast code: hands back one
+ *   address from the array built by ixv_set_multi() per invocation
+ *   and advances the caller's cursor.
+ ************************************************************************/
+static uint8_t *
+ixv_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
+{
+       uint8_t *addr;
+
+       addr = *update_ptr;
+       /* VFs have no VMDq pool of their own. */
+       *vmdq = 0;
+       /* Move the cursor to the next address for the following call. */
+       *update_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+
+       return (addr);
+} /* ixv_mc_array_itr */
+
+/************************************************************************
+ * ixv_stop - Stop the hardware
+ *
+ *   Disables all traffic on the adapter by issuing a
+ *   global reset on the MAC and deallocates TX/RX buffers.
+ ************************************************************************/
+static void
+ixv_stop(void *arg)
+{
+       struct ix_softc  *sc = arg;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw *hw = &sc->hw;
+       int i;
+
+       INIT_DEBUGOUT("ixv_stop: begin\n");
+#if NKSTAT > 0
+       timeout_del(&sc->sc_kstat_tmo);
+#endif
+       ixv_disable_intr(sc);
+
+
+       /* Tell the stack that the interface is no longer active */
+       ifp->if_flags &= ~IFF_RUNNING;
+
+       hw->mac.ops.reset_hw(hw);
+       /*
+        * NOTE(review): adapter_stopped is cleared to FALSE so that the
+        * following stop_adapter() call actually executes (the shared
+        * code early-returns when it believes the adapter is already
+        * stopped) — confirm against the shared-code implementation.
+        */
+       sc->hw.adapter_stopped = FALSE;
+       hw->mac.ops.stop_adapter(hw);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Wait for in-flight interrupt handlers and queue work to drain. */
+       intr_barrier(sc->tag);
+       for (i = 0; i < sc->num_queues; i++) {
+               struct ifqueue *ifq = ifp->if_ifqs[i];
+               ifq_barrier(ifq);
+               ifq_clr_oactive(ifq);
+
+               if (sc->queues[i].tag != NULL)
+                       intr_barrier(sc->queues[i].tag);
+               timeout_del(&sc->rx_rings[i].rx_refill);
+       }
+
+       KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
+
+       /* Should we really clear all structures on stop? */
+       ixgbe_free_transmit_structures(sc);
+       ixgbe_free_receive_structures(sc);
+
+       ixgbe_update_link_status(sc);
+} /* ixv_stop */
+
+/************************************************************************
+ * ixv_setup_interface
+ *
+ *   Setup networking device structure and register an interface:
+ *   fills in the ifnet callbacks and capabilities, attaches media,
+ *   and wires per-queue ifqueue/ifiqueue softc pointers.
+ ************************************************************************/
+static void
+ixv_setup_interface(struct device *dev, struct ix_softc *sc)
+{
+       struct ifnet *ifp;
+       int i;
+
+       ifp = &sc->arpcom.ac_if;
+
+       strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
+       ifp->if_softc = sc;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_xflags = IFXF_MPSAFE;
+       ifp->if_ioctl = ixv_ioctl;
+       ifp->if_qstart = ixgbe_start;
+       ifp->if_timer = 0;
+       ifp->if_watchdog = ixv_watchdog;
+       ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
+           ETHER_HDR_LEN - ETHER_CRC_LEN;
+       ifp->if_configure_vlan = ixv_configure_vlan;
+       ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
+
+       ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+#if NVLAN > 0
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+#endif
+
+       /* Checksum offload for both IPv4 and IPv6 TCP/UDP. */
+       ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
+       ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
+       ifp->if_capabilities |= IFCAP_CSUM_IPv4;
+
+       /*
+        * NOTE(review): the 82598EB check mirrors ix(4); a VF mac type
+        * should never be 82598EB, so TSO is presumably always enabled
+        * here — confirm and consider dropping the test.
+        */
+       if (sc->hw.mac.type != ixgbe_mac_82598EB)
+               ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
+
+
+       /*
+        * Specify the media types supported by this sc and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
+           ixgbe_media_status);
+       ixgbe_add_media_types(sc);
+       ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+       if_attach(ifp);
+       ether_ifattach(ifp);
+
+       /* Create one transmit and one receive queue context per ring. */
+       if_attach_queues(ifp, sc->num_queues);
+       if_attach_iqueues(ifp, sc->num_queues);
+       for (i = 0; i < sc->num_queues; i++) {
+               struct ifqueue *ifq = ifp->if_ifqs[i];
+               struct ifiqueue *ifiq = ifp->if_iqs[i];
+               struct tx_ring *txr = &sc->tx_rings[i];
+               struct rx_ring *rxr = &sc->rx_rings[i];
+
+               ifq->ifq_softc = txr;
+               txr->ifq = ifq;
+
+               ifiq->ifiq_softc = rxr;
+               rxr->ifiq = ifiq;
+
+#if NKSTAT > 0
+               ixv_txq_kstats(sc, txr);
+               ixv_rxq_kstats(sc, rxr);
+#endif
+       }
+
+       sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
+} /* ixv_setup_interface */
+
+/************************************************************************
+ * ixv_initialize_transmit_units - Enable transmit unit.
+ *
+ *   Programs the VF TX descriptor ring registers (base, length,
+ *   head/tail, write-back thresholds) and enables each ring.
+ ************************************************************************/
+static void
+ixv_initialize_transmit_units(struct ix_softc *sc)
+{
+       struct ifnet    *ifp = &sc->arpcom.ac_if;
+       struct tx_ring  *txr;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint64_t        tdba;
+       uint32_t        txctrl, txdctl;
+       int             i;
+
+       /*
+        * txr is re-derived from the index each iteration; the old
+        * ", txr++" loop increment was dead and walked an initially
+        * indeterminate pointer.
+        */
+       for (i = 0; i < sc->num_queues; i++) {
+               txr = &sc->tx_rings[i];
+               tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
+
+               /* Set WTHRESH to 8, burst writeback */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= (8 << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+               /* Set Tx Tail register */
+               txr->tail = IXGBE_VFTDT(i);
+
+               /* Set the HW Tx Head and Tail indices */
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(i), 0);
+               IXGBE_WRITE_REG(&sc->hw, txr->tail, 0);
+
+               /* Setup Transmit Descriptor Cmd Settings */
+               txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+               txr->watchdog_timer = 0;
+
+               /* Set Ring parameters */
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+                   (tdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+                   sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+               txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+               txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+
+               /* Now enable */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+       }
+
+       ifp->if_timer = 0;
+} /* ixv_initialize_transmit_units */
+
+/************************************************************************
+ * ixv_initialize_rss_mapping - Program the VF RSS key and RETA.
+ *
+ *   Seeds the hash key registers from the stoeplitz key and fills
+ *   the 64-entry redirection table round-robin over the queues.
+ ************************************************************************/
+static void
+ixv_initialize_rss_mapping(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t        reta = 0, mrqc, rss_key[10];
+       int             queue_id;
+       int             i, j;
+
+       /* set up random bits */
+       stoeplitz_to_key(&rss_key, sizeof(rss_key));
+
+       /* Now fill out hash function seeds */
+       for (i = 0; i < 10; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+       /* Set up the redirection table */
+       for (i = 0, j = 0; i < 64; i++, j++) {
+               if (j == sc->num_queues)
+                       j = 0;
+
+               /*
+                * Assign the RSS bucket id for this indirection entry,
+                * capped at the number of configured buckets (which is
+                * num_queues).  queue_id was previously read before it
+                * was ever written — undefined behavior.
+                */
+               queue_id = j % sc->num_queues;
+
+               /*
+                * The low 8 bits are for hash value (n+0);
+                * The next 8 bits are for hash value (n+1), etc.
+                */
+               reta >>= 8;
+               reta |= ((uint32_t)queue_id) << 24;
+               if ((i & 3) == 3) {
+                       IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
+                       reta = 0;
+               }
+       }
+
+       /*
+        * Disable UDP - IP fragments aren't currently being handled
+        * and so we end up with a mix of 2-tuple and 4-tuple
+        * traffic.
+        */
+       mrqc = IXGBE_MRQC_RSSEN
+               | IXGBE_MRQC_RSS_FIELD_IPV4
+               | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+               | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+               | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+               | IXGBE_MRQC_RSS_FIELD_IPV6
+               | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+       ;
+       IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
+} /* ixv_initialize_rss_mapping */
+
+
+/************************************************************************
+ * ixv_initialize_receive_units - Setup receive registers and features.
+ *
+ *   Programs packet-split type, negotiates the max frame size with
+ *   the PF, then configures and enables each RX descriptor ring.
+ ************************************************************************/
+static void
+ixv_initialize_receive_units(struct ix_softc *sc)
+{
+       struct rx_ring  *rxr = sc->rx_rings;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t             bufsz, psrtype;
+
+       bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+       psrtype = IXGBE_PSRTYPE_TCPHDR
+               | IXGBE_PSRTYPE_UDPHDR
+               | IXGBE_PSRTYPE_IPV4HDR
+               | IXGBE_PSRTYPE_IPV6HDR
+               | IXGBE_PSRTYPE_L2HDR;
+
+       /* NOTE(review): bit 29 presumably enables RSS hashing in
+        * PSRTYPE for multi-queue — confirm against the datasheet. */
+       if (sc->num_queues > 1)
+               psrtype |= 1 << 29;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+       /* Tell PF our max_frame size */
+       if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
+               printf("There is a problem with the PF setup.  It is likely "
+                   "the receive unit for this VF will not function "
+                   "correctly.\n");
+       }
+
+       for (int i = 0; i < sc->num_queues; i++, rxr++) {
+               uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
+               uint32_t reg, rxdctl;
+
+               /* Disable the queue */
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+               for (int j = 0; j < 10; j++) {
+                       if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               msec_delay(1);
+                       else
+                               break;
+               }
+
+               /* Setup the Base and Length of the Rx Descriptor Ring */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+                   (rdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+                   sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+               /* Capture Rx Tail index */
+               rxr->tail = IXGBE_VFRDT(rxr->me);
+
+               /* Reset the ring indices */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+               IXGBE_WRITE_REG(hw, rxr->tail, 0);
+
+               /* Set up the SRRCTL register */
+               reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+               reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+               reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+               reg |= bufsz;
+               reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+               /* Do the queue enabling last */
+               rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+               for (int k = 0; k < 10; k++) {
+                       if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               break;
+                       msec_delay(1);
+               }
+
+               /* Set the Tail Pointer */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+                               sc->num_rx_desc - 1);
+       }
+
+       /*
+        * Do not touch RSS and RETA settings for older hardware
+        * as those are shared among PF and all VF.
+        */
+       if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
+               ixv_initialize_rss_mapping(sc);
+} /* ixv_initialize_receive_units */
+
+/************************************************************************
+ * ixv_setup_vlan_support
+ *
+ *   Re-enables VLAN stripping on the RX queues and replays the
+ *   shadow VFTA to the PF after a (re)init/soft reset.
+ ************************************************************************/
+static void
+ixv_setup_vlan_support(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t             ctrl, vid, vfta, retry;
+
+       /*
+        * We get here thru init, meaning
+        * a soft reset, this has already cleared
+        * the VFTA and other state, so if there
+        * have been no vlan's registered do nothing.
+        */
+       if (sc->num_vlans == 0) {
+               sc->vlan_stripping = 0;
+               return;
+       }
+       sc->vlan_stripping = 1;
+
+       /* Enable the queues */
+       for (int i = 0; i < sc->num_queues; i++) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               ctrl |= IXGBE_RXDCTL_VME;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+               /*
+                * Let Rx path know that it needs to store VLAN tag
+                * as part of extra mbuf info.
+                */
+       }
+
+       /*
+        * A soft reset zero's out the VFTA, so
+        * we need to repopulate it now.
+        */
+       for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
+               if (sc->shadow_vfta[i] == 0)
+                       continue;
+               vfta = sc->shadow_vfta[i];
+               /*
+                * Reconstruct the vlan id's
+                * based on the bits set in each
+                * of the array ints.
+                */
+               for (int j = 0; j < 32; j++) {
+                       retry = 0;
+                       if ((vfta & (1 << j)) == 0)
+                               continue;
+                       vid = (i * 32) + j;
+                       /* Call the shared code mailbox routine; the PF
+                        * may be busy, so retry a handful of times. */
+                       while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
+                               if (++retry > 5)
+                                       break;
+                       }
+               }
+       }
+} /* ixv_setup_vlan_support */
+
+/*
+ * ixv_configure_vlan - ifnet hook invoked on VLAN attach/detach.
+ *
+ * Records the change in the shadow VFTA and re-initializes the
+ * device so the new filter state is pushed to the PF.
+ */
+static void
+ixv_configure_vlan(struct ifnet *ifp, uint16_t vtag_add, uint16_t vtag_del)
+{
+       struct ix_softc *sc = ifp->if_softc;
+       uint16_t            word, bit;
+
+       /* Record a newly attached VLAN id in the shadow VFTA. */
+       if (vtag_add > 0 && vtag_add < 4096) {
+               word = (vtag_add >> 5) & 0x7F;
+               bit = vtag_add & 0x1F;
+               sc->shadow_vfta[word] |= (1 << bit);
+               sc->num_vlans++;
+       }
+
+       /* Drop a detached VLAN id from the shadow VFTA. */
+       if (vtag_del > 0 && vtag_del < 4096 && sc->num_vlans > 0) {
+               word = (vtag_del >> 5) & 0x7F;
+               bit = vtag_del & 0x1F;
+               sc->shadow_vfta[word] &= ~(1 << bit);
+               sc->num_vlans--;
+       }
+
+       /* Re-init to load the changes */
+       ixv_init(sc);
+} /* ixv_configure_vlan */
+
+/************************************************************************
+ * ixv_enable_intr
+ *
+ *   Unmasks the mailbox vector and every queue vector, and sets the
+ *   auto-clear mask so queue interrupts self-acknowledge.
+ ************************************************************************/
+static void
+ixv_enable_intr(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       struct ix_queue *que = sc->queues;
+       uint32_t         mask;
+       int              i;
+
+       /* For VTEIAC: auto-clear the link vector and all queue vectors */
+       mask = (1 << sc->linkvec);
+       for (i = 0; i < sc->num_queues; i++, que++)
+               mask |= (1 << que->msix);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+       /* For VTEIMS: unmask the link vector, then each queue vector */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
+       que = sc->queues;
+       for (i = 0; i < sc->num_queues; i++, que++)
+               ixv_enable_queue(sc, que->msix);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       return;
+} /* ixv_enable_intr */
+
+/************************************************************************
+ * ixv_disable_intr - Mask all VF interrupt sources.
+ ************************************************************************/
+static void
+ixv_disable_intr(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+
+       /* Turn off auto-clear, then mask every cause and flush. */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+       IXGBE_WRITE_FLUSH(hw);
+} /* ixv_disable_intr */
+
+/************************************************************************
+ * ixv_set_ivar
+ *
+ *   Setup the correct IVAR register for a particular MSI-X interrupt
+ *    - entry is the register array entry
+ *    - vector is the MSI-X vector for this queue
+ *    - type is RX/TX/MISC (0 = RX, 1 = TX, -1 = mailbox/misc)
+ ************************************************************************/
+static void
+ixv_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t             ivar, index;
+
+       /* Mark the IVAR entry valid. */
+       vector |= IXGBE_IVAR_ALLOC_VAL;
+
+       if (type == -1) { /* MISC IVAR */
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+               ivar &= ~0xFF;
+               ivar |= vector;
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+       } else {          /* RX/TX IVARS */
+               /* Each 32-bit IVAR holds two queues: RX/TX byte lanes
+                * selected by entry parity and the type. */
+               index = (16 * (entry & 1)) + (8 * type);
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+               ivar &= ~(0xFF << index);
+               ivar |= (vector << index);
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+       }
+} /* ixv_set_ivar */
+
+/************************************************************************
+ * ixv_configure_ivars
+ *
+ *   Maps every queue's RX and TX causes to its MSI-X vector, seeds
+ *   the interrupt throttle rate, and routes the mailbox cause.
+ ************************************************************************/
+static void
+ixv_configure_ivars(struct ix_softc *sc)
+{
+       struct ix_queue *que = sc->queues;
+
+       for (int i = 0; i < sc->num_queues; i++, que++) {
+               /* First the RX queue entry */
+               ixv_set_ivar(sc, i, que->msix, 0);
+               /* ... and the TX */
+               ixv_set_ivar(sc, i, que->msix, 1);
+               /* Set an initial value in EITR */
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
+                   IXGBE_EITR_DEFAULT);
+       }
+
+       /* For the mailbox interrupt.  NOTE(review): the entry argument
+        * is unused for type -1 in ixv_set_ivar(); the literal 1 here
+        * is presumably arbitrary — confirm. */
+       ixv_set_ivar(sc, 1, sc->linkvec, -1);
+} /* ixv_configure_ivars */
+
+/************************************************************************
+ * ixv_ioctl - Ioctl entry point
+ *
+ *   Called when the user wants to configure the interface.
+ *
+ *   return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+       struct ix_softc *sc = ifp->if_softc;
+       struct ifreq   *ifr = (struct ifreq *)data;
+       int             s, error = 0;
+
+       s = splnet();
+
+       switch (command) {
+       case SIOCSIFADDR:
+               IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
+               /* Assigning an address implies bringing the interface up. */
+               ifp->if_flags |= IFF_UP;
+               if (!(ifp->if_flags & IFF_RUNNING))
+                       ixv_init(sc);
+               break;
+
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+               if (ifp->if_flags & IFF_UP) {
+                       /* Already running: just reprogram filters below. */
+                       if (ifp->if_flags & IFF_RUNNING)
+                               error = ENETRESET;
+                       else
+                               ixv_init(sc);
+               } else {
+                       if (ifp->if_flags & IFF_RUNNING)
+                               ixv_stop(sc);
+               }
+               break;
+
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+               break;
+
+       case SIOCGIFRXR:
+               error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
+               break;
+
+       case SIOCGIFSFFPAGE:
+               /* Serialize SFF page reads against each other. */
+               error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
+               if (error != 0)
+                       break;
+
+               error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
+               rw_exit(&sc->sfflock);
+               break;
+
+       default:
+               error = ether_ioctl(ifp, &sc->arpcom, command, data);
+       }
+
+       /* ENETRESET: reprogram the multicast filter with interrupts
+        * masked, without a full re-init. */
+       if (error == ENETRESET) {
+               if (ifp->if_flags & IFF_RUNNING) {
+                       ixv_disable_intr(sc);
+                       ixv_set_multi(sc);
+                       ixv_enable_intr(sc);
+               }
+               error = 0;
+       }
+
+       splx(s);
+       return (error);
+} /* ixv_ioctl */
+
+/************************************************************************
+ * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
+ *
+ *   Maps and establishes one vector per queue plus one mailbox
+ *   vector.  Returns 0 on success or ENOMEM, tearing down any
+ *   already-established queue vectors on failure.
+ ************************************************************************/
+static int
+ixv_allocate_msix(struct ix_softc *sc)
+{
+       struct ixgbe_osdep      *os = &sc->osdep;
+       struct pci_attach_args  *pa  = &os->os_pa;
+       int                      i = 0, error = 0;
+       struct ix_queue         *que;
+       pci_intr_handle_t       ih;
+       pcireg_t                reg, msix_ctrl;
+
+       for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
+               if (pci_intr_map_msix(pa, i, &ih)) {
+                       printf("ixv_allocate_msix: "
+                              "pci_intr_map_msix vec %d failed\n", i);
+                       error = ENOMEM;
+                       goto fail;
+               }
+
+               que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
+                       IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
+                       ixv_msix_que, que, que->name);
+               if (que->tag == NULL) {
+                       printf("ixv_allocate_msix: "
+                              "pci_intr_establish vec %d failed\n", i);
+                       error = ENOMEM;
+                       goto fail;
+               }
+
+               que->msix = i;
+       }
+
+       /* and Mailbox */
+       if (pci_intr_map_msix(pa, i, &ih)) {
+               printf("ixgbe_allocate_msix: "
+                      "pci_intr_map_msix mbox vector failed\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
+                       ixv_msix_mbx, sc, sc->dev.dv_xname);
+       if (sc->tag == NULL) {
+               printf("ixv_allocate_msix: "
+                      "pci_intr_establish mbox vector failed\n");
+               error = ENOMEM;
+               goto fail;
+       }
+       sc->linkvec = i;
+
+       /*
+        * Due to a broken design QEMU will fail to properly
+        * enable the guest for MSI-X unless the vectors in
+        * the table are all set up, so we must rewrite the
+        * ENABLE in the MSI-X control register again at this
+        * point to cause it to successfully initialize us.
+        */
+       if (sc->hw.mac.type == ixgbe_mac_82599_vf &&
+           pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, NULL,
+           &reg)) {
+               reg += PCIR_MSIX_CTRL;
+               msix_ctrl = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
+               msix_ctrl |= PCI_MSIX_MC_MSIXE;
+               /*
+                * pci_conf_write(9) takes the register offset before the
+                * value; the arguments were previously swapped, writing
+                * the offset to a bogus config register.
+                */
+               pci_conf_write(pa->pa_pc, pa->pa_tag, reg, msix_ctrl);
+       }
+
+       printf(", %s, %d queue%s\n", pci_intr_string(pa->pa_pc, ih),
+           i, (i > 1) ? "s" : "");
+
+       return (0);
+
+fail:
+       /* Undo every queue vector established before the failure. */
+       for (que = sc->queues; i > 0; i--, que++) {
+               if (que->tag == NULL)
+                       continue;
+               pci_intr_disestablish(pa->pa_pc, que->tag);
+               que->tag = NULL;
+       }
+       return (error);
+} /* ixv_allocate_msix */
+
+#if NKSTAT > 0
+/* Indices into the flat kstat value array built by ixv_kstats(). */
+enum ixv_counter_idx {
+       ixv_good_packets_received_count,
+       ixv_good_packets_transmitted_count,
+       ixv_good_octets_received_count,
+       ixv_good_octets_transmitted_count,
+       ixv_multicast_packets_received_count,
+
+       ixv_counter_num,
+};
+
+/* The unit field below is stored in a uint8_t. */
+CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
+CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
+
+/* Description of one VF hardware statistics register. */
+struct ixv_counter {
+       char                     name[KSTAT_KV_NAMELEN];
+       uint32_t                 reg;   /* register offset (0 = unused) */
+       uint8_t                  width; /* counter width in bits: 32 or 36 */
+       uint8_t                  unit;  /* KSTAT_KV_U_* unit of the value */
+};
+
+/*
+ * These initializer lines were previously broken across two physical
+ * lines by mail wrapping (the continuations lost their '+' prefix),
+ * which made the patch unappliable.  Rejoined here.
+ */
+static const struct ixv_counter ixv_counters[ixv_counter_num] = {
+       [ixv_good_packets_received_count] =
+           { "rx good",  IXGBE_VFGPRC, 32, KSTAT_KV_U_PACKETS },
+       [ixv_good_packets_transmitted_count] =
+           { "tx good",  IXGBE_VFGPTC, 32, KSTAT_KV_U_PACKETS },
+       [ixv_good_octets_received_count] =
+           { "rx total",  IXGBE_VFGORC_LSB, 36, KSTAT_KV_U_BYTES },
+       [ixv_good_octets_transmitted_count] =
+           { "tx total",  IXGBE_VFGOTC_LSB, 36, KSTAT_KV_U_BYTES },
+       [ixv_multicast_packets_received_count] =
+           { "rx mcast",  IXGBE_VFMPRC, 32, KSTAT_KV_U_PACKETS },
+};
+
+/* Per-RX-queue statistics exported via kstat(4). */
+struct ixv_rxq_kstats {
+       struct kstat_kv qprc;
+       struct kstat_kv qbrc;
+       struct kstat_kv qprdc;
+};
+
+static const struct ixv_rxq_kstats ixv_rxq_kstats_tpl = {
+       KSTAT_KV_UNIT_INITIALIZER("packets",
+           KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+       KSTAT_KV_UNIT_INITIALIZER("bytes",
+           KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
+       KSTAT_KV_UNIT_INITIALIZER("qdrops",
+           KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+};
+
+/* Per-TX-queue statistics exported via kstat(4). */
+struct ixv_txq_kstats {
+       struct kstat_kv qptc;
+       struct kstat_kv qbtc;
+};
+
+static const struct ixv_txq_kstats ixv_txq_kstats_tpl = {
+       KSTAT_KV_UNIT_INITIALIZER("packets",
+           KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+       KSTAT_KV_UNIT_INITIALIZER("bytes",
+           KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
+};
+
+static int     ixv_kstats_read(struct kstat *ks);
+static int     ixv_rxq_kstats_read(struct kstat *ks);
+static int     ixv_txq_kstats_read(struct kstat *ks);
+
+/*
+ * ixv_kstats - Create the per-device statistics kstat.
+ *
+ * Initializes the kstat mutex and refresh timeout, then publishes
+ * one COUNTER64 entry per row of ixv_counters[].
+ */
+static void
+ixv_kstats(struct ix_softc *sc)
+{
+       struct kstat *ks;
+       struct kstat_kv *kvs;
+       unsigned int i;
+
+       mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
+       timeout_set(&sc->sc_kstat_tmo, ixv_kstats_tick, sc);
+
+       ks = kstat_create(sc->dev.dv_xname, 0, "ixv-stats", 0,
+           KSTAT_T_KV, 0);
+       if (ks == NULL)
+               return;
+
+       kvs = mallocarray(nitems(ixv_counters), sizeof(*kvs),
+           M_DEVBUF, M_WAITOK|M_ZERO);
+
+       /* One kstat value per hardware counter, in table order. */
+       for (i = 0; i < nitems(ixv_counters); i++) {
+               const struct ixv_counter *ixc = &ixv_counters[i];
+
+               kstat_kv_unit_init(&kvs[i], ixc->name,
+                   KSTAT_KV_T_COUNTER64, ixc->unit);
+       }
+
+       kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+       ks->ks_softc = sc;
+       ks->ks_data = kvs;
+       ks->ks_datalen = nitems(ixv_counters) * sizeof(*kvs);
+       ks->ks_read = ixv_kstats_read;
+
+       sc->sc_kstat = ks;
+       kstat_install(ks);
+}
+
+/*
+ * ixv_rxq_kstats - Create the kstat for one RX ring.
+ *
+ * The kstat is named "ixv-rxq" with the ring index as its unit and
+ * reads through ixv_rxq_kstats_read().
+ */
+static void
+ixv_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
+{
+       struct ixv_rxq_kstats *stats;
+       struct kstat *ks;
+
+       ks = kstat_create(sc->dev.dv_xname, 0, "ixv-rxq", rxr->me,
+           KSTAT_T_KV, 0);
+       if (ks == NULL)
+               return;
+
+       /* Copy the template so names/units are pre-initialized. */
+       stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
+       *stats = ixv_rxq_kstats_tpl;
+
+       kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+       ks->ks_softc = rxr;
+       ks->ks_data = stats;
+       ks->ks_datalen = sizeof(*stats);
+       ks->ks_read = ixv_rxq_kstats_read;
+
+       rxr->kstat = ks;
+       kstat_install(ks);
+}
+
+/*
+ * ixv_txq_kstats - Create the kstat for one TX ring.
+ *
+ * The kstat is named "ixv-txq" with the ring index as its unit and
+ * reads through ixv_txq_kstats_read().
+ */
+static void
+ixv_txq_kstats(struct ix_softc *sc, struct tx_ring *txr)
+{
+       struct kstat *ks;
+       struct ixv_txq_kstats *kvs;
+
+       ks = kstat_create(sc->dev.dv_xname, 0, "ixv-txq", txr->me,
+           KSTAT_T_KV, 0);
+       if (ks == NULL)
+               return;
+
+       /* Copy the template so names/units are pre-initialized. */
+       kvs = malloc(sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
+       *kvs = ixv_txq_kstats_tpl;
+
+       ks->ks_softc = txr;
+       ks->ks_data = kvs;
+       ks->ks_datalen = sizeof(*kvs);
+       ks->ks_read = ixv_txq_kstats_read;
+       kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+
+       txr->kstat = ks;
+       kstat_install(ks);
+}
+
+/**********************************************************************
+ *
+ *  Update the board statistics counters.
+ *
+ **********************************************************************/
+
+/*
+ * ixv_kstats_tick - Periodic (1 Hz) statistics refresh.
+ *
+ * Re-arms itself, then pulls the device-wide and per-ring counters
+ * under the kstat mutex.
+ */
+static void
+ixv_kstats_tick(void *arg)
+{
+       struct ix_softc *sc = arg;
+       int i;
+
+       timeout_add_sec(&sc->sc_kstat_tmo, 1);
+
+       mtx_enter(&sc->sc_kstat_mtx);
+       ixv_kstats_read(sc->sc_kstat);
+       for (i = 0; i < sc->num_queues; i++) {
+               ixv_rxq_kstats_read(sc->rx_rings[i].kstat);
+               ixv_txq_kstats_read(sc->tx_rings[i].kstat);
+       }
+       mtx_leave(&sc->sc_kstat_mtx);
+}
+
+/* Combine a 36-bit statistic split across a lo/hi register pair. */
+static uint64_t
+ixv_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
+{
+       uint64_t value;
+
+       value = IXGBE_READ_REG(hw, loreg);
+       /* Only the low 4 bits of the high register are significant. */
+       value |= ((uint64_t)IXGBE_READ_REG(hw, hireg) & 0xf) << 32;
+
+       return (value);
+}
+
+/*
+ * ixv_kstats_read - kstat read callback for the device-wide counters.
+ *
+ * Walks ixv_counters[] and snapshots each hardware register into the
+ * corresponding kstat value.  Called with the kstat mutex held.
+ */
+static int
+ixv_kstats_read(struct kstat *ks)
+{
+       struct ix_softc *sc = ks->ks_softc;
+       struct kstat_kv *kvs = ks->ks_data;
+       struct ixgbe_hw *hw = &sc->hw;
+       unsigned int i;
+
+       for (i = 0; i < nitems(ixv_counters); i++) {
+               const struct ixv_counter *ixc = &ixv_counters[i];
+               uint32_t reg = ixc->reg;
+               uint64_t v;
+
+               /* reg == 0 marks an unimplemented counter. */
+               if (reg == 0)
+                       continue;
+
+               if (ixc->width > 32) {
+                       /*
+                        * NOTE(review): a VF mac type should never be
+                        * 82598EB; this branch looks inherited from
+                        * ix(4) — confirm whether it is reachable.
+                        */
+                       if (sc->hw.mac.type == ixgbe_mac_82598EB)
+                               v = IXGBE_READ_REG(hw, reg + 4);
+                       else
+                               v = ixv_read36(hw, reg, reg + 4);
+               } else
+                       v = IXGBE_READ_REG(hw, reg);
+
+               kstat_kv_u64(&kvs[i]) = v;
+       }
+
+       getnanouptime(&ks->ks_updated);
+
+       return (0);
+}
+
+/*
+ * ixv_rxq_kstats_read - kstat read callback for one RX ring.
+ *
+ * Accumulates the queue packet/byte/drop counters.  The hardware
+ * counters are read-clear style here: values are added into the
+ * kstat totals rather than assigned.
+ *
+ * NOTE(review): IXGBE_QPRC/QBRC/QPRDC/RNBC are PF queue-statistics
+ * register offsets; confirm they are accessible through the VF BAR
+ * or whether the VFGPRC-family equivalents should be used.
+ */
+int
+ixv_rxq_kstats_read(struct kstat *ks)
+{
+       struct ixv_rxq_kstats *stats = ks->ks_data;
+       struct rx_ring *rxr = ks->ks_softc;
+       struct ix_softc *sc = rxr->sc;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t i = rxr->me;
+
+       kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               kstat_kv_u64(&stats->qprdc) +=
+                   IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+               kstat_kv_u64(&stats->qbrc) +=
+                   IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+       } else {
+               kstat_kv_u64(&stats->qprdc) +=
+                   IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               kstat_kv_u64(&stats->qbrc) +=
+                   ixv_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
+       }
+
+       getnanouptime(&ks->ks_updated);
+
+       return (0);
+}
+
+/*
+ * ixv_txq_kstats_read - kstat read callback for one TX ring.
+ *
+ * Accumulates the queue packet/byte counters into the kstat totals.
+ *
+ * NOTE(review): IXGBE_QPTC/QBTC are PF queue-statistics register
+ * offsets; confirm they are valid through the VF BAR.
+ */
+int
+ixv_txq_kstats_read(struct kstat *ks)
+{
+       struct ixv_txq_kstats *stats = ks->ks_data;
+       struct tx_ring *txr = ks->ks_softc;
+       struct ix_softc *sc = txr->sc;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t i = txr->me;
+
+       kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               kstat_kv_u64(&stats->qbtc) +=
+                   IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+       } else {
+               kstat_kv_u64(&stats->qbtc) +=
+                   ixv_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
+       }
+
+       getnanouptime(&ks->ks_updated);
+
+       return (0);
+}
+#endif /* NKSTAT > 0 */
diff --git a/sys/dev/pci/ixgbe.c b/sys/dev/pci/ixgbe.c
index 0fd3b9b8fbf..b395715ed27 100644
--- a/sys/dev/pci/ixgbe.c
+++ b/sys/dev/pci/ixgbe.c
@@ -70,7 +70,6 @@ int32_t prot_autoc_write_generic(struct ixgbe_hw *, uint32_t, bool);
 /* MBX */
 int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id);
 int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id);
-uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw);
 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask,
                               int32_t index);
 int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number);
@@ -4473,7 +4472,6 @@ void ixgbe_enable_rx(struct ixgbe_hw *hw)
 int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       int32_t ret_val = IXGBE_ERR_MBX;
 
        DEBUGFUNC("ixgbe_read_mbx");
 
@@ -4482,7 +4480,40 @@ int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16
                size = mbx->size;
 
        if (mbx->ops.read)
-               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+               return mbx->ops.read(hw, msg, size, mbx_id);
+
+       return IXGBE_ERR_CONFIG;
+}
+
+/**
+ * ixgbe_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+int32_t ixgbe_poll_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+                      uint16_t mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       int32_t ret_val;
+
+       DEBUGFUNC("ixgbe_poll_mbx");
+
+       if (!mbx->ops.read || !mbx->ops.check_for_msg ||
+           !mbx->timeout)
+               return IXGBE_ERR_CONFIG;
+
+       /* limit read to size of mailbox */
+       if (size > mbx->size)
+               size = mbx->size;
+
+       ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+       /* if ack received read message, otherwise we timed out */
+       if (!ret_val)
+               return mbx->ops.read(hw, msg, size, mbx_id);
 
        return ret_val;
 }
@@ -4499,15 +4530,25 @@ int32_t ixgbe_read_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16
 int32_t ixgbe_write_mbx(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size, uint16_t mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       int32_t ret_val = IXGBE_SUCCESS;
+       int32_t ret_val = IXGBE_ERR_MBX;
 
        DEBUGFUNC("ixgbe_write_mbx");
 
-       if (size > mbx->size)
-               ret_val = IXGBE_ERR_MBX;
+       /*
+        * exit if either we can't write, release
+        * or there is no timeout defined
+        */
+       if (!mbx->ops.write || !mbx->ops.check_for_ack ||
+           !mbx->ops.release || !mbx->timeout)
+               return IXGBE_ERR_CONFIG;
 
-       else if (mbx->ops.write)
+       if (size > mbx->size) {
+               ret_val = IXGBE_ERR_PARAM;
+               ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+                            "Invalid mailbox message size %u", size);
+       } else {
                ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+       }
 
        return ret_val;
 }
@@ -4587,7 +4628,7 @@ int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
        DEBUGFUNC("ixgbe_poll_for_msg");
 
        if (!countdown || !mbx->ops.check_for_msg)
-               goto out;
+               return IXGBE_ERR_CONFIG;
 
        while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
                countdown--;
@@ -4596,12 +4637,13 @@ int32_t ixgbe_poll_for_msg(struct ixgbe_hw *hw, uint16_t mbx_id)
                usec_delay(mbx->usec_delay);
        }
 
-       if (countdown == 0)
+       if (countdown == 0) {
                ERROR_REPORT2(IXGBE_ERROR_POLLING,
-                          "Polling for VF%d mailbox message timedout", mbx_id);
+                          "Polling for VF%u mailbox message timedout", mbx_id);
+               return IXGBE_ERR_TIMEOUT;
+       }
 
-out:
-       return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+       return IXGBE_SUCCESS;
 }
 
 /**
@@ -4619,7 +4661,7 @@ int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
        DEBUGFUNC("ixgbe_poll_for_ack");
 
        if (!countdown || !mbx->ops.check_for_ack)
-               goto out;
+               return IXGBE_ERR_CONFIG;
 
        while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
                countdown--;
@@ -4628,12 +4670,180 @@ int32_t ixgbe_poll_for_ack(struct ixgbe_hw *hw, uint16_t mbx_id)
                usec_delay(mbx->usec_delay);
        }
 
-       if (countdown == 0)
+       if (countdown == 0) {
                ERROR_REPORT2(IXGBE_ERROR_POLLING,
-                            "Polling for VF%d mailbox ack timedout", mbx_id);
+                            "Polling for VF%u mailbox ack timedout", mbx_id);
+               return IXGBE_ERR_TIMEOUT;
+       }
 
-out:
-       return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mailbox_vf - read VF's mailbox register
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the mailbox register dedicated for VF without
+ * losing the read to clear status bits.
+ **/
+static uint32_t ixgbe_read_mailbox_vf(struct ixgbe_hw *hw)
+{
+       uint32_t vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+       vf_mailbox |= hw->mbx.vf_mailbox;
+       hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+       return vf_mailbox;
+}
+
+static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw)
+{
+       uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
+
+       if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+               hw->mbx.stats.reqs++;
+               hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+       }
+}
+
+static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw)
+{
+       uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
+
+       if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+               hw->mbx.stats.acks++;
+               hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+       }
+}
+
+static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw)
+{
+       uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
+
+       if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+               hw->mbx.stats.rsts++;
+               hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+                                       IXGBE_VFMAILBOX_RSTD);
+       }
+}
+
+/**
+ * ixgbe_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static int32_t ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, uint32_t mask)
+{
+       uint32_t vf_mailbox = ixgbe_read_mailbox_vf(hw);
+
+       if (vf_mailbox & mask)
+               return IXGBE_SUCCESS;
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static int32_t ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+       DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS))
+               return IXGBE_SUCCESS;
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static int32_t ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+       DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+               /* TODO: should this be autocleared? */
+               ixgbe_clear_ack_vf(hw);
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns TRUE if the PF has set the reset done bit or else FALSE
+ **/
+static int32_t ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+       DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI |
+                                         IXGBE_VFMAILBOX_RSTD)) {
+               /* TODO: should this be autocleared? */
+               ixgbe_clear_rst_vf(hw);
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static int32_t ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+       int32_t ret_val = IXGBE_ERR_MBX;
+       uint32_t vf_mailbox;
+
+       DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+       if (!mbx->timeout)
+               return IXGBE_ERR_CONFIG;
+
+       while (countdown--) {
+               /* Reserve mailbox for VF use */
+               vf_mailbox = ixgbe_read_mailbox_vf(hw);
+               vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+               IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+               /* Verify that VF is the owner of the lock */
+               if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+                       ret_val = IXGBE_SUCCESS;
+                       break;
+               }
+
+               /* Wait a bit before trying again */
+               usec_delay(mbx->usec_delay);
+       }
+
+       if (ret_val != IXGBE_SUCCESS) {
+               ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+                               "Failed to obtain mailbox lock");
+               ret_val = IXGBE_ERR_TIMEOUT;
+       }
+
+       return ret_val;
 }
 
 /**
@@ -4711,34 +4921,14 @@ void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
        mbx->ops.write_posted = ixgbe_write_posted_mbx;
 }
 
-/**
- *  ixgbe_read_v2p_mailbox - read v2p mailbox
- *  @hw: pointer to the HW structure
- *
- *  This function is used to read the v2p mailbox without losing the read to
- *  clear status bits.
- **/
-uint32_t ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
-{
-       uint32_t v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
-       v2p_mailbox |= hw->mbx.v2p_mailbox;
-       hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
-
-       return v2p_mailbox;
-}
-
 int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index)
 {
-       uint32_t mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
-       int32_t ret_val = IXGBE_ERR_MBX;
+       uint32_t pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
 
-       if (mbvficr & mask) {
-               ret_val = IXGBE_SUCCESS;
-               IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
-       }
+       if (pfmbicr & mask)
+               return IXGBE_SUCCESS;
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
 
 /**
@@ -4748,21 +4938,47 @@ int32_t ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, uint32_t mask, int32_t index
  *
  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
  **/
-int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_id)
 {
-       int32_t ret_val = IXGBE_ERR_MBX;
-       int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
-       uint32_t vf_bit = vf_number % 16;
-
+       uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+       int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
        DEBUGFUNC("ixgbe_check_for_msg_pf");
 
-       if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
-                                   index)) {
-               ret_val = IXGBE_SUCCESS;
+       if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
+                                   index))
+               return IXGBE_SUCCESS;
+
+       return IXGBE_ERR_MBX;
+}
+
+static void ixgbe_clear_msg_pf(struct ixgbe_hw *hw, uint16_t vf_id)
+{
+       uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+       int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
+       uint32_t pfmbicr;
+
+       pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+       if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift))
                hw->mbx.stats.reqs++;
-       }
 
-       return ret_val;
+       IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+                       IXGBE_PFMBICR_VFREQ_VF1 << vf_shift);
+}
+
+static void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, uint16_t vf_id)
+{
+       uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+       int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
+       uint32_t pfmbicr;
+
+       pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+       if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift))
+               hw->mbx.stats.acks++;
+
+       IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+                       IXGBE_PFMBICR_VFACK_VF1 << vf_shift);
 }
 
 /**
@@ -4772,18 +4988,19 @@ int32_t ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, uint16_t vf_number)
  *
  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
  **/
-int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_id)
 {
+       uint32_t vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+       int32_t index = IXGBE_PFMBICR_INDEX(vf_id);
        int32_t ret_val = IXGBE_ERR_MBX;
-       int32_t index = IXGBE_MBVFICR_INDEX(vf_number);
-       uint32_t vf_bit = vf_number % 16;
 
        DEBUGFUNC("ixgbe_check_for_ack_pf");
 
-       if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+       if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
                                    index)) {
                ret_val = IXGBE_SUCCESS;
-               hw->mbx.stats.acks++;
+               /* TODO: should this be autocleared? */
+               ixgbe_clear_ack_pf(hw, vf_id);
        }
 
        return ret_val;
@@ -4796,24 +5013,24 @@ int32_t ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, uint16_t vf_number)
  *
  *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
  **/
-int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_id)
 {
-       uint32_t reg_offset = (vf_number < 32) ? 0 : 1;
-       uint32_t vf_shift = vf_number % 32;
-       uint32_t vflre = 0;
+       uint32_t vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
+       uint32_t index = IXGBE_PFVFLRE_INDEX(vf_id);
        int32_t ret_val = IXGBE_ERR_MBX;
+       uint32_t vflre = 0;
 
        DEBUGFUNC("ixgbe_check_for_rst_pf");
 
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
-               vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+               vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
                break;
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
        case ixgbe_mac_X550EM_a:
        case ixgbe_mac_X540:
-               vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+               vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
                break;
        default:
                break;
@@ -4821,7 +5038,7 @@ int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
 
        if (vflre & (1 << vf_shift)) {
                ret_val = IXGBE_SUCCESS;
-               IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
                hw->mbx.stats.rsts++;
        }
 
@@ -4835,28 +5052,61 @@ int32_t ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, uint16_t vf_number)
  *
  *  return SUCCESS if we obtained the mailbox lock
  **/
-int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
+int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_id)
 {
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
        int32_t ret_val = IXGBE_ERR_MBX;
-       uint32_t p2v_mailbox;
+       uint32_t pf_mailbox;
 
        DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
 
-       /* Take ownership of the buffer */
-       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+       if (!mbx->timeout)
+               return IXGBE_ERR_CONFIG;
 
-       /* reserve mailbox for vf use */
-       p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
-       if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
-               ret_val = IXGBE_SUCCESS;
-       else
-               ERROR_REPORT2(IXGBE_ERROR_POLLING,
-                          "Failed to obtain mailbox lock for VF%d", vf_number);
+       while (countdown--) {
+               /* Reserve mailbox for PF use */
+               pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+               pf_mailbox |= IXGBE_PFMAILBOX_PFU;
+               IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+               /* Verify that PF is the owner of the lock */
+               pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+               if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
+                       ret_val = IXGBE_SUCCESS;
+                       break;
+               }
 
+               /* Wait a bit before trying again */
+               usec_delay(mbx->usec_delay);
+       }
+
+       if (ret_val != IXGBE_SUCCESS) {
+               ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+                             "Failed to obtain mailbox lock");
+               ret_val = IXGBE_ERR_TIMEOUT;
+       }
 
        return ret_val;
 }
 
+/**
+ * ixgbe_release_mbx_lock_pf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ **/
+static void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_id)
+{
+       uint32_t pf_mailbox;
+
+       DEBUGFUNC("ixgbe_release_mbx_lock_pf");
+
+       /* Return ownership of the buffer */
+       pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+       pf_mailbox &= ~IXGBE_PFMAILBOX_PFU;
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+}
+
 /**
  *  ixgbe_write_mbx_pf - Places a message in the mailbox
  *  @hw: pointer to the HW structure
@@ -4867,35 +5117,79 @@ int32_t ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, uint16_t vf_number)
  *  returns SUCCESS if it successfully copied message into the buffer
  **/
 int32_t ixgbe_write_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
-                          uint16_t vf_number)
+                          uint16_t vf_id)
 {
+       uint32_t pf_mailbox;
        int32_t ret_val;
        uint16_t i;
 
        DEBUGFUNC("ixgbe_write_mbx_pf");
 
        /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
        if (ret_val)
-               goto out_no_write;
+               goto out;
 
        /* flush msg and acks as we are overwriting the message buffer */
-       ixgbe_check_for_msg_pf(hw, vf_number);
-       ixgbe_check_for_ack_pf(hw, vf_number);
+       ixgbe_clear_msg_pf(hw, vf_id);
+       ixgbe_clear_ack_pf(hw, vf_id);
 
        /* copy the caller specified message to the mailbox memory buffer */
        for (i = 0; i < size; i++)
-               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
 
-       /* Interrupt VF to tell it a message has been sent and release buffer*/
-       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+       /* Interrupt VF to tell it a message has been sent */
+       pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+       pf_mailbox |= IXGBE_PFMAILBOX_STS;
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+       /* if msg sent wait until we receive an ack */
+       ixgbe_poll_for_ack(hw, vf_id);
 
        /* update stats */
        hw->mbx.stats.msgs_tx++;
 
-out_no_write:
+out:
+       hw->mbx.ops.release(hw, vf_id);
+
        return ret_val;
+}
+
+/**
+ * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer.  The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static int32_t ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
+                                       uint16_t size, uint16_t vf_id)
+{
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_read_mbx_pf_legacy");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+       if (ret_val != IXGBE_SUCCESS)
+               return ret_val;
+
+       /* copy the message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
+
+       /* Acknowledge the message and release buffer */
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
 
+       return IXGBE_SUCCESS;
 }
 
 /**
@@ -4910,32 +5204,350 @@ out_no_write:
  *  a message due to a VF request so no polling for message is needed.
  **/
 int32_t ixgbe_read_mbx_pf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
-                         uint16_t vf_number)
+                         uint16_t vf_id)
 {
+       uint32_t pf_mailbox;
        int32_t ret_val;
        uint16_t i;
 
        DEBUGFUNC("ixgbe_read_mbx_pf");
 
-       /* lock the mailbox to prevent pf/vf race condition */
-       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
-       if (ret_val)
-               goto out_no_read;
+       /* check if there is a message from VF */
+       ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
+       if (ret_val != IXGBE_SUCCESS)
+               return IXGBE_ERR_MBX_NOMSG;
+
+       ixgbe_clear_msg_pf(hw, vf_id);
 
        /* copy the message to the mailbox memory buffer */
        for (i = 0; i < size; i++)
-               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
 
        /* Acknowledge the message and release buffer */
-       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+       pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+       pf_mailbox |= IXGBE_PFMAILBOX_ACK;
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
 
        /* update stats */
        hw->mbx.stats.msgs_rx++;
 
-out_no_read:
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_release_mbx_lock_dummy - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+       DEBUGFUNC("ixgbe_release_mbx_lock_dummy");
+}
+
+/**
+ * ixgbe_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, uint16_t mbx_id)
+{
+       uint32_t vf_mailbox;
+
+       DEBUGFUNC("ixgbe_release_mbx_lock_vf");
+
+       /* Return ownership of the buffer */
+       vf_mailbox = ixgbe_read_mailbox_vf(hw);
+       vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
+                                        uint16_t size, uint16_t mbx_id)
+{
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_write_mbx_vf_legacy");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               return ret_val;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_check_for_msg_vf(hw, 0);
+       ixgbe_clear_msg_vf(hw);
+       ixgbe_check_for_ack_vf(hw, 0);
+       ixgbe_clear_ack_vf(hw);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* interrupt the PF to tell it a message has been sent */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t ixgbe_write_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg,
+                                 uint16_t size, uint16_t mbx_id)
+{
+       uint32_t vf_mailbox;
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_write_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_clear_msg_vf(hw);
+       ixgbe_clear_ack_vf(hw);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* interrupt the PF to tell it a message has been sent */
+       vf_mailbox = ixgbe_read_mailbox_vf(hw);
+       vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+       /* if msg sent wait until we receive an ack */
+       ixgbe_poll_for_ack(hw, mbx_id);
+
+out:
+       hw->mbx.ops.release(hw, mbx_id);
+
        return ret_val;
 }
 
+/**
+ * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static int32_t ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
+                                       uint16_t size, uint16_t mbx_id)
+{
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_read_mbx_vf_legacy");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               return ret_val;
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+       /* Acknowledge receipt and release mailbox, then we're done */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static int32_t ixgbe_read_mbx_vf(struct ixgbe_hw *hw, uint32_t *msg, uint16_t size,
+                                uint16_t mbx_id)
+{
+       uint32_t vf_mailbox;
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_read_mbx_vf");
+
+       /* check if there is a message from PF */
+       ret_val = ixgbe_check_for_msg_vf(hw, 0);
+       if (ret_val != IXGBE_SUCCESS)
+               return IXGBE_ERR_MBX_NOMSG;
+
+       ixgbe_clear_msg_vf(hw);
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+       /* Acknowledge receipt */
+       vf_mailbox = ixgbe_read_mailbox_vf(hw);
+       vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for the vf mailbox.
+ * Set of legacy functions is being used here
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+       mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+       mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+       mbx->ops.release = ixgbe_release_mbx_lock_dummy;
+       mbx->ops.read = ixgbe_read_mbx_vf_legacy;
+       mbx->ops.write = ixgbe_write_mbx_vf_legacy;
+       mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+       mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+       mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+       mbx->ops.clear = NULL;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+}
+
+/**
+ * ixgbe_upgrade_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+       mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+       mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+       mbx->ops.release = ixgbe_release_mbx_lock_vf;
+       mbx->ops.read = ixgbe_read_mbx_vf;
+       mbx->ops.write = ixgbe_write_mbx_vf;
+       mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
+       mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
+       mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+       mbx->ops.clear = NULL;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+}
+
+/**
+ * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, uint32_t *msg,
+                                        uint16_t size, uint16_t vf_id)
+{
+       int32_t ret_val;
+       uint16_t i;
+
+       DEBUGFUNC("ixgbe_write_mbx_pf_legacy");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+       if (ret_val)
+               return ret_val;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_check_for_msg_pf(hw, vf_id);
+       ixgbe_clear_msg_pf(hw, vf_id);
+       ixgbe_check_for_ack_pf(hw, vf_id);
+       ixgbe_clear_ack_pf(hw, vf_id);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
+
+       /* Interrupt VF to tell it a message has been sent and release buffer*/
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_mbx_pf - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Set VFMBMEM of given VF to 0x0.
+ **/
+static int32_t ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, uint16_t vf_id)
+{
+       uint16_t mbx_size = hw->mbx.size;
+       uint16_t i;
+
+       if (vf_id > 63)
+               return IXGBE_ERR_PARAM;
+
+       for (i = 0; i < mbx_size; ++i)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);
+
+       return IXGBE_SUCCESS;
+}
+
 /**
  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
  *  @hw: pointer to the HW structure
@@ -4953,18 +5565,59 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
            hw->mac.type != ixgbe_mac_X540)
                return;
 
-       mbx->timeout = 0;
-       mbx->usec_delay = 0;
+       /* Initialize common mailbox settings */
+       mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+       mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+       mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+       /* Initialize counters with zeroes */
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+
+       /* Initialize mailbox operations */
+       mbx->ops.release = ixgbe_release_mbx_lock_dummy;
+       mbx->ops.read = ixgbe_read_mbx_pf_legacy;
+       mbx->ops.write = ixgbe_write_mbx_pf_legacy;
+       mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
+       mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
+       mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+       mbx->ops.clear = ixgbe_clear_mbx_pf;
+}
+
+/**
+ * ixgbe_upgrade_mbx_params_pf - Upgrade initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes the hw->mbx struct to new function set for improved
+ * stability and handling of messages.
+ */
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, uint16_t vf_id)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+	/* Ensure we are not calling this function from VF */
+	if (hw->mac.type != ixgbe_mac_82599EB &&
+	    hw->mac.type != ixgbe_mac_X550 &&
+	    hw->mac.type != ixgbe_mac_X550EM_x &&
+	    hw->mac.type != ixgbe_mac_X550EM_a &&
+	    hw->mac.type != ixgbe_mac_X540)
+		return;
 
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+	mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
 	mbx->size = IXGBE_VFMAILBOX_SIZE;
 
+	mbx->ops.release = ixgbe_release_mbx_lock_pf;
 	mbx->ops.read = ixgbe_read_mbx_pf;
 	mbx->ops.write = ixgbe_write_mbx_pf;
-	mbx->ops.read_posted = ixgbe_read_posted_mbx;
-	mbx->ops.write_posted = ixgbe_write_posted_mbx;
 	mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
 	mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
 	mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+	mbx->ops.clear = ixgbe_clear_mbx_pf;
 
 	mbx->stats.msgs_tx = 0;
 	mbx->stats.msgs_rx = 0;
diff --git a/sys/dev/pci/ixgbe.h b/sys/dev/pci/ixgbe.h
index 5fbfba93d50..dd80b891edf 100644
--- a/sys/dev/pci/ixgbe.h
+++ b/sys/dev/pci/ixgbe.h
@@ -284,6 +284,7 @@ int32_t ixgbe_clear_vmdq(struct ixgbe_hw *hw, uint32_t rar, 
uint32_t vmdq);
 int32_t ixgbe_init_uta_tables(struct ixgbe_hw *hw);
 
 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, uint8_t *addr, uint32_t vmdq);
+int32_t ixgbe_mta_vector(struct ixgbe_hw *hw, uint8_t *mc_addr);
 void ixgbe_set_mta(struct ixgbe_hw *hw, uint8_t *mc_addr);
 
 void ixgbe_disable_rx(struct ixgbe_hw *hw);
@@ -360,8 +361,37 @@ int32_t ixgbe_write_i2c_combined_generic(struct ixgbe_hw 
*, uint8_t addr, uint16
 int32_t ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *, uint8_t 
addr,
                                                  uint16_t reg, uint16_t val);
 
+/* Virtual Functions */
+int32_t ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+uint32_t ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+uint32_t ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+int32_t ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, uint8_t *mac_addr);
+int32_t ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                           bool autoneg_wait_to_complete);
+int32_t ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                           bool *link_up, bool autoneg_wait_to_complete);
+int32_t ixgbe_set_rar_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr, 
uint32_t vmdq,
+                    uint32_t enable_addr);
+int32_t ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t 
*addr);
+int32_t ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, uint8_t 
*mc_addr_list,
+                                uint32_t mc_addr_count, ixgbe_mc_addr_itr,
+                                bool clear);
+int32_t ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode);
+int32_t ixgbe_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state);
+int32_t ixgbe_set_vfta_vf(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
+                     bool vlan_on, bool vlvf_bypass);
+int32_t ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, uint16_t max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+                      unsigned int *default_tc);
+
 /* MBX */
 int32_t ixgbe_read_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
+int32_t ixgbe_poll_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
 int32_t ixgbe_write_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
 int32_t ixgbe_read_posted_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, 
uint16_t);
 int32_t ixgbe_write_posted_mbx(struct ixgbe_hw *, uint32_t *, uint16_t, 
uint16_t);
@@ -369,6 +399,10 @@ int32_t ixgbe_check_for_msg(struct ixgbe_hw *, uint16_t);
 int32_t ixgbe_check_for_ack(struct ixgbe_hw *, uint16_t);
 int32_t ixgbe_check_for_rst(struct ixgbe_hw *, uint16_t);
 void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *);
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *, uint16_t);
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *, uint16_t);
 
 #endif /* _IXGBE_H_ */
diff --git a/sys/dev/pci/ixgbe_type.h b/sys/dev/pci/ixgbe_type.h
index 4b4dcb737f2..b1a1a866da7 100644
--- a/sys/dev/pci/ixgbe_type.h
+++ b/sys/dev/pci/ixgbe_type.h
@@ -463,8 +463,14 @@ struct ixgbe_nvm_version {
 #define IXGBE_PFMAILBOX(_i)    (0x04B00 + (4 * (_i))) /* 64 total */
 /* 64 Mailboxes, 16 DW each */
 #define IXGBE_PFMBMEM(_i)      (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR_INDEX(_i)        ((_i) >> 4)
+#define IXGBE_PFMBICR_SHIFT(_i)        ((_i) % 16)
 #define IXGBE_PFMBICR(_i)      (0x00710 + (4 * (_i))) /* 4 total */
 #define IXGBE_PFMBIMR(_i)      (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFVFLRE(_i)      ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_PFVFLREC(_i)     (0x00700 + ((_i) * 4))
+#define IXGBE_PFVFLRE_INDEX(_i)        ((_i) >> 5)
+#define IXGBE_PFVFLRE_SHIFT(_i)        ((_i) % 32)
 #define IXGBE_VFRE(_i)         (0x051E0 + ((_i) * 4))
 #define IXGBE_VFTE(_i)         (0x08110 + ((_i) * 4))
 #define IXGBE_VMECM(_i)                (0x08790 + ((_i) * 4))
@@ -3949,6 +3955,7 @@ struct ixgbe_mac_operations {
        int32_t (*update_mc_addr_list)(struct ixgbe_hw *, uint8_t *, uint32_t,
                                   ixgbe_mc_addr_itr, bool clear);
        int32_t (*update_xcast_mode)(struct ixgbe_hw *, int);
+       int32_t (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
        int32_t (*enable_mc)(struct ixgbe_hw *);
        int32_t (*disable_mc)(struct ixgbe_hw *);
        int32_t (*clear_vfta)(struct ixgbe_hw *);
@@ -3968,7 +3975,7 @@ struct ixgbe_mac_operations {
        /* Manageability interface */
        void (*disable_rx)(struct ixgbe_hw *hw);
        void (*enable_rx)(struct ixgbe_hw *hw);
-  void (*stop_mac_link_on_d3)(struct ixgbe_hw *);
+       void (*stop_mac_link_on_d3)(struct ixgbe_hw *);
        void (*set_source_address_pruning)(struct ixgbe_hw *, bool,
                                           unsigned int);
        int32_t (*dmac_update_tcs)(struct ixgbe_hw *hw);
@@ -4093,7 +4100,7 @@ struct ixgbe_phy_info {
 };
 
 #define IXGBE_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX          -100
+#define IXGBE_MAX_MULTICAST_ADDRESSES_VF  30
 
 #define IXGBE_VFMAILBOX                0x002FC
 #define IXGBE_VFMBMEM          0x00200
@@ -4115,22 +4122,25 @@ struct ixgbe_phy_info {
 #define IXGBE_PFMAILBOX_PFU    0x00000008 /* PF owns the mailbox buffer */
 #define IXGBE_PFMAILBOX_RVFU   0x00000010 /* Reset VFU - used when VF stuck */
 
-#define IXGBE_MBVFICR_VFREQ_MASK       0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1                0x00000001 /* bit for VF 1 
message */
-#define IXGBE_MBVFICR_VFACK_MASK       0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1                0x00010000 /* bit for VF 1 ack 
*/
+#define IXGBE_PFMBICR_VFREQ_MASK       0x0000FFFF /* bits for VF messages */
+#define IXGBE_PFMBICR_VFREQ_VF1                0x00000001 /* bit for VF 1 
message */
+#define IXGBE_PFMBICR_VFACK_MASK       0xFFFF0000 /* bits for VF acks */
+#define IXGBE_PFMBICR_VFACK_VF1                0x00010000 /* bit for VF 1 ack 
*/
 
 
 /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
  * PF.  The reverse is TRUE if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
  */
-#define IXGBE_VT_MSGTYPE_ACK   0x80000000 /* Messages below or'd with
-                                           * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK  0x40000000 /* Messages below or'd with
-                                           * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS   0x20000000 /* Indicates that VF is still
-                                           * clear to send requests */
+#define IXGBE_VT_MSGTYPE_SUCCESS       0x80000000 /* Messages or'd with this
+                                                   * have succeeded
+                                                   */
+#define IXGBE_VT_MSGTYPE_FAILURE       0x40000000 /* Messages or'd with this
+                                                   * have failed
+                                                   */
+#define IXGBE_VT_MSGTYPE_CTS           0x20000000 /* Indicates that VF is still
+                                                   * clear to send requests
+                                                   */
 #define IXGBE_VT_MSGINFO_SHIFT 16
 /* bits 23:16 are used for extra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK  (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -4147,6 +4157,9 @@ enum ixgbe_pfvf_api_rev {
        ixgbe_mbox_api_11,      /* API version 1.1, linux/freebsd VF driver */
        ixgbe_mbox_api_12,      /* API version 1.2, linux/freebsd VF driver */
        ixgbe_mbox_api_13,      /* API version 1.3, linux/freebsd VF driver */
+       /* API 1.4 is being used in the upstream for IPsec */
+       ixgbe_mbox_api_14,      /* API version 1.4, linux/freebsd VF driver */
+       ixgbe_mbox_api_15,      /* API version 1.5, linux/freebsd VF driver */
        /* This value should always be last */
        ixgbe_mbox_api_unknown, /* indicates that API version is not known */
 };
@@ -4169,6 +4182,7 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_GET_RETA      0x0a    /* VF request for RETA */
 #define IXGBE_VF_GET_RSS_KEY   0x0b    /* get RSS key */
 #define IXGBE_VF_UPDATE_XCAST_MODE     0x0c
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
 
 /* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
 enum ixgbevf_xcast_modes {
@@ -4207,9 +4221,61 @@ enum ixgbevf_xcast_modes {
 #define IXGBE_VF_MBX_INIT_TIMEOUT      2000 /* number of retries on mailbox */
 #define IXGBE_VF_MBX_INIT_DELAY                500  /* microseconds between 
retries */
 
+#define IXGBE_VF_IRQ_CLEAR_MASK        7
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS     8
+
+#define IXGBE_VFCTRL           0x00000
+#define IXGBE_VFSTATUS         0x00008
+#define IXGBE_VFLINKS          0x00010
+#define IXGBE_VFFRTIMER                0x00048
+#define IXGBE_VFRXMEMWRAP      0x03190
+#define IXGBE_VTEICR           0x00100
+#define IXGBE_VTEICS           0x00104
+#define IXGBE_VTEIMS           0x00108
+#define IXGBE_VTEIMC           0x0010C
+#define IXGBE_VTEIAC           0x00110
+#define IXGBE_VTEIAM           0x00114
+#define IXGBE_VTEITR(x)                (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)                (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC      0x00140
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
+/* define IXGBE_VFPBACL  still says TBD in EAS */
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE                0x00300
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC           0x0101C
+#define IXGBE_VFGPTC           0x0201C
+#define IXGBE_VFGORC_LSB       0x01020
+#define IXGBE_VFGORC_MSB       0x01024
+#define IXGBE_VFGOTC_LSB       0x02020
+#define IXGBE_VFGOTC_MSB       0x02024
+#define IXGBE_VFMPRC           0x01034
+#define IXGBE_VFMRQC           0x3000
+#define IXGBE_VFRSSRK(x)       (0x3100 + ((x) * 4))
+#define IXGBE_VFRETA(x)                (0x3200 + ((x) * 4))
 
 struct ixgbe_mbx_operations {
        void (*init_params)(struct ixgbe_hw *hw);
+       void (*release)(struct ixgbe_hw *, uint16_t);
        int32_t  (*read)(struct ixgbe_hw *, uint32_t *, uint16_t,  uint16_t);
        int32_t  (*write)(struct ixgbe_hw *, uint32_t *, uint16_t, uint16_t);
        int32_t  (*read_posted)(struct ixgbe_hw *, uint32_t *, uint16_t,  
uint16_t);
@@ -4217,6 +4283,7 @@ struct ixgbe_mbx_operations {
        int32_t  (*check_for_msg)(struct ixgbe_hw *, uint16_t);
        int32_t  (*check_for_ack)(struct ixgbe_hw *, uint16_t);
        int32_t  (*check_for_rst)(struct ixgbe_hw *, uint16_t);
+       int32_t  (*clear)(struct ixgbe_hw *, uint16_t);
 };
 
 struct ixgbe_mbx_stats {
@@ -4233,7 +4300,7 @@ struct ixgbe_mbx_info {
        struct ixgbe_mbx_stats stats;
        uint32_t timeout;
        uint32_t usec_delay;
-       uint32_t v2p_mailbox;
+       uint32_t vf_mailbox;
        uint16_t size;
 };
 
@@ -4307,6 +4374,9 @@ struct ixgbe_hw {
 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE          -38
 #define IXGBE_ERR_FW_RESP_INVALID              -39
 #define IXGBE_ERR_TOKEN_RETRY                  -40
+#define IXGBE_ERR_MBX                          -41
+#define IXGBE_ERR_MBX_NOMSG                    -42
+#define IXGBE_ERR_TIMEOUT                      -43
 
 #define IXGBE_NOT_IMPLEMENTED                  0x7FFFFFFF
 
diff --git a/sys/dev/pci/ixgbe_vf.c b/sys/dev/pci/ixgbe_vf.c
new file mode 100644
index 00000000000..f3cffdfcca6
--- /dev/null
+++ b/sys/dev/pci/ixgbe_vf.c
@@ -0,0 +1,799 @@
+/*     $OpenBSD$       */
+
+/******************************************************************************
+
+  Copyright (c) 2001-2017, Intel Corporation
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+   3. Neither the name of the Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <dev/pci/ixgbe.h>
+#include <dev/pci/ixgbe_type.h>
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ * Dummy handlers.
+ *
+ * These MAC operations are called from the shared ix(4) driver code,
+ * but there is nothing for a VF to do; each simply returns a neutral
+ * value so the shared code paths stay valid.
+ */
+static uint64_t
+ixgbe_dummy_uint64_handler_vf(struct ixgbe_hw *hw)
+{
+	return 0;
+}
+
+static int32_t
+ixgbe_dummy_handler_vf(struct ixgbe_hw *hw)
+{
+	return 0;
+}
+
+static void
+ixgbe_dummy_void_handler_vf(struct ixgbe_hw *hw)
+{
+	return;
+}
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ *
+ * Ops set to NULL or to a dummy handler are not applicable to a VF.
+ **/
+int32_t ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+	/* MAC */
+	hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+	hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+	hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+	/* Cannot clear stats on VF */
+	hw->mac.ops.clear_hw_cntrs = NULL;
+	hw->mac.ops.get_media_type = NULL;
+	hw->mac.ops.get_supported_physical_layer =
+		ixgbe_dummy_uint64_handler_vf;
+	hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+	hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+	hw->mac.ops.get_bus_info = NULL;
+	hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
+
+	/* Link */
+	hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+	hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+	hw->mac.ops.get_link_capabilities = NULL;
+
+	/* RAR, Multicast, VLAN */
+	hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+	hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+	hw->mac.ops.init_rx_addrs = NULL;
+	hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+	hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
+	hw->mac.ops.get_link_state = ixgbe_get_link_state_vf;
+	hw->mac.ops.enable_mc = NULL;
+	hw->mac.ops.disable_mc = NULL;
+	hw->mac.ops.clear_vfta = NULL;
+	hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+	hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
+
+	/* Flow Control: not controllable from a VF, use dummies */
+	hw->mac.ops.fc_enable = ixgbe_dummy_handler_vf;
+	hw->mac.ops.setup_fc = ixgbe_dummy_handler_vf;
+	hw->mac.ops.fc_autoneg = ixgbe_dummy_void_handler_vf;
+
+	/* Start with a single queue pair (NOTE(review): the driver may
+	 * raise these after querying the PF -- confirm against ixv(4)) */
+	hw->mac.max_tx_queues = 1;
+	hw->mac.max_rx_queues = 1;
+
+	hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+	return IXGBE_SUCCESS;
+}
+
+/* ixgbe_virt_clr_reg - Set register to default (power on) state.
+ * @hw: pointer to hardware structure
+ *
+ * Writes the power-on defaults to the per-queue RX/TX registers and
+ * flushes the writes.
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+	int i;
+	uint32_t vfsrrctl;
+	uint32_t vfdca_rxctrl;
+	uint32_t vfdca_txctrl;
+
+	/* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+	vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+	/* DCA_RXCTRL default value */
+	vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+		       IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+	/* DCA_TXCTRL default value */
+	vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+		       IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+		       IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	/* reset each per-queue register set to its power-on default
+	 * (7 sets, matching the upstream shared code) */
+	for (i = 0; i < 7; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+	}
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * On a VF there is no bus info, counters, link or flow control to set
+ * up (the PF owns all of that); all that is needed here is to clear
+ * the adapter_stopped flag so the rest of the driver may touch the
+ * hardware again.
+ **/
+int32_t ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = FALSE;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware.  Caches the MAC address (obtained from the PF during
+ * reset) into hw->mac.addr and returns the start_hw status.
+ **/
+int32_t ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+	int32_t status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.  Issues a function level reset, waits until the
+ * PF signals reset completion, then sends IXGBE_VF_RESET over the mailbox
+ * and extracts the permanent MAC address and multicast filter type from
+ * the PF's reply.
+ **/
+int32_t ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	uint32_t timeout = IXGBE_VF_INIT_TIMEOUT;
+	int32_t ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+	uint32_t msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+	uint8_t *addr = (uint8_t *)(&msgbuf[1]);
+
+	DEBUGFUNC("ixgbevf_reset_hw_vf");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* reset the api version back to base before renegotiation */
+	hw->api_version = ixgbe_mbox_api_10;
+	ixgbe_init_mbx_params_vf(hw);
+
+	DEBUGOUT("Issuing a function level reset to MAC\n");
+
+	IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+	IXGBE_WRITE_FLUSH(hw);
+
+	msec_delay(50);
+
+	/* we cannot reset while the RSTI / RSTD bits are asserted */
+	while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+		timeout--;
+		usec_delay(5);
+	}
+
+	if (!timeout)
+		return IXGBE_ERR_RESET_FAILED;
+
+	/* Reset VF registers to initial values */
+	ixgbe_virt_clr_reg(hw);
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+	msgbuf[0] = IXGBE_VF_RESET;
+	ixgbe_write_mbx(hw, msgbuf, 1, 0);
+
+	/* give the PF time to prepare the reply before polling */
+	msec_delay(10);
+
+	/*
+	 * set our "perm_addr" based on info provided by PF
+	 * also set up the mc_filter_type which is piggy backed
+	 * on the mac address in word 3
+	 */
+	ret_val = ixgbe_poll_mbx(hw, msgbuf,
+				 IXGBE_VF_PERMADDR_MSG_LEN, 0);
+	if (ret_val)
+		return ret_val;
+
+	/* anything other than an explicit SUCCESS/FAILURE reply is bogus */
+	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+
+	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
+		memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+int32_t ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+	uint32_t reg_val;
+	uint16_t i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = TRUE;
+
+	/* Clear interrupt mask to stop from interrupts being generated */
+	IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+	/* Clear any pending interrupts, flush previous writes */
+	IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	for (i = 0; i < hw->mac.max_tx_queues; i++)
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < hw->mac.max_rx_queues; i++) {
+		reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+		reg_val &= ~IXGBE_RXDCTL_ENABLE;
+		IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+	}
+	/* Clear packet split and pool config */
+	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+	/* flush all queues disables and let them take effect */
+	IXGBE_WRITE_FLUSH(hw);
+	msec_delay(2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_write_msg_read_ack - send a mailbox message and poll for the reply
+ * @hw: pointer to the HW structure
+ * @msg: message buffer to send to the PF
+ * @retmsg: buffer that receives the PF's reply (may alias @msg)
+ * @size: length of both buffers in 32-bit words
+ *
+ * Returns the write error if the send fails, otherwise the poll result.
+ **/
+static int32_t ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, uint32_t *msg,
+				      uint32_t *retmsg, uint16_t size)
+{
+	int32_t retval = ixgbe_write_mbx(hw, msg, size, 0);
+
+	if (retval)
+		return retval;
+
+	return ixgbe_poll_mbx(hw, retmsg, size, 0);
+}
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ *
+ * Asks the PF over the mailbox to program the MAC address.  If the PF
+ * rejects the address, the cached MAC is restored from "perm_addr" and
+ * IXGBE_ERR_MBX is returned.
+ **/
+int32_t ixgbe_set_rar_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
+			 uint32_t vmdq, uint32_t enable_addr)
+{
+	uint32_t msgbuf[3];
+	uint8_t *msg_addr = (uint8_t *)(&msgbuf[1]);
+	int32_t ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
+		ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+		return IXGBE_ERR_MBX;
+	}
+
+	return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ * @clear: unused
+ *
+ * Updates the Multicast Table Array by sending the MTA hash of each
+ * address to the PF in a single mailbox message.
+ **/
+int32_t ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+				     uint8_t *mc_addr_list,
+				     uint32_t mc_addr_count,
+				     ixgbe_mc_addr_itr next, bool clear)
+{
+	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
+	uint16_t *vector_list = (uint16_t *)&msgbuf[1];
+	uint32_t vector;
+	uint32_t cnt, i;
+	uint32_t vmdq;
+
+	DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+	/* Each entry in the list uses 1 16 bit word.  We have 30
+	 * 16 bit words available in our HW msg buffer (minus 1 for the
+	 * msg type).  That's 30 hash values if we pack 'em right.  If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except for in large enterprise network environments.
+	 */
+
+	DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+	/* cap the request at what fits in one mailbox message */
+	cnt = (mc_addr_count > IXGBE_MAX_MULTICAST_ADDRESSES_VF) ?
+	    IXGBE_MAX_MULTICAST_ADDRESSES_VF : mc_addr_count;
+	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+	for (i = 0; i < cnt; i++) {
+		vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+		DEBUGOUT1("Hash value = 0x%03X\n", vector);
+		vector_list[i] = (uint16_t)vector;
+	}
+
+	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+	    IXGBE_VFMAILBOX_SIZE);
+}
+
+/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.  Only available when the mailbox
+ * API version supports it (1.2 restricted to ALLMULTI; full support
+ * from 1.3 onward).
+ **/
+int32_t ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+	uint32_t msgbuf[2];
+	int32_t err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_12:
+		/* New modes were introduced in 1.3 version */
+		if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+			return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+		/* Fall through */
+	case ixgbe_mbox_api_13:
+	case ixgbe_mbox_api_14:
+	case ixgbe_mbox_api_15:
+		break;
+	default:
+		return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+	}
+
+	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+	msgbuf[1] = xcast_mode;
+
+	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+	if (msgbuf[0] ==
+	    (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
+		return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage; only written on success
+ *
+ * Returns state of the operation error or success.
+ **/
+int32_t ixgbe_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+	uint32_t msgbuf[2];
+	int32_t err;
+	int32_t ret_val;
+
+	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+	msgbuf[1] = 0x0;
+
+	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+	/* the PF reports the link state in the second message word */
+	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+		ret_val = IXGBE_ERR_MBX;
+	} else {
+		ret_val = IXGBE_SUCCESS;
+		*link_state = msgbuf[1];
+	}
+
+	return ret_val;
+}
+
<<<<<<< comment
+/**
+ * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if TRUE then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *               (unused here; kept for API parity with the PF variant)
+ *
+ * Turn on/off specified VLAN in the VLAN filter table by asking the PF
+ * with an IXGBE_VF_SET_VLAN mailbox message.
+ *
+ * Returns IXGBE_SUCCESS when the PF ACKs; otherwise the mailbox error
+ * OR-ed with the reply's FAILURE bit so callers see a nonzero result.
+ **/
+int32_t ixgbe_set_vfta_vf(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
+                     bool vlan_on, bool vlvf_bypass)
+{
+       uint32_t msgbuf[2];
+       int32_t ret_val;
+
+       msgbuf[0] = IXGBE_VF_SET_VLAN;
+       msgbuf[1] = vlan;
+       /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+       msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+       ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+       if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_SUCCESS))
+               return IXGBE_SUCCESS;
+
+       return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure (unused; VFs have a fixed maximum)
+ *
+ * Returns the number of transmit queues for the given adapter.
+ * Always the compile-time VF maximum; the PF-negotiated count is
+ * obtained separately via ixgbevf_get_queues().
+ **/
+uint32_t ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+       return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure (unused; VFs have a fixed maximum)
+ *
+ * Returns the number of receive queues for the given adapter.
+ * Always the compile-time VF maximum; the PF-negotiated count is
+ * obtained separately via ixgbevf_get_queues().
+ **/
+uint32_t ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+       return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address (output, IXGBE_ETH_LENGTH_OF_ADDRESS bytes)
+ *
+ * Copies the cached permanent address into @mac_addr.
+ * NOTE(review): presumably hw->mac.perm_addr was filled in by the VF
+ * reset mailbox handshake before this is called — confirm against the
+ * reset path.
+ *
+ * Always returns IXGBE_SUCCESS.
+ **/
+int32_t ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, uint8_t *mac_addr)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+               mac_addr[i] = hw->mac.perm_addr[i];
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_set_uc_addr_vf - Set or clear a unicast MAC/VLAN filter entry
+ * @hw: pointer to the HW structure
+ * @index: position in the VF's unicast list; 1 starts a new list,
+ *         0 tells the PF to clear all of this VF's macvlans
+ * @addr: MAC address to install, or NULL when only clearing
+ *
+ * Sends IXGBE_VF_SET_MACVLAN to the PF. Returns the mailbox error,
+ * IXGBE_ERR_OUT_OF_MEM when the PF NACKs (its filter table is full),
+ * or IXGBE_SUCCESS (0).
+ **/
+int32_t ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t 
*addr)
+{
+       uint32_t msgbuf[3], msgbuf_chk;
+       uint8_t *msg_addr = (uint8_t *)(&msgbuf[1]);
+       int32_t ret_val;
+
+       memset(msgbuf, 0, sizeof(msgbuf));
+       /*
+        * If index is one then this is the start of a new list and needs
+        * indication to the PF so it can do it's own list management.
+        * If it is zero then that tells the PF to just clear all of
+        * this VF's macvlans and there is no new list.
+        */
+       msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+       msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+       /* Remember the request word; the reply overwrites msgbuf[0]. */
+       msgbuf_chk = msgbuf[0];
+       if (addr)
+               memcpy(msg_addr, addr, 6);
+
+       ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+       if (!ret_val) {
+               msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+               if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
+                       return IXGBE_ERR_OUT_OF_MEM;
+       }
+
+       return ret_val;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure (unused)
+ * @speed: new link speed (unused)
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *                            (unused)
+ *
+ * Intentionally a no-op: a VF has no access to the PHY/AUTOC registers,
+ * so link speed is owned entirely by the PF. Kept so the shared mac-ops
+ * table has a valid setup_link entry.
+ *
+ * Always returns IXGBE_SUCCESS.
+ **/
+int32_t ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                           bool autoneg_wait_to_complete)
+{
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed (output)
+ * @link_up: TRUE is link is up, FALSE otherwise (output)
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *                            (unused in the VF path)
+ *
+ * Reads the links register to determine if link is up and the current speed.
+ * Link is only declared up once both the VFLINKS register shows link AND
+ * the PF is known to be responsive over the mailbox; mac->get_link_status
+ * tracks "still need to (re)check" state across calls.
+ **/
+int32_t ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                           bool *link_up, bool autoneg_wait_to_complete)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       int32_t ret_val = IXGBE_SUCCESS;
+       uint32_t in_msg = 0;
+       uint32_t links_reg;
+
+       /* If we were hit with a reset drop the link */
+       if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+               mac->get_link_status = TRUE;
+
+       /* Link already verified on a previous call; nothing to do. */
+       if (!mac->get_link_status)
+               goto out;
+
+       /* if link status is down no point in checking to see if pf is up */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+       if (!(links_reg & IXGBE_LINKS_UP))
+               goto out;
+
+       /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+        * before the link status is correct
+        */
+       if (mac->type == ixgbe_mac_82599_vf) {
+               int i;
+
+               /* Require link to stay up across 5 samples, 100us apart. */
+               for (i = 0; i < 5; i++) {
+                       usec_delay(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+                       if (!(links_reg & IXGBE_LINKS_UP))
+                               goto out;
+               }
+       }
+
+       /* Decode the speed field; NON_STD flags the 2.5G/5G rates that
+        * only X550-class MACs can report.
+        */
+       switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+       case IXGBE_LINKS_SPEED_10G_82599:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               if (hw->mac.type >= ixgbe_mac_X550_vf) {
+                       if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+                               *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+               }
+               break;
+       case IXGBE_LINKS_SPEED_1G_82599:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+       case IXGBE_LINKS_SPEED_100_82599:
+               *speed = IXGBE_LINK_SPEED_100_FULL;
+               if (hw->mac.type == ixgbe_mac_X550_vf) {
+                       if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+                               *speed = IXGBE_LINK_SPEED_5GB_FULL;
+               }
+               break;
+       case IXGBE_LINKS_SPEED_10_X550EM_A:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               /* Since Reserved in older MAC's */
+               if (hw->mac.type >= ixgbe_mac_X550_vf)
+                       *speed = IXGBE_LINK_SPEED_10_FULL;
+               break;
+       default:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+       }
+
+       /* if the read failed it could just be a mailbox collision, best wait
+        * until we are called again and don't report an error
+        */
+       if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) {
+               /* API 1.5+ mailboxes don't need the CTS probe below. */
+               if (hw->api_version >= ixgbe_mbox_api_15)
+                       mac->get_link_status = FALSE;
+               goto out;
+       }
+
+       if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+               /* msg is not CTS and is NACK we must have lost CTS status */
+               if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
+                       ret_val = IXGBE_ERR_MBX;
+               goto out;
+       }
+
+       /* the pf is talking, if we timed out in the past we reinit */
+       if (!mbx->timeout) {
+               ret_val = IXGBE_ERR_TIMEOUT;
+               goto out;
+       }
+
+       /* if we passed all the tests above then the link is up and we no
+        * longer need to check for link
+        */
+       mac->get_link_status = FALSE;
+
+out:
+       /* Report link as up exactly when no further checking is pending. */
+       *link_up = !mac->get_link_status;
+       return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size (bytes)
+ *
+ * Asks the PF to program the VF's receive long packet enable / max
+ * frame size via the IXGBE_VF_SET_LPE mailbox message.
+ *
+ * Returns 0 on success, the mailbox error from the write/ack cycle,
+ * or IXGBE_ERR_MBX when the PF NACKs the request.
+ **/
+int32_t ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, uint16_t max_size)
+{
+       uint32_t msgbuf[2];
+       int32_t retval;
+
+       msgbuf[0] = IXGBE_VF_SET_LPE;
+       msgbuf[1] = max_size;
+
+       retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+       if (retval)
+               return retval;
+       /* Reply must echo our opcode; FAILURE bit means the PF refused. */
+       if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+           (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
+               return IXGBE_ERR_MBX;
+
+       return 0;
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version (ixgbe_mbox_api_*)
+ *
+ * Offers @api to the PF with IXGBE_VF_API_NEGOTIATE. On an ACK the
+ * negotiated version is stored in hw->api_version for later feature
+ * gating (e.g. xcast mode, get_queues).
+ *
+ * Returns 0 on success, the mailbox error, or
+ * IXGBE_ERR_INVALID_ARGUMENT when the PF rejects the requested version.
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+       int err;
+       uint32_t msg[3];
+
+       /* Negotiate the mailbox API version */
+       msg[0] = IXGBE_VF_API_NEGOTIATE;
+       msg[1] = api;
+       msg[2] = 0;
+
+       err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+       if (!err) {
+               msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+               /* Store value and return 0 on success */
+               if (msg[0] == (IXGBE_VF_API_NEGOTIATE | 
IXGBE_VT_MSGTYPE_SUCCESS)) {
+                       hw->api_version = api;
+                       return 0;
+               }
+
+               err = IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       return err;
+}
+
+/**
+ * ixgbevf_get_queues - Fetch queue configuration from the PF
+ * @hw: pointer to the HW structure
+ * @num_tcs: number of traffic classes / transparent VLAN state (output)
+ * @default_tc: default queue index (output)
+ *
+ * Queries the PF with IXGBE_VF_GET_QUEUES and records the sanitized
+ * TX/RX queue limits in hw->mac. Out-of-range replies are clamped to
+ * the compile-time VF maximums.
+ *
+ * Returns 0 on success or when the negotiated mailbox API is too old
+ * to support the message (outputs untouched in that case), otherwise
+ * the mailbox error or IXGBE_ERR_MBX on an unexpected reply.
+ **/
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+                      unsigned int *default_tc)
+{
+       int err;
+       uint32_t msg[5];
+
+       /* do nothing if API doesn't support ixgbevf_get_queues */
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_11:
+       case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_15:
+               break;
+       default:
+               return 0;
+       }
+
+       /* Fetch queue configuration from the PF */
+       msg[0] = IXGBE_VF_GET_QUEUES;
+       msg[1] = msg[2] = msg[3] = msg[4] = 0;
+
+       err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+       if (!err) {
+               msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+               /*
+                * if we didn't get a SUCCESS there must have been
+                * some sort of mailbox error so we should treat it
+                * as such
+                */
+               if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS))
+                       return IXGBE_ERR_MBX;
+
+               /* record and validate values from message */
+               hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+               if (hw->mac.max_tx_queues == 0 ||
+                   hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+                       hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+               hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+               if (hw->mac.max_rx_queues == 0 ||
+                   hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+                       hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+               *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+               /* in case of unknown state assume we cannot tag frames */
+               if (*num_tcs > hw->mac.max_rx_queues)
+                       *num_tcs = 1;
+
+               *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+               /* default to queue 0 on out-of-bounds queue number */
+               if (*default_tc >= hw->mac.max_tx_queues)
+                       *default_tc = 0;
+       }
+
+       return err;
+}
diff --git a/sys/dev/pci/pcidevs b/sys/dev/pci/pcidevs
index 8f486074838..55a8b71772c 100644
--- a/sys/dev/pci/pcidevs
+++ b/sys/dev/pci/pcidevs
@@ -4019,6 +4019,7 @@ product INTEL 82580_SERDES        0x1510  82580
 product INTEL 82580_SGMII      0x1511  82580
 product INTEL 82524EF          0x1513  82524EF Thunderbolt
 product INTEL 82599_KX4_MEZZ   0x1514  82599
+product INTEL X540_VF          0x1515  X540 VF
 product INTEL 82580_COPPER_DUAL        0x1516  82580
 product INTEL 82599_KR         0x1517  82599
 product INTEL 82576_NS_SERDES  0x1518  82576NS
@@ -4033,6 +4034,8 @@ product INTEL 82580_QUAD_FIBER    0x1527  82580 QF
 product INTEL X540T            0x1528  X540T
 product INTEL 82599_SFP_FCOE   0x1529  82599
 product INTEL 82599_BPLANE_FCOE        0x152a  82599
+product INTEL 82599_VF_HV      0x152e  82599 VF HV
+product INTEL X540_VF_HV       0x1530  X540 VF HV
 product INTEL I210_COPPER      0x1533  I210
 product INTEL I210_COPPER_OEM1 0x1534  I210
 product INTEL I210_COPPER_IT   0x1535  I210
@@ -4053,6 +4056,8 @@ product INTEL I218_V              0x1559  I218-V
 product INTEL I218_LM          0x155a  I218-LM
 product INTEL X540T1           0x1560  X540T
 product INTEL X550T            0x1563  X550T
+product INTEL X550_VF_HV       0x1564  X550 VF HV
+product INTEL X550_VF          0x1565  X550 VF
 product INTEL DSL5520          0x156c  DSL5520 Thunderbolt
 product INTEL DSL5520_PCIE     0x156d  DSL5520 Thunderbolt
 product INTEL I219_LM          0x156f  I219-LM
@@ -4076,11 +4081,14 @@ product INTEL I218_LM_2         0x15a0  I218-LM
 product INTEL I218_V_2         0x15a1  I218-V
 product INTEL I218_LM_3                0x15a2  I218-LM
 product INTEL I218_V_3         0x15a3  I218-V
+product INTEL X550EM_X_VF      0x15a8  X552 VF
+product INTEL X550EM_X_VF_HV   0x15a9  X552 VF HV
 product INTEL X550EM_X_KX4     0x15aa  X552 Backplane
 product INTEL X550EM_X_KR      0x15ab  X552 Backplane
 product INTEL X550EM_X_SFP     0x15ac  X552 SFP+
 product INTEL X550EM_X_10G_T   0x15ad  X552/X557-AT
 product INTEL X550EM_X_1G_T    0x15ae  X552 1GbaseT
+product INTEL X550EM_A_VF_HV   0x15b4  X553 VF HV
 product INTEL I219_LM2         0x15b7  I219-LM
 product INTEL I219_V2          0x15b8  I219-V
 product INTEL I219_LM3         0x15b9  I219-LM
@@ -4094,6 +4102,7 @@ product INTEL JHL6240_XHCI        0x15c1  JHL6240 
Thunderbolt 3
 product INTEL X550EM_A_KR      0x15c2  X553 Backplane
 product INTEL X550EM_A_KR_L    0x15c3  X553 Backplane
 product INTEL X550EM_A_SFP_N   0x15c4  X553 SFP+
+product INTEL X550EM_A_VF      0x15c5  X553 VF
 product INTEL X550EM_A_SGMII   0x15c6  X553 SGMII
 product INTEL X550EM_A_SGMII_L 0x15c7  X553 SGMII
 product INTEL X550EM_A_10G_T   0x15c8  X553 10GBaseT
diff --git a/sys/dev/pci/pcireg.h b/sys/dev/pci/pcireg.h
index 53124eccecb..49e2fd1fc0d 100644
--- a/sys/dev/pci/pcireg.h
+++ b/sys/dev/pci/pcireg.h
@@ -655,6 +655,8 @@ typedef u_int8_t pci_revision_t;
 #define PCI_MSIX_VC(i)         ((i) * 16 + 12)
 #define  PCI_MSIX_VC_MASK      0x00000001
 
+#define PCIR_MSIX_CTRL         0x2
+
 /*
  * Interrupt Configuration Register; contains interrupt pin and line.
  */
diff --git a/sys/net/if_var.h b/sys/net/if_var.h
index 94480ac14c0..f5dc8dcb3b8 100644
--- a/sys/net/if_var.h
+++ b/sys/net/if_var.h
@@ -172,6 +172,7 @@ struct ifnet {                              /* and the 
entries */
        int     (*if_ioctl)(struct ifnet *, u_long, caddr_t); /* ioctl hook */
        void    (*if_watchdog)(struct ifnet *); /* timer routine */
        int     (*if_wol)(struct ifnet *, int); /* WoL routine **/
+       void    (*if_configure_vlan)(struct ifnet *, uint16_t, uint16_t);       
/* configure vlan id **/
 
        /* queues */
        struct  ifqueue if_snd;         /* transmit queue */
diff --git a/sys/net/if_vlan.c b/sys/net/if_vlan.c
index d660232deb4..d7187da67cd 100644
--- a/sys/net/if_vlan.c
+++ b/sys/net/if_vlan.c
@@ -868,10 +868,11 @@ vlan_setlladdr(struct vlan_softc *sc, struct ifreq *ifr)
 int
 vlan_set_vnetid(struct vlan_softc *sc, uint16_t tag)
 {
-       struct ifnet *ifp = &sc->sc_if;
+       struct ifnet *ifp0, *ifp = &sc->sc_if;
        struct vlan_list *tagh, *list;
        u_char link = ifp->if_link_state;
        uint64_t baud = ifp->if_baudrate;
+       uint16_t otag;
        int error;
 
        tagh = sc->sc_type == ETHERTYPE_QINQ ? svlan_tagh : vlan_tagh;
@@ -887,6 +888,7 @@ vlan_set_vnetid(struct vlan_softc *sc, uint16_t tag)
        if (error != 0)
                goto unlock;
 
+       otag = sc->sc_tag;
        if (ISSET(ifp->if_flags, IFF_RUNNING)) {
                list = &tagh[TAG_HASH(sc->sc_tag)];
                SMR_SLIST_REMOVE_LOCKED(list, sc, vlan_softc, sc_list);
@@ -898,6 +900,10 @@ vlan_set_vnetid(struct vlan_softc *sc, uint16_t tag)
        } else
                sc->sc_tag = tag;
 
+       ifp0 = if_get(sc->sc_ifidx0);
+       if (ifp0 != NULL && ifp0->if_configure_vlan != NULL)
+               (*ifp0->if_configure_vlan)(ifp0, tag, otag);
+
 unlock:
        rw_exit(&vlan_tagh_lk);
 
@@ -911,7 +917,7 @@ int
 vlan_set_parent(struct vlan_softc *sc, const char *parent)
 {
        struct ifnet *ifp = &sc->sc_if;
-       struct ifnet *ifp0;
+       struct ifnet *ifp0, *p;
        int error = 0;
 
        ifp0 = if_unit(parent);
@@ -938,6 +944,15 @@ vlan_set_parent(struct vlan_softc *sc, const char *parent)
                goto put;
 
        /* commit */
+       if (sc->sc_tag > 0) {
+               p = if_get(sc->sc_ifidx0);
+               if (p != NULL && p->if_configure_vlan != NULL)
+                       (*p->if_configure_vlan)(p, 0, sc->sc_tag);
+
+               if (ifp0->if_configure_vlan != NULL)
+                       (*ifp0->if_configure_vlan)(ifp0, sc->sc_tag, 0);
+       }
+
        sc->sc_ifidx0 = ifp0->if_index;
        if (!ISSET(sc->sc_flags, IFVF_LLADDR))
                if_setlladdr(ifp, LLADDR(ifp0->if_sadl));
@@ -950,12 +965,17 @@ put:
 int
 vlan_del_parent(struct vlan_softc *sc)
 {
-       struct ifnet *ifp = &sc->sc_if;
+       struct ifnet *ifp0, *ifp = &sc->sc_if;
 
        if (ISSET(ifp->if_flags, IFF_RUNNING))
                return (EBUSY);
 
        /* commit */
+       if (sc->sc_tag > 0) {
+               ifp0 = if_get(sc->sc_ifidx0);
+               if (ifp0 != NULL && ifp0->if_configure_vlan != NULL)
+                       (*ifp0->if_configure_vlan)(ifp0, 0, sc->sc_tag);
+       }
        sc->sc_ifidx0 = 0;
        if (!ISSET(sc->sc_flags, IFVF_LLADDR))
                if_setlladdr(ifp, etheranyaddr);

-- 
Yuichiro NAITO (naito.yuich...@gmail.com)

Reply via email to