Replace the Intel VMXNET3 driver with the one developed by Brocade.
The Intel code was missing features and had style and other issues.

Major changes:
      - supports multi-segment packets (chained mbufs)
      - supports VLAN offload
      - per-queue stats

Fixes:
      - link update reports link changes correctly
      - allows reconfiguration of the number of queues
        (the original code allocated a memzone that was too small; see the sketch below)

Signed-off-by: Stephen Hemminger <shemming@brocade.com>


---
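For reviewers: a minimal sketch (not part of this patch) of the queue
reconfiguration case the memzone fix above addresses. The port number,
queue counts and helper name are arbitrary; only standard ethdev calls
(rte_eth_dev_stop, rte_eth_dev_configure) are used.

#include <rte_ethdev.h>

/* Reconfigure an already-configured port with more queue pairs.
 * With the previous driver this could fail because the queue-descriptor
 * memzone had been sized for the original, smaller configuration. */
static int grow_queues(uint8_t port, uint16_t nb_queues)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
	};

	rte_eth_dev_stop(port);
	return rte_eth_dev_configure(port, nb_queues, nb_queues, &conf);
}
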
 lib/librte_pmd_vmxnet3/Makefile                    |   23 
 lib/librte_pmd_vmxnet3/README                      |   20 
 lib/librte_pmd_vmxnet3/vmxnet3/README              |   50 
 lib/librte_pmd_vmxnet3/vmxnet3/includeCheck.h      |   40 
 lib/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h         |  117 -
 lib/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h |   32 
 lib/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h   |   32 
 lib/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h      |  751 -----------
 lib/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h     |   48 
 lib/librte_pmd_vmxnet3/vmxnet3_defs.h              |  717 ++++++++++
 lib/librte_pmd_vmxnet3/vmxnet3_dev.h               |  148 ++
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c            | 1087 +++++++---------
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.h            |  187 --
 lib/librte_pmd_vmxnet3/vmxnet3_logs.h              |   11 
 lib/librte_pmd_vmxnet3/vmxnet3_ring.h              |  176 --
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c              | 1387 +++++++++------------
 16 files changed, 2024 insertions(+), 2802 deletions(-)

--- a/lib/librte_pmd_vmxnet3/Makefile   2014-05-14 11:46:47.129439301 -0700
+++ b/lib/librte_pmd_vmxnet3/Makefile   2014-05-14 11:46:47.125439289 -0700
@@ -1,8 +1,8 @@
 #   BSD LICENSE
 # 
-#   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+#   Copyright(c) 2012-2014 Brocade Computer Systems
 #   All rights reserved.
-# 
+#
 #   Redistribution and use in source and binary forms, with or without
 #   modification, are permitted provided that the following conditions
 #   are met:
@@ -39,25 +39,6 @@ LIB = librte_pmd_vmxnet3_uio.a
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)

-ifeq ($(CC), icc)
-#
-# CFLAGS for icc
-#
-CFLAGS_LAD = -wd174 -wd593 -wd869 -wd981 -wd2259
-else 
-#
-# CFLAGS for gcc
-#
-ifneq ($(shell test $(GCC_MAJOR_VERSION) -le 4 -a $(GCC_MINOR_VERSION) -le 3 && echo 1), 1)
-CFLAGS     += -Wno-deprecated
-endif
-CFLAGS_LAD = -Wno-unused-parameter -Wno-unused-value
-CFLAGS_LAD += -Wno-strict-aliasing -Wno-format-extra-args
-
-endif
-
-VPATH += $(RTE_SDK)/lib/librte_pmd_vmxnet3/vmxnet3
-
 #
 # all source are stored in SRCS-y
 #
--- a/lib/librte_pmd_vmxnet3/vmxnet3/README     2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,50 +0,0 @@
-..
-     BSD LICENSE
-   
-     Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-     All rights reserved.
-   
-     Redistribution and use in source and binary forms, with or without
-     modification, are permitted provided that the following conditions
-     are met:
-   
-       * Redistributions of source code must retain the above copyright
-         notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above copyright
-         notice, this list of conditions and the following disclaimer in
-         the documentation and/or other materials provided with the
-         distribution.
-       * Neither the name of Intel Corporation nor the names of its
-         contributors may be used to endorse or promote products derived
-         from this software without specific prior written permission.
-   
-     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Intel VMXNET3 driver
-===================
-
-This directory contains source code of FreeBSD VMXNET3 driver released by VMware.
-In which, upt1_defs.h and vmxnet3_defs.h is introduced without any change.
-The other 4 files: includeCheck.h, vmware_pack_begin.h, vmware_pack_end.h and vmxnet3_osdep.h
-are crated to adapt to the needs from above 2 files.
-
-Updating driver
-===============
-
-The following modifications have been made to this code to integrate it with the
-Intel DPDK:
-
-
--------------
-
-
--- a/lib/librte_pmd_vmxnet3/vmxnet3/includeCheck.h     2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,40 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _INCLUDECHECK_H
-#define _INCLUDECHECK_H
-
-#include "vmxnet3_osdep.h"
-
-#endif /* _INCLUDECHECK_H */
-
--- a/lib/librte_pmd_vmxnet3/vmxnet3/upt1_defs.h        2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,117 +0,0 @@
-/*********************************************************
- * Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *********************************************************/
-
-/* upt1_defs.h
- *
- *      Definitions for UPTv1
- *
- *      Some of the defs are duplicated in vmkapi_net_upt.h, because
- *      vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can
- *      only include vmkapi headers. Make sure they are kept in sync!
- */
-
-#ifndef _UPT1_DEFS_H
-#define _UPT1_DEFS_H
-
-#define UPT1_MAX_TX_QUEUES  64
-#define UPT1_MAX_RX_QUEUES  64
-
-#define UPT1_MAX_INTRS  (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES)
-
-typedef
-#include "vmware_pack_begin.h"
-struct UPT1_TxStats {
-   uint64 TSOPktsTxOK;  /* TSO pkts post-segmentation */
-   uint64 TSOBytesTxOK;
-   uint64 ucastPktsTxOK;
-   uint64 ucastBytesTxOK;
-   uint64 mcastPktsTxOK;
-   uint64 mcastBytesTxOK;
-   uint64 bcastPktsTxOK;
-   uint64 bcastBytesTxOK;
-   uint64 pktsTxError;
-   uint64 pktsTxDiscard;
-}
-#include "vmware_pack_end.h"
-UPT1_TxStats;
-
-typedef
-#include "vmware_pack_begin.h"
-struct UPT1_RxStats {
-   uint64 LROPktsRxOK;    /* LRO pkts */
-   uint64 LROBytesRxOK;   /* bytes from LRO pkts */
-   /* the following counters are for pkts from the wire, i.e., pre-LRO */
-   uint64 ucastPktsRxOK;
-   uint64 ucastBytesRxOK;
-   uint64 mcastPktsRxOK;
-   uint64 mcastBytesRxOK;
-   uint64 bcastPktsRxOK;
-   uint64 bcastBytesRxOK;
-   uint64 pktsRxOutOfBuf;
-   uint64 pktsRxError;
-}
-#include "vmware_pack_end.h"
-UPT1_RxStats;
-
-/* interrupt moderation level */
-#define UPT1_IML_NONE     0 /* no interrupt moderation */
-#define UPT1_IML_HIGHEST  7 /* least intr generated */
-#define UPT1_IML_ADAPTIVE 8 /* adpative intr moderation */
-
-/* values for UPT1_RSSConf.hashFunc */
-#define UPT1_RSS_HASH_TYPE_NONE      0x0
-#define UPT1_RSS_HASH_TYPE_IPV4      0x01
-#define UPT1_RSS_HASH_TYPE_TCP_IPV4  0x02
-#define UPT1_RSS_HASH_TYPE_IPV6      0x04
-#define UPT1_RSS_HASH_TYPE_TCP_IPV6  0x08
-
-#define UPT1_RSS_HASH_FUNC_NONE      0x0
-#define UPT1_RSS_HASH_FUNC_TOEPLITZ  0x01
-
-#define UPT1_RSS_MAX_KEY_SIZE        40
-#define UPT1_RSS_MAX_IND_TABLE_SIZE  128
-
-typedef 
-#include "vmware_pack_begin.h"
-struct UPT1_RSSConf {
-   uint16   hashType;
-   uint16   hashFunc;
-   uint16   hashKeySize;
-   uint16   indTableSize;
-   uint8    hashKey[UPT1_RSS_MAX_KEY_SIZE];
-   uint8    indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
-}
-#include "vmware_pack_end.h"
-UPT1_RSSConf;
-
-/* features */
-#define UPT1_F_RXCSUM      0x0001   /* rx csum verification */
-#define UPT1_F_RSS         0x0002
-#define UPT1_F_RXVLAN      0x0004   /* VLAN tag stripping */
-#define UPT1_F_LRO         0x0008
-
-#endif
--- a/lib/librte_pmd_vmxnet3/vmxnet3/vmware_pack_begin.h        2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,32 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
--- a/lib/librte_pmd_vmxnet3/vmxnet3/vmware_pack_end.h  2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,32 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
--- a/lib/librte_pmd_vmxnet3/vmxnet3/vmxnet3_defs.h     2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,751 +0,0 @@
-/*********************************************************
- * Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *********************************************************/
-
-/*
- * vmxnet3_defs.h --
- *
- *      Definitions shared by device emulation and guest drivers for
- *      VMXNET3 NIC
- */
-
-#ifndef _VMXNET3_DEFS_H_
-#define _VMXNET3_DEFS_H_
-
-#define INCLUDE_ALLOW_USERLEVEL
-#define INCLUDE_ALLOW_VMKERNEL
-#define INCLUDE_ALLOW_DISTRIBUTE
-#define INCLUDE_ALLOW_VMKDRIVERS
-#define INCLUDE_ALLOW_VMCORE
-#define INCLUDE_ALLOW_MODULE
-#include "includeCheck.h"
-
-#include "upt1_defs.h"
-
-/* all registers are 32 bit wide */
-/* BAR 1 */
-#define VMXNET3_REG_VRRS  0x0    /* Vmxnet3 Revision Report Selection */
-#define VMXNET3_REG_UVRS  0x8    /* UPT Version Report Selection */
-#define VMXNET3_REG_DSAL  0x10   /* Driver Shared Address Low */
-#define VMXNET3_REG_DSAH  0x18   /* Driver Shared Address High */
-#define VMXNET3_REG_CMD   0x20   /* Command */
-#define VMXNET3_REG_MACL  0x28   /* MAC Address Low */
-#define VMXNET3_REG_MACH  0x30   /* MAC Address High */
-#define VMXNET3_REG_ICR   0x38   /* Interrupt Cause Register */
-#define VMXNET3_REG_ECR   0x40   /* Event Cause Register */
-
-#define VMXNET3_REG_WSAL  0xF00  /* Wireless Shared Address Lo  */
-#define VMXNET3_REG_WSAH  0xF08  /* Wireless Shared Address Hi  */
-#define VMXNET3_REG_WCMD  0xF18  /* Wireless Command */
-
-/* BAR 0 */
-#define VMXNET3_REG_IMR      0x0   /* Interrupt Mask Register */
-#define VMXNET3_REG_TXPROD   0x600 /* Tx Producer Index */
-#define VMXNET3_REG_RXPROD   0x800 /* Rx Producer Index for ring 1 */
-#define VMXNET3_REG_RXPROD2  0xA00 /* Rx Producer Index for ring 2 */
-
-#define VMXNET3_PT_REG_SIZE     4096    /* BAR 0 */
-#define VMXNET3_VD_REG_SIZE     4096    /* BAR 1 */
-
-/*
- * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at
- * offset 14h)  as well as the MSI-X BAR are combined into one PhysMem region:
- * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE-->
- * -------------------------------------------------------------------------
- * |Pass Thru Registers  | Virtual Dev Registers | MSI-X Vector/PBA Table  |
- * -------------------------------------------------------------------------
- * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h"
- */
-#define VMXNET3_PHYSMEM_PAGES   4
-
-#define VMXNET3_REG_ALIGN       8  /* All registers are 8-byte aligned. */
-#define VMXNET3_REG_ALIGN_MASK  0x7
-
-/* I/O Mapped access to registers */
-#define VMXNET3_IO_TYPE_PT              0
-#define VMXNET3_IO_TYPE_VD              1
-#define VMXNET3_IO_ADDR(type, reg)      (((type) << 24) | ((reg) & 0xFFFFFF))
-#define VMXNET3_IO_TYPE(addr)           ((addr) >> 24)
-#define VMXNET3_IO_REG(addr)            ((addr) & 0xFFFFFF)
-
-#ifndef __le16
-#define __le16 uint16
-#endif
-#ifndef __le32
-#define __le32 uint32
-#endif
-#ifndef __le64
-#define __le64 uint64
-#endif
-
-typedef enum {
-   VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
-   VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
-   VMXNET3_CMD_QUIESCE_DEV,
-   VMXNET3_CMD_RESET_DEV,
-   VMXNET3_CMD_UPDATE_RX_MODE,
-   VMXNET3_CMD_UPDATE_MAC_FILTERS,
-   VMXNET3_CMD_UPDATE_VLAN_FILTERS,
-   VMXNET3_CMD_UPDATE_RSSIDT,
-   VMXNET3_CMD_UPDATE_IML,
-   VMXNET3_CMD_UPDATE_PMCFG,
-   VMXNET3_CMD_UPDATE_FEATURE,
-   VMXNET3_CMD_STOP_EMULATION,
-   VMXNET3_CMD_LOAD_PLUGIN,
-   VMXNET3_CMD_ACTIVATE_VF,
-
-   VMXNET3_CMD_FIRST_GET = 0xF00D0000,
-   VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
-   VMXNET3_CMD_GET_STATS,
-   VMXNET3_CMD_GET_LINK,
-   VMXNET3_CMD_GET_PERM_MAC_LO,
-   VMXNET3_CMD_GET_PERM_MAC_HI,
-   VMXNET3_CMD_GET_DID_LO,
-   VMXNET3_CMD_GET_DID_HI,
-   VMXNET3_CMD_GET_DEV_EXTRA_INFO,
-   VMXNET3_CMD_GET_CONF_INTR,
-   VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
-} Vmxnet3_Cmd;
-
-/* Adaptive Ring Info Flags */
-#define VMXNET3_DISABLE_ADAPTIVE_RING 1
-
-/*
- *     Little Endian layout of bitfields -
- *     Byte 0 :        7.....len.....0
- *     Byte 1 :        rsvd gen 13.len.8
- *     Byte 2 :        5.msscof.0 ext1  dtype
- *     Byte 3 :        13...msscof...6
- *
- *     Big Endian layout of bitfields -
- *     Byte 0:         13...msscof...6
- *     Byte 1 :        5.msscof.0 ext1  dtype
- *     Byte 2 :        rsvd gen 13.len.8
- *     Byte 3 :        7.....len.....0
- *
- *     Thus, le32_to_cpu on the dword will allow the big endian driver to read
- *     the bit fields correctly. And cpu_to_le32 will convert bitfields
- *     bit fields written by big endian driver to format required by device.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxDesc {
-   __le64 addr;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 msscof:14;  /* MSS, checksum offset, flags */
-   uint32 ext1:1;
-   uint32 dtype:1;    /* descriptor type */
-   uint32 rsvd:1;
-   uint32 gen:1;      /* generation bit */
-   uint32 len:14;
-#else
-   uint32 len:14;
-   uint32 gen:1;      /* generation bit */
-   uint32 rsvd:1;
-   uint32 dtype:1;    /* descriptor type */
-   uint32 ext1:1;
-   uint32 msscof:14;  /* MSS, checksum offset, flags */
-#endif  /* __BIG_ENDIAN_BITFIELD */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 tci:16;     /* Tag to Insert */
-   uint32 ti:1;       /* VLAN Tag Insertion */
-   uint32 ext2:1;
-   uint32 cq:1;       /* completion request */
-   uint32 eop:1;      /* End Of Packet */
-   uint32 om:2;       /* offload mode */
-   uint32 hlen:10;    /* header len */
-#else
-   uint32 hlen:10;    /* header len */
-   uint32 om:2;       /* offload mode */
-   uint32 eop:1;      /* End Of Packet */
-   uint32 cq:1;       /* completion request */
-   uint32 ext2:1;
-   uint32 ti:1;       /* VLAN Tag Insertion */
-   uint32 tci:16;     /* Tag to Insert */
-#endif  /* __BIG_ENDIAN_BITFIELD */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxDesc;
-
-/* TxDesc.OM values */
-#define VMXNET3_OM_NONE  0
-#define VMXNET3_OM_CSUM  2
-#define VMXNET3_OM_TSO   3
-
-/* fields in TxDesc we access w/o using bit fields */
-#define VMXNET3_TXD_EOP_SHIFT 12
-#define VMXNET3_TXD_CQ_SHIFT  13
-#define VMXNET3_TXD_GEN_SHIFT 14
-#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
-#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
-
-#define VMXNET3_TXD_CQ  (1 << VMXNET3_TXD_CQ_SHIFT)
-#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
-#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
-
-#define VMXNET3_TXD_GEN_SIZE 1
-#define VMXNET3_TXD_EOP_SIZE 1
-
-#define VMXNET3_HDR_COPY_SIZE   128
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxDataDesc {
-   uint8 data[VMXNET3_HDR_COPY_SIZE];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxDataDesc;
-
-#define VMXNET3_TCD_GEN_SHIFT  31
-#define VMXNET3_TCD_GEN_SIZE   1
-#define VMXNET3_TCD_TXIDX_SHIFT        0
-#define VMXNET3_TCD_TXIDX_SIZE 12
-#define VMXNET3_TCD_GEN_DWORD_SHIFT    3
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxCompDesc {
-   uint32 txdIdx:12;    /* Index of the EOP TxDesc */
-   uint32 ext1:20;
-
-   __le32 ext2;
-   __le32 ext3;
-
-   uint32 rsvd:24;
-   uint32 type:7;       /* completion type */
-   uint32 gen:1;        /* generation bit */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxCompDesc;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxDesc {
-   __le64 addr;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 gen:1;        /* Generation bit */
-   uint32 rsvd:15;
-   uint32 dtype:1;      /* Descriptor type */
-   uint32 btype:1;      /* Buffer Type */
-   uint32 len:14;
-#else
-   uint32 len:14;
-   uint32 btype:1;      /* Buffer Type */
-   uint32 dtype:1;      /* Descriptor type */
-   uint32 rsvd:15;
-   uint32 gen:1;        /* Generation bit */
-#endif
-   __le32 ext1;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxDesc;
-
-/* values of RXD.BTYPE */
-#define VMXNET3_RXD_BTYPE_HEAD   0    /* head only */
-#define VMXNET3_RXD_BTYPE_BODY   1    /* body only */
-
-/* fields in RxDesc we access w/o using bit fields */
-#define VMXNET3_RXD_BTYPE_SHIFT  14
-#define VMXNET3_RXD_GEN_SHIFT    31
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxCompDesc {
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 ext2:1;
-   uint32 cnc:1;        /* Checksum Not Calculated */
-   uint32 rssType:4;    /* RSS hash type used */
-   uint32 rqID:10;      /* rx queue/ring ID */
-   uint32 sop:1;        /* Start of Packet */
-   uint32 eop:1;        /* End of Packet */
-   uint32 ext1:2;
-   uint32 rxdIdx:12;    /* Index of the RxDesc */
-#else
-   uint32 rxdIdx:12;    /* Index of the RxDesc */
-   uint32 ext1:2;
-   uint32 eop:1;        /* End of Packet */
-   uint32 sop:1;        /* Start of Packet */
-   uint32 rqID:10;      /* rx queue/ring ID */
-   uint32 rssType:4;    /* RSS hash type used */
-   uint32 cnc:1;        /* Checksum Not Calculated */
-   uint32 ext2:1;
-#endif  /* __BIG_ENDIAN_BITFIELD */
-
-   __le32 rssHash;      /* RSS hash value */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 tci:16;       /* Tag stripped */
-   uint32 ts:1;         /* Tag is stripped */
-   uint32 err:1;        /* Error */
-   uint32 len:14;       /* data length */
-#else
-   uint32 len:14;       /* data length */
-   uint32 err:1;        /* Error */
-   uint32 ts:1;         /* Tag is stripped */
-   uint32 tci:16;       /* Tag stripped */
-#endif  /* __BIG_ENDIAN_BITFIELD */
-
-
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32 gen:1;        /* generation bit */
-   uint32 type:7;       /* completion type */
-   uint32 fcs:1;        /* Frame CRC correct */
-   uint32 frg:1;        /* IP Fragment */
-   uint32 v4:1;         /* IPv4 */
-   uint32 v6:1;         /* IPv6 */
-   uint32 ipc:1;        /* IP Checksum Correct */
-   uint32 tcp:1;        /* TCP packet */
-   uint32 udp:1;        /* UDP packet */
-   uint32 tuc:1;        /* TCP/UDP Checksum Correct */
-   uint32 csum:16;
-#else
-   uint32 csum:16;
-   uint32 tuc:1;        /* TCP/UDP Checksum Correct */
-   uint32 udp:1;        /* UDP packet */
-   uint32 tcp:1;        /* TCP packet */
-   uint32 ipc:1;        /* IP Checksum Correct */
-   uint32 v6:1;         /* IPv6 */
-   uint32 v4:1;         /* IPv4 */
-   uint32 frg:1;        /* IP Fragment */
-   uint32 fcs:1;        /* Frame CRC correct */
-   uint32 type:7;       /* completion type */
-   uint32 gen:1;        /* generation bit */
-#endif  /* __BIG_ENDIAN_BITFIELD */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxCompDesc;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxCompDescExt {
-   __le32 dword1;
-   uint8  segCnt;       /* Number of aggregated packets */
-   uint8  dupAckCnt;    /* Number of duplicate Acks */
-   __le16 tsDelta;      /* TCP timestamp difference */
-   __le32 dword2[2];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxCompDescExt;
-
-/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
-#define VMXNET3_RCD_TUC_SHIFT  16
-#define VMXNET3_RCD_IPC_SHIFT  19
-
-/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
-#define VMXNET3_RCD_TYPE_SHIFT 56
-#define VMXNET3_RCD_GEN_SHIFT  63
-
-/* csum OK for TCP/UDP pkts over IP */
-#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | 1 << VMXNET3_RCD_IPC_SHIFT)
-
-/* value of RxCompDesc.rssType */
-#define VMXNET3_RCD_RSS_TYPE_NONE     0
-#define VMXNET3_RCD_RSS_TYPE_IPV4     1
-#define VMXNET3_RCD_RSS_TYPE_TCPIPV4  2
-#define VMXNET3_RCD_RSS_TYPE_IPV6     3
-#define VMXNET3_RCD_RSS_TYPE_TCPIPV6  4
-
-/* a union for accessing all cmd/completion descriptors */
-typedef union Vmxnet3_GenericDesc {
-   __le64                qword[2];
-   __le32                dword[4];
-   __le16                word[8];
-   Vmxnet3_TxDesc        txd;
-   Vmxnet3_RxDesc        rxd;
-   Vmxnet3_TxCompDesc    tcd;
-   Vmxnet3_RxCompDesc    rcd;
-   Vmxnet3_RxCompDescExt rcdExt;
-} Vmxnet3_GenericDesc;
-
-#define VMXNET3_INIT_GEN       1
-
-/* Max size of a single tx buffer */
-#define VMXNET3_MAX_TX_BUF_SIZE  (1 << 14)
-
-/* # of tx desc needed for a tx buffer size */
-#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)
-
-/* max # of tx descs for a non-tso pkt */
-#define VMXNET3_MAX_TXD_PER_PKT 16
-
-/* Max size of a single rx buffer */
-#define VMXNET3_MAX_RX_BUF_SIZE  ((1 << 14) - 1)
-/* Minimum size of a type 0 buffer */
-#define VMXNET3_MIN_T0_BUF_SIZE  128
-#define VMXNET3_MAX_CSUM_OFFSET  1024
-
-/* Ring base address alignment */
-#define VMXNET3_RING_BA_ALIGN   512
-#define VMXNET3_RING_BA_MASK    (VMXNET3_RING_BA_ALIGN - 1)
-
-/* Ring size must be a multiple of 32 */
-#define VMXNET3_RING_SIZE_ALIGN 32
-#define VMXNET3_RING_SIZE_MASK  (VMXNET3_RING_SIZE_ALIGN - 1)
-
-/* Max ring size */
-#define VMXNET3_TX_RING_MAX_SIZE   4096
-#define VMXNET3_TC_RING_MAX_SIZE   4096
-#define VMXNET3_RX_RING_MAX_SIZE   4096
-#define VMXNET3_RC_RING_MAX_SIZE   8192
-
-/* a list of reasons for queue stop */
-
-#define VMXNET3_ERR_NOEOP        0x80000000  /* cannot find the EOP desc of a pkt */
-#define VMXNET3_ERR_TXD_REUSE    0x80000001  /* reuse a TxDesc before tx completion */
-#define VMXNET3_ERR_BIG_PKT      0x80000002  /* too many TxDesc for a pkt */
-#define VMXNET3_ERR_DESC_NOT_SPT 0x80000003  /* descriptor type not supported */
-#define VMXNET3_ERR_SMALL_BUF    0x80000004  /* type 0 buffer too small */
-#define VMXNET3_ERR_STRESS       0x80000005  /* stress option firing in vmkernel */
-#define VMXNET3_ERR_SWITCH       0x80000006  /* mode switch failure */
-#define VMXNET3_ERR_TXD_INVALID  0x80000007  /* invalid TxDesc */
-
-/* completion descriptor types */
-#define VMXNET3_CDTYPE_TXCOMP      0    /* Tx Completion Descriptor */
-#define VMXNET3_CDTYPE_RXCOMP      3    /* Rx Completion Descriptor */
-#define VMXNET3_CDTYPE_RXCOMP_LRO  4    /* Rx Completion Descriptor for LRO */
-
-#define VMXNET3_GOS_BITS_UNK    0   /* unknown */
-#define VMXNET3_GOS_BITS_32     1
-#define VMXNET3_GOS_BITS_64     2
-
-#define VMXNET3_GOS_TYPE_UNK        0 /* unknown */
-#define VMXNET3_GOS_TYPE_LINUX      1
-#define VMXNET3_GOS_TYPE_WIN        2
-#define VMXNET3_GOS_TYPE_SOLARIS    3
-#define VMXNET3_GOS_TYPE_FREEBSD    4
-#define VMXNET3_GOS_TYPE_PXE        5
-
-/* All structures in DriverShared are padded to multiples of 8 bytes */
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_GOSInfo {
-#ifdef __BIG_ENDIAN_BITFIELD
-   uint32   gosMisc: 10;    /* other info about gos */
-   uint32   gosVer:  16;    /* gos version */
-   uint32   gosType: 4;     /* which guest */
-   uint32   gosBits: 2;     /* 32-bit or 64-bit? */
-#else
-   uint32   gosBits: 2;     /* 32-bit or 64-bit? */
-   uint32   gosType: 4;     /* which guest */
-   uint32   gosVer:  16;    /* gos version */
-   uint32   gosMisc: 10;    /* other info about gos */
-#endif  /* __BIG_ENDIAN_BITFIELD */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_GOSInfo;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_DriverInfo {
-   __le32          version;        /* driver version */
-   Vmxnet3_GOSInfo gos;
-   __le32          vmxnet3RevSpt;  /* vmxnet3 revision supported */
-   __le32          uptVerSpt;      /* upt version supported */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_DriverInfo;
-
-#define VMXNET3_REV1_MAGIC  0xbabefee1
-
-/* 
- * QueueDescPA must be 128 bytes aligned. It points to an array of
- * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
- * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
- * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
- */
-#define VMXNET3_QUEUE_DESC_ALIGN  128
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_MiscConf {
-   Vmxnet3_DriverInfo driverInfo;
-   __le64             uptFeatures;
-   __le64             ddPA;         /* driver data PA */
-   __le64             queueDescPA;  /* queue descriptor table PA */
-   __le32             ddLen;        /* driver data len */
-   __le32             queueDescLen; /* queue descriptor table len, in bytes */
-   __le32             mtu;
-   __le16             maxNumRxSG;
-   uint8              numTxQueues;
-   uint8              numRxQueues;
-   __le32             reserved[4];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_MiscConf;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxQueueConf {
-   __le64    txRingBasePA;
-   __le64    dataRingBasePA;
-   __le64    compRingBasePA;
-   __le64    ddPA;         /* driver data */
-   __le64    reserved;
-   __le32    txRingSize;   /* # of tx desc */
-   __le32    dataRingSize; /* # of data desc */
-   __le32    compRingSize; /* # of comp desc */
-   __le32    ddLen;        /* size of driver data */
-   uint8     intrIdx;
-   uint8     _pad[7];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxQueueConf;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxQueueConf {
-   __le64    rxRingBasePA[2];
-   __le64    compRingBasePA;
-   __le64    ddPA;            /* driver data */
-   __le64    reserved;
-   __le32    rxRingSize[2];   /* # of rx desc */
-   __le32    compRingSize;    /* # of rx comp desc */
-   __le32    ddLen;           /* size of driver data */
-   uint8     intrIdx;
-   uint8     _pad[7];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxQueueConf;
-
-enum vmxnet3_intr_mask_mode {
-   VMXNET3_IMM_AUTO   = 0,
-   VMXNET3_IMM_ACTIVE = 1,
-   VMXNET3_IMM_LAZY   = 2
-};
-
-enum vmxnet3_intr_type {
-   VMXNET3_IT_AUTO = 0,
-   VMXNET3_IT_INTX = 1,
-   VMXNET3_IT_MSI  = 2,
-   VMXNET3_IT_MSIX = 3
-};
-
-#define VMXNET3_MAX_TX_QUEUES  8
-#define VMXNET3_MAX_RX_QUEUES  16
-/* addition 1 for events */
-#define VMXNET3_MAX_INTRS      25
-
-/* value of intrCtrl */
-#define VMXNET3_IC_DISABLE_ALL  0x1   /* bit 0 */
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_IntrConf {
-   Bool   autoMask;
-   uint8  numIntrs;      /* # of interrupts */
-   uint8  eventIntrIdx;
-   uint8  modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */
-   __le32 intrCtrl;
-   __le32 reserved[2];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_IntrConf;
-
-/* one bit per VLAN ID, the size is in the units of uint32 */
-#define VMXNET3_VFT_SIZE  (4096 / (sizeof(uint32) * 8))
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_QueueStatus {
-   Bool    stopped;
-   uint8   _pad[3];
-   __le32  error;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_QueueStatus;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxQueueCtrl {
-   __le32  txNumDeferred;
-   __le32  txThreshold;
-   __le64  reserved;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxQueueCtrl;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxQueueCtrl {
-   Bool    updateRxProd;
-   uint8   _pad[7];
-   __le64  reserved;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxQueueCtrl;
-
-#define VMXNET3_RXM_UCAST     0x01  /* unicast only */
-#define VMXNET3_RXM_MCAST     0x02  /* multicast passing the filters */
-#define VMXNET3_RXM_BCAST     0x04  /* broadcast only */
-#define VMXNET3_RXM_ALL_MULTI 0x08  /* all multicast */
-#define VMXNET3_RXM_PROMISC   0x10  /* promiscuous */
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxFilterConf {
-   __le32   rxMode;       /* VMXNET3_RXM_xxx */
-   __le16   mfTableLen;   /* size of the multicast filter table */
-   __le16   _pad1;
-   __le64   mfTablePA;    /* PA of the multicast filters table */
-   __le32   vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxFilterConf;
-
-#define VMXNET3_PM_MAX_FILTERS        6
-#define VMXNET3_PM_MAX_PATTERN_SIZE   128
-#define VMXNET3_PM_MAX_MASK_SIZE      (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
-
-#define VMXNET3_PM_WAKEUP_MAGIC       0x01  /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER      0x02  /* wake up on pkts matching filters */
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_PM_PktFilter {
-   uint8 maskSize;
-   uint8 patternSize;
-   uint8 mask[VMXNET3_PM_MAX_MASK_SIZE];
-   uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
-   uint8 pad[6];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_PM_PktFilter;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_PMConf {
-   __le16               wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
-   uint8                numFilters;
-   uint8                pad[5];
-   Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_PMConf;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_VariableLenConfDesc {
-   __le32              confVer;
-   __le32              confLen;
-   __le64              confPA;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_VariableLenConfDesc;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_DSDevRead {
-   /* read-only region for device, read by dev in response to a SET cmd */
-   Vmxnet3_MiscConf     misc;
-   Vmxnet3_IntrConf     intrConf;
-   Vmxnet3_RxFilterConf rxFilterConf;
-   Vmxnet3_VariableLenConfDesc  rssConfDesc;
-   Vmxnet3_VariableLenConfDesc  pmConfDesc;
-   Vmxnet3_VariableLenConfDesc  pluginConfDesc;
-}
-#include "vmware_pack_end.h"
-Vmxnet3_DSDevRead;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_TxQueueDesc {
-   Vmxnet3_TxQueueCtrl ctrl;
-   Vmxnet3_TxQueueConf conf;
-   /* Driver read after a GET command */
-   Vmxnet3_QueueStatus status;
-   UPT1_TxStats        stats;
-   uint8               _pad[88]; /* 128 aligned */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_TxQueueDesc;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_RxQueueDesc {
-   Vmxnet3_RxQueueCtrl ctrl;
-   Vmxnet3_RxQueueConf conf;
-   /* Driver read after a GET command */
-   Vmxnet3_QueueStatus status;
-   UPT1_RxStats        stats;
-   uint8               _pad[88]; /* 128 aligned */
-}
-#include "vmware_pack_end.h"
-Vmxnet3_RxQueueDesc;
-
-typedef
-#include "vmware_pack_begin.h"
-struct Vmxnet3_DriverShared {
-   __le32               magic;
-   __le32               pad; /* make devRead start at 64-bit boundaries */
-   Vmxnet3_DSDevRead    devRead;
-   __le32               ecr;
-   __le32               reserved[5];
-}
-#include "vmware_pack_end.h"
-Vmxnet3_DriverShared;
-
-#define VMXNET3_ECR_RQERR       (1 << 0)
-#define VMXNET3_ECR_TQERR       (1 << 1)
-#define VMXNET3_ECR_LINK        (1 << 2)
-#define VMXNET3_ECR_DIC         (1 << 3)
-#define VMXNET3_ECR_DEBUG       (1 << 4)
-
-/* flip the gen bit of a ring */
-#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
-
-/* only use this if moving the idx won't affect the gen bit */
-#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
-do {\
-   (idx)++;\
-   if (UNLIKELY((idx) == (ring_size))) {\
-      (idx) = 0;\
-   }\
-} while (0)
-
-#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
-   vfTable[vid >> 5] |= (1 << (vid & 31))
-#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
-   vfTable[vid >> 5] &= ~(1 << (vid & 31))
-
-#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
-   ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
-
-#define VMXNET3_MAX_MTU     9000
-#define VMXNET3_MIN_MTU     60
-
-#define VMXNET3_LINK_UP         (10000 << 16 | 1)    // 10 Gbps, up
-#define VMXNET3_LINK_DOWN       0
-
-#define VMXWIFI_DRIVER_SHARED_LEN 8192
-
-#define VMXNET3_DID_PASSTHRU    0xFFFF
-
-#endif /* _VMXNET3_DEFS_H_ */
--- a/lib/librte_pmd_vmxnet3/vmxnet3/vmxnet3_osdep.h    2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VMXNET3_OSDEP_H
-#define _VMXNET3_OSDEP_H
-
-typedef uint64_t       uint64;
-typedef uint32_t       uint32;
-typedef uint16_t       uint16;
-typedef uint8_t                uint8;
-typedef int            bool;
-typedef char           Bool;
-
-#ifndef UNLIKELY
-#define UNLIKELY(x)  __builtin_expect((x),0)
-#endif /* unlikely */
-
-#endif /* _VMXNET3_OSDEP_H */
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c   2014-05-14 11:46:47.129439301 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c   2014-05-14 11:48:07.617690469 -0700
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ *   Copyright (c) 2012-2014 Brocade Communications Systems, Inc.
  *   All rights reserved.
  * 
  *   Redistribution and use in source and binary forms, with or without
@@ -35,731 +35,660 @@
 #include <stdio.h>
 #include <errno.h>
 #include <stdint.h>
-#include <string.h>
-#include <unistd.h>
 #include <stdarg.h>
-#include <fcntl.h>
-#include <inttypes.h>
-#include <rte_byteorder.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
+#include <string.h>

+#include <rte_common.h>
 #include <rte_interrupts.h>
+#include <rte_byteorder.h>
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_alarm.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
 #include <rte_atomic.h>
-#include <rte_string_fns.h>
+#include <rte_spinlock.h>
 #include <rte_malloc.h>
+#include <rte_string_fns.h>

-#include "vmxnet3/vmxnet3_defs.h"
+#include "vmxnet3_dev.h"

-#include "vmxnet3_ring.h"
-#include "vmxnet3_logs.h"
-#include "vmxnet3_ethdev.h"
-
-#define PROCESS_SYS_EVENTS 0
-
-static int eth_vmxnet3_dev_init(struct eth_driver *eth_drv,
-               struct rte_eth_dev *eth_dev);
-static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
-static int vmxnet3_dev_start(struct rte_eth_dev *dev);
-static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
-static void vmxnet3_dev_close(struct rte_eth_dev *dev);
-static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
-static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
-                               int wait_to_complete);
-static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
-                               struct rte_eth_stats *stats);
-static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
-                               struct rte_eth_dev_info *dev_info);
-#if PROCESS_SYS_EVENTS == 1
-static void vmxnet3_process_events(struct vmxnet3_hw *);
-#endif
-/*
- * The set of PCI devices this driver supports
- */
-static struct rte_pci_id pci_id_vmxnet3_map[] = {
+/* PCI identifier */
+#define PCI_VENDOR_ID_VMWARE           0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMXNET3   0x07B0

+#define VMXNET3_DRIVER_VERSION         "0.0.1"
+#define VMXNET3_DRIVER_VERSION_NUM     0x00000100
+
+static struct rte_pci_id pci_id_vmxnet3_map[] = {
 #define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
 #include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+       { .vendor_id = 0, }
 };

-static struct eth_dev_ops vmxnet3_eth_dev_ops = {
-       .dev_configure        = vmxnet3_dev_configure,
-       .dev_start            = vmxnet3_dev_start,
-       .dev_stop             = vmxnet3_dev_stop,
-       .dev_close            = vmxnet3_dev_close,
-       .promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
-       .promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
-       .allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
-       .allmulticast_disable = vmxnet3_dev_allmulticast_disable,
-       .link_update          = vmxnet3_dev_link_update,
-       .stats_get            = vmxnet3_dev_stats_get,
-       .dev_infos_get        = vmxnet3_dev_info_get,
-       .rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
-       .rx_queue_release     = vmxnet3_dev_rx_queue_release,
-       .tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
-       .tx_queue_release     = vmxnet3_dev_tx_queue_release,
-};
+static int vmxnet3_link_update(struct rte_eth_dev *, int);

-static const struct rte_memzone *
-gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
-               const char *post_string, int socket_id, uint16_t align)
+/* Reserve contiguous memory zone.
+ * Lookup first before allocating
+ */
+static const struct rte_memzone *dma_zone_reserve(const char *name,
+                                                 uint32_t size, int socket_id,
+                                                 uint32_t align)
 {
-       char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;

-       rte_snprintf(z_name, sizeof(z_name), "%s_%d_%s",
-                                       dev->driver->pci_drv.name, 
dev->data->port_id, post_string);
-
-       mz = rte_memzone_lookup(z_name);
+       mz = rte_memzone_lookup(name);
        if (mz)
                return mz;

-       return rte_memzone_reserve_aligned(z_name, size,
-                       socket_id, 0, align);
+       return rte_memzone_reserve_aligned(name, size, socket_id,
+                                          0, align);
 }

-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-rte_vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-                               struct rte_eth_link *link)
-{
-       struct rte_eth_link *dst = &(dev->data->dev_link);
-       struct rte_eth_link *src = link;
-
-       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-                                       *(uint64_t *)src) == 0)
-               return -1;
+static uint32_t
+vmxnet3_cmd(struct vmxnet3_dev *dp, uint32_t cmd)
+{
+       rte_spinlock_lock(&dp->cmd_lock);
+       vmxnet3_bar1_put32(dp, VMXNET3_REG_CMD, cmd);
+       cmd = vmxnet3_bar1_get32(dp, VMXNET3_REG_CMD);
+       rte_spinlock_unlock(&dp->cmd_lock);

-       return 0;
+       return cmd;
 }

-/*
- * This function is based on vmxnet3_disable_intr()
- */
 static void
-vmxnet3_disable_intr(struct vmxnet3_hw *hw)
+vmxnet3_vlan_filter_disable(struct vmxnet3_dev *dp)
 {
-       int i;
-
-       PMD_INIT_FUNC_TRACE();
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       uint32_t *vfTable = ds->devRead.rxFilterConf.vfTable;

-       hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
-       for (i = 0; i < VMXNET3_MAX_INTRS; i++)
-                       VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+       memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(uint32_t));
+       vmxnet3_cmd(dp, VMXNET3_CMD_UPDATE_VLAN_FILTERS);
 }

-/*
- * It returns 0 on success.
- */
-static int
-eth_vmxnet3_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
-                    struct rte_eth_dev *eth_dev)
+static void
+vmxnet3_vlan_filter_restore(struct vmxnet3_dev *dp)
 {
-       struct rte_pci_device *pci_dev;
-       struct vmxnet3_hw *hw =
-               VMXNET3_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-       uint32_t mac_hi, mac_lo, ver;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       uint32_t *vfTable = ds->devRead.rxFilterConf.vfTable;

-       PMD_INIT_FUNC_TRACE();
+       memcpy(vfTable, dp->shadow_vfta, VMXNET3_VFT_SIZE * sizeof(uint32_t));
+       vmxnet3_cmd(dp, VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}

-       eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
-       eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
-       pci_dev = eth_dev->pci_dev;
+static void
+vmxnet3_refresh_rxfilter(struct vmxnet3_dev *dp)
+{
+       Vmxnet3_DriverShared *ds = dp->shared->addr;

-       /* 
-       * for secondary processes, we don't initialise any further as primary
-       * has already done this work. 
-       */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return 0;
+       ds->devRead.rxFilterConf.rxMode = rte_cpu_to_le_32(dp->rx_mode);
+       vmxnet3_cmd(dp, VMXNET3_CMD_UPDATE_RX_MODE);
+}

-       /* Vendor and Device ID need to be set before init of shared code */
-       hw->device_id = pci_dev->id.device_id;
-       hw->vendor_id = pci_dev->id.vendor_id;
-       hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
-       hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;
-
-       hw->num_rx_queues = 1;
-       hw->num_tx_queues = 1;
-       hw->cur_mtu = ETHER_MTU;
-       hw->bufs_per_pkt = 1;
-
-       /* Check h/w version compatibility with driver. */
-    ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
-    PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
-    if (ver & 0x1)
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
-    else {
-       PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1\n");
-       return -EIO;
-    }
-
-    /* Check UPT version compatibility with driver. */
-    ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
-    PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
-    if (ver & 0x1)
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
-    else {
-       PMD_INIT_LOG(ERR, "Incompatiable UPT version.\n");
-       return -EIO;
-    }
-
-       /* Getting MAC Address */
-       mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
-       mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
-       memcpy(hw->perm_addr  , &mac_lo, 4);
-       memcpy(hw->perm_addr+4, &mac_hi, 2);
-
-       /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
-                       VMXNET3_MAX_MAC_ADDRS, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
-               PMD_INIT_LOG(ERR,
-                       "Failed to allocate %d bytes needed to store MAC addresses",
-                       ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
-               return -ENOMEM;
-       }
-       /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *) hw->perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
-
-       PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
-                      hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
-                      hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+static void vmxnet3_read_mac(struct vmxnet3_dev *dp, uint8_t *mac_addr)
+{
+       uint32_t t;

-       /* Put device in Quiesce Mode */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
+       t = vmxnet3_bar1_get32(dp, VMXNET3_REG_MACL);
+       *(uint32_t *) mac_addr = t;

-       return 0;
-}
+       t = vmxnet3_bar1_get32(dp, VMXNET3_REG_MACH);
+       mac_addr[4] = t & 0xff;
+       mac_addr[5] = (t >> 8) & 0xff;

-static struct eth_driver rte_vmxnet3_pmd = {
-       {
-               .name = "rte_vmxnet3_pmd",
-               .id_table = pci_id_vmxnet3_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-       },
-       .eth_dev_init = eth_vmxnet3_dev_init,
-       .dev_private_size = sizeof(struct vmxnet3_adapter),
-};
+       PMD_INIT_LOG(DEBUG, "mac address %x:%x:%x:%x:%x:%x",
+                    mac_addr[0], mac_addr[1], mac_addr[2],
+                    mac_addr[3], mac_addr[4], mac_addr[5]);
+}

-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
- */
-int
-rte_vmxnet3_pmd_init(void)
+static int
+vmxnet3_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       uint32_t *vfTable = ds->devRead.rxFilterConf.vfTable;
+
+       /* save state for restore */
+       if (on)
+               VMXNET3_SET_VFTABLE_ENTRY(dp->shadow_vfta, vid);
+       else
+               VMXNET3_CLEAR_VFTABLE_ENTRY(dp->shadow_vfta, vid);

-       rte_eth_driver_register(&rte_vmxnet3_pmd);
+       if (dp->rx_mode & VMXNET3_RXM_PROMISC)
+               return 0;
+
+       /* set in hardware */
+       if (on)
+               VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+       else
+               VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+
+       vmxnet3_cmd(dp, VMXNET3_CMD_UPDATE_VLAN_FILTERS);
        return 0;
 }

-static int
-vmxnet3_dev_configure(struct rte_eth_dev *dev)
+static void
+vmxnet3_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       const struct rte_memzone *mz;
-       struct vmxnet3_hw *hw =
-                       VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       size_t size;
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       Vmxnet3_DSDevRead *devRead = &ds->devRead;

-       PMD_INIT_FUNC_TRACE();
+       if (mask & ETH_VLAN_STRIP_MASK)
+               devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+       else
+               devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

-       if (dev->data->nb_rx_queues > UINT8_MAX ||
-                       dev->data->nb_tx_queues > UINT8_MAX)
-               return (-EINVAL);
-
-       size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
-                 dev->data->nb_tx_queues * sizeof (struct Vmxnet3_RxQueueDesc);
-
-       if (size > UINT16_MAX)
-               return (-EINVAL);
-
-       hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
-       hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
-
-       /* 
-        * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead 
-        * on current socket
-        */
-       mz = gpa_zone_reserve(dev, sizeof (struct Vmxnet3_DriverShared),
-               "shared", rte_socket_id(), 8);
-                               
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating shared zone\n");
-               return (-ENOMEM);
-       }
-       memset(mz->addr, 0, mz->len);
+       if (mask & ETH_VLAN_FILTER_MASK)
+               vmxnet3_vlan_filter_restore(dp);
+       else
+               vmxnet3_vlan_filter_disable(dp);
+}

-       hw->shared = mz->addr;
-       hw->sharedPA = mz->phys_addr;
+static void
+vmxnet3_write_mac(struct vmxnet3_dev *dp, const uint8_t *macaddr)
+{
+       uint32_t val32;

-       /* 
-       * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc 
-       * on current socket
-       */
-       mz = gpa_zone_reserve(dev, size, "queuedesc",
-                                       rte_socket_id(), 
VMXNET3_QUEUE_DESC_ALIGN);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-               return (-ENOMEM);
-       }
-       memset(mz->addr, 0, mz->len);
+       memcpy(&val32, macaddr, sizeof(uint32_t));
+       vmxnet3_bar1_put32(dp, VMXNET3_REG_MACL, val32);
+       val32 = macaddr[5] << 8 | macaddr[4];
+       vmxnet3_bar1_put32(dp, VMXNET3_REG_MACH, val32);
+}

-       hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
-       hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);
+static const uint8_t rss_default_key[UPT1_RSS_MAX_KEY_SIZE] = {
+        0xd9, 0x20, 0x1d, 0xb5, 0x51, 0x90, 0xd1, 0x32, 0x2e, 0xde,
+        0xec, 0x8e, 0xe7, 0xc2, 0x8e, 0xe8, 0xe9, 0x42, 0x9b, 0xe4,
+        0x67, 0x12, 0x22, 0x2d, 0xf6, 0x18, 0x40, 0x64, 0xcc, 0xcd,
+        0x6c, 0x95, 0xb9, 0x22, 0x4a, 0x30, 0x3c, 0x8d, 0x12, 0x3e,
+};

-       hw->queueDescPA = mz->phys_addr;
-       hw->queue_desc_len = (uint16_t)size;
+static int vmxnet3_rss_setup(struct rte_eth_dev *eth_dev)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       const struct rte_eth_conf *eth_conf = &eth_dev->data->dev_conf;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       Vmxnet3_DSDevRead *devRead = &ds->devRead;
+       const struct rte_memzone *rz;
+       struct UPT1_RSSConf *rssConf;
+       const uint8_t *hash_key;
+       unsigned i;
+       char z_name[RTE_MEMZONE_NAMESIZE];

-       if(dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
-
-               /* Allocate memory structure for UPT1_RSSConf and configure */
-               mz = gpa_zone_reserve(dev, sizeof (struct VMXNET3_RSSConf), "rss_conf",
-                               rte_socket_id(), CACHE_LINE_SIZE);
-               if (mz == NULL) {
-                       PMD_INIT_LOG(ERR, "ERROR: Creating rss_conf structure zone\n");
-                       return (-ENOMEM);
-               }
-               memset(mz->addr, 0, mz->len);
+       /* If no RSS hash types then RSS unnecessary. */
+       if (eth_conf->rx_adv_conf.rss_conf.rss_hf == 0)
+               return 0;

-               hw->rss_conf = mz->addr;
-               hw->rss_confPA = mz->phys_addr;
-       }
+       rte_snprintf(z_name, sizeof(z_name), "vmxnet3_rss_%u",
+                    eth_dev->data->port_id);
+
+       rz = dma_zone_reserve(z_name, sizeof(*rssConf), SOCKET_ID_ANY,
+                             CACHE_LINE_SIZE);
+       if (rz == NULL)
+               return -ENOMEM;
+
+       /* NB: the VMXNET3 RSS definitions are a subset of DPDK's,
+          probably because both derive from the Microsoft NDIS spec. */
+       rssConf = rz->addr;
+       rssConf->hashType = eth_conf->rx_adv_conf.rss_conf.rss_hf & 0xf;
+       rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+       rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE; /* 40 */
+       hash_key = eth_conf->rx_adv_conf.rss_conf.rss_key;
+       if (hash_key == NULL)
+               hash_key = rss_default_key;
+
+       memcpy(rssConf->hashKey, hash_key, UPT1_RSS_MAX_KEY_SIZE);
+
+       rssConf->indTableSize = UPT1_RSS_MAX_IND_TABLE_SIZE;
+       for (i = 0; i < rssConf->indTableSize; i++)
+               rssConf->indTable[i] = i % eth_dev->data->nb_rx_queues;
+
+       devRead->misc.uptFeatures |= UPT1_F_RSS;
+       devRead->rssConfDesc.confVer = 1;
+       devRead->rssConfDesc.confLen = sizeof(*rssConf);
+       devRead->rssConfDesc.confPA = rz->phys_addr;

        return 0;
 }

-static int
-vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
+static void vmxnet3_shared_setup(struct rte_eth_dev *eth_dev)
 {
-       struct rte_eth_conf port_conf = dev->data->dev_conf;
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       Vmxnet3_DriverShared *shared = hw->shared;
-       Vmxnet3_DSDevRead *devRead = &shared->devRead;
-       uint32_t *mac_ptr;
-       uint32_t val, i;
-       int ret;
-
-       shared->magic = VMXNET3_REV1_MAGIC;
-       devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
-
-       /* Setting up Guest OS information */
-       devRead->misc.driverInfo.gos.gosBits   = sizeof(void *) == 4 ?
-                                                       VMXNET3_GOS_BITS_32 :
-                                                       VMXNET3_GOS_BITS_64;
-       devRead->misc.driverInfo.gos.gosType   = VMXNET3_GOS_TYPE_LINUX;
-       devRead->misc.driverInfo.vmxnet3RevSpt = 1;
-       devRead->misc.driverInfo.uptVerSpt     = 1;
-
-       devRead->misc.queueDescPA  = hw->queueDescPA;
-       devRead->misc.queueDescLen = hw->queue_desc_len;
-       devRead->misc.mtu          = hw->cur_mtu;
-       devRead->misc.numTxQueues  = hw->num_tx_queues;
-       devRead->misc.numRxQueues  = hw->num_rx_queues;
-
-       /*
-       * Set number of interrupts to 1
-       * PMD disables all the interrupts but this is MUST to activate device
-       * It needs at least one interrupt for link events to handle
-       * So we'll disable it later after device activation if needed
-       */
-       devRead->intrConf.numIntrs = 1;
-       devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       Vmxnet3_DSDevRead *devRead = &ds->devRead;
+       unsigned num_txq = eth_dev->data->nb_tx_queues;
+       unsigned num_rxq = eth_dev->data->nb_rx_queues;
+       uint32_t mtu = eth_dev->data->max_frame_size;
+       unsigned i;
+       size_t size;

-       for (i = 0; i < hw->num_tx_queues; i++) {
-               Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
-               vmxnet3_tx_queue_t *txq   = dev->data->tx_queues[i];
-
-               tqd->ctrl.txNumDeferred  = 0;
-               tqd->ctrl.txThreshold    = 1;
-               tqd->conf.txRingBasePA   = txq->cmd_ring.basePA;
-               tqd->conf.compRingBasePA = txq->comp_ring.basePA;
-
-               tqd->conf.txRingSize   = txq->cmd_ring.size;
-               tqd->conf.compRingSize = txq->comp_ring.size;
-               tqd->conf.intrIdx      = txq->comp_ring.intr_idx;
-               tqd->status.stopped    = TRUE;
-               tqd->status.error      = 0;
-               memset(&tqd->stats, 0, sizeof(tqd->stats));
-       }
+       /* Setup shared area */
+       memset(ds, 0, sizeof(*ds));

-       for (i = 0; i < hw->num_rx_queues; i++) {
-               Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
-               vmxnet3_rx_queue_t *rxq    = dev->data->rx_queues[i];
-
-               rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
-               rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
-               rqd->conf.compRingBasePA  = rxq->comp_ring.basePA;
-
-               rqd->conf.rxRingSize[0]   = rxq->cmd_ring[0].size;
-               rqd->conf.rxRingSize[1]   = rxq->cmd_ring[1].size;
-               rqd->conf.compRingSize    = rxq->comp_ring.size;
-               rqd->conf.intrIdx         = rxq->comp_ring.intr_idx;
-               rqd->status.stopped       = TRUE;
-               rqd->status.error         = 0;
-               memset(&rqd->stats, 0, sizeof(rqd->stats));
-       }
+       ds->magic = rte_cpu_to_le_32(VMXNET3_REV1_MAGIC);
+       devRead->misc.driverInfo.version
+               = rte_cpu_to_le_32(VMXNET3_DRIVER_VERSION_NUM);

-       /* RxMode set to 0 of VMXNET3_RXM_xxx */
-       devRead->rxFilterConf.rxMode = 0;
+       if (sizeof(void *) == 4)
+               devRead->misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_32;
+       else
+               devRead->misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_64;

-       /* Setting up feature flags */
-       if(dev->data->dev_conf.rxmode.hw_ip_checksum) {
-               devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
-       }
+       devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+       devRead->misc.driverInfo.vmxnet3RevSpt = 1;
+       devRead->misc.driverInfo.uptVerSpt = 1;

-       if(dev->data->dev_conf.rxmode.hw_vlan_strip) {
-               devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;
+       devRead->misc.uptFeatures = 0;
+       if (eth_dev->data->dev_conf.rxmode.hw_ip_checksum)
+               devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+       vmxnet3_vlan_offload_set(eth_dev,
+                                ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+       devRead->misc.mtu = rte_cpu_to_le_32(mtu);
+
+       /* Setup Tx/Rx queue descriptor area */
+       size = num_txq * sizeof(struct Vmxnet3_TxQueueDesc);
+       size += num_rxq * sizeof(struct Vmxnet3_RxQueueDesc);
+
+       devRead->misc.queueDescPA = rte_cpu_to_le_64(dp->qdesc->phys_addr);
+       devRead->misc.queueDescLen = rte_cpu_to_le_32(size);
+
+       /* tx queue setup */
+       devRead->misc.numTxQueues = num_txq;
+       dp->tqd_start = dp->qdesc->addr;
+       for (i = 0; i < num_txq; i++) {
+               struct vmxnet3_tx_queue *txq = dp->tx_queue + i;
+               Vmxnet3_TxQueueDesc *txd = dp->tqd_start + i;
+
+               txq->shared = &txd->ctrl;
+               txd->conf.txRingBasePA
+                       = rte_cpu_to_le_64(txq->tx_ring.phys_addr);
+               txd->conf.txRingSize
+                       = rte_cpu_to_le_32(txq->tx_ring.size);
+               txd->conf.compRingBasePA
+                       = rte_cpu_to_le_64(txq->comp_ring.phys_addr);
+               txd->conf.compRingSize
+                       = rte_cpu_to_le_32(txq->comp_ring.size);
+               txd->conf.dataRingBasePA = 0;
+               txd->conf.dataRingSize = 0;
+       }
+
+       devRead->misc.numRxQueues = num_rxq;
+       dp->rqd_start = (struct Vmxnet3_RxQueueDesc *)(dp->tqd_start + num_txq);
+       for (i = 0; i < num_rxq; i++) {
+               struct vmxnet3_rx_queue *rxq = dp->rx_queue + i;
+               Vmxnet3_RxQueueDesc *rxd = dp->rqd_start + i;
+
+               rxq->shared = &rxd->ctrl;
+               rxd->conf.rxRingBasePA[0]
+                       = rte_cpu_to_le_64(rxq->rx_ring.phys_addr);
+               rxd->conf.rxRingSize[0]
+                       = rte_cpu_to_le_32(rxq->rx_ring.size);
+
+               rxd->conf.rxRingBasePA[1] = 0;
+               rxd->conf.rxRingSize[1] = 0;
+
+               rxd->conf.compRingBasePA
+                       = rte_cpu_to_le_64(rxq->comp_ring.phys_addr);
+               rxd->conf.compRingSize
+                       = rte_cpu_to_le_32(rxq->comp_ring.size);
        }

-       if(port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
-               ret = vmxnet3_rss_configure(dev);
-               if(ret != VMXNET3_SUCCESS) {
-                       return ret;
-               }
-               devRead->misc.uptFeatures |= VMXNET3_F_RSS;
-               devRead->rssConfDesc.confVer = 1;
-               devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
-               devRead->rssConfDesc.confPA  = hw->rss_confPA;
-       }
+       /* interrupt settings */
+       devRead->intrConf.autoMask = 1;
+       devRead->intrConf.numIntrs = 1;
+       devRead->intrConf.eventIntrIdx = 0;
+       devRead->intrConf.modLevels[0] = UPT1_IML_ADAPTIVE;
+       devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+}

-       if(dev->data->dev_conf.rxmode.hw_vlan_filter) {
-               ret = vmxnet3_vlan_configure(dev);
-               if(ret != VMXNET3_SUCCESS) {
-                       return ret;
-               }
-       }
+/* Called after the queues have been set up */
+static int
+vmxnet3_configure(struct rte_eth_dev *eth_dev)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       Vmxnet3_DriverShared *ds = dp->shared->addr;
+       uint32_t *vfTable = ds->devRead.rxFilterConf.vfTable;

-       PMD_INIT_LOG(DEBUG, "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
-                                       hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
-                                       hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+       PMD_INIT_LOG(DEBUG, "configure");

-       /* Write MAC Address back to device */
-       mac_ptr = (uint32_t *)hw->perm_addr;
-       val = *mac_ptr;
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);
+       /* Default vlan filter: allow untagged traffic */
+       VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+       dp->shadow_vfta[0] = 1;

-       val = (hw->perm_addr[5] << 8) | hw->perm_addr[4];
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
+       /* Initialize Link state */
+       vmxnet3_link_update(eth_dev, 0);

-       return VMXNET3_SUCCESS;
+       return 0;
 }

-/*
- * Configure device link speed and setup link.
- * Must be called after eth_vmxnet3_dev_init. Other wise it might fail
- * It returns 0 on success.
- */
 static int
-vmxnet3_dev_start(struct rte_eth_dev *dev)
+vmxnet3_start(struct rte_eth_dev *eth_dev)
 {
-       int status, ret;
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       uint32_t ret;
+       int err;

        PMD_INIT_FUNC_TRACE();

-       ret = vmxnet3_setup_driver_shared(dev);
-       if(ret != VMXNET3_SUCCESS) {
-               return ret;
-       }
+       vmxnet3_shared_setup(eth_dev);

-       /* Exchange shared data with device */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
-                             hw->sharedPA));
-    VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
-                                                 hw->sharedPA));
-
-    /* Activate device by register write */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
-       status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
-
-       if (status != 0) {
-               PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL\n", __func__);
-               return -1;
+       if (vmxnet3_rss_setup(eth_dev) < 0) {
+               PMD_INIT_LOG(ERR, "Failed to setup RSS");
+               return -ENOMEM;
        }

-       /* Disable interrupts */
-       vmxnet3_disable_intr(hw);
+       err = vmxnet3_rx_init(eth_dev);
+       if (err)
+               return err;
+
+       /* Activate device */
+       ret = vmxnet3_cmd(dp, VMXNET3_CMD_ACTIVATE_DEV);
+       if (ret != 0) {
+               PMD_INIT_LOG(ERR, "Failed to activate port %u error %u",
+                            eth_dev->data->port_id, ret);
+
+               return -EINVAL;
+       }
+
+       /* Turn on Rx queues */
+       vmxnet3_rx_start(eth_dev);
+
+       /* Reset receive filter */
+       vmxnet3_vlan_filter_restore(dp);
+       dp->rx_mode |= VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST;
+       vmxnet3_refresh_rxfilter(dp);

-       /*
-        * Load RX queues with blank mbufs and update next2fill index for device
-        * Update RxMode of the device
-        */
-       ret = vmxnet3_dev_rxtx_init(dev);
-       if(ret != VMXNET3_SUCCESS) {
-               PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL\n", __func__);
-               return ret;
-       }
+       return 0;
+}

-       /* Setting proper Rx Mode and issue Rx Mode Update command */
-       vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_ALL_MULTI, 1);
+static void
+vmxnet3_stop(struct rte_eth_dev *eth_dev)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;

-       /*
-        * Don't need to handle events for now
-        */
-#if PROCESS_SYS_EVENTS == 1
-       events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
-       PMD_INIT_LOG(DEBUG, "Reading events: 0x%X\n\n", events);
-       vmxnet3_process_events(hw);
-#endif
-       return status;
+       PMD_INIT_FUNC_TRACE();
+
+       vmxnet3_cmd(dp, VMXNET3_CMD_QUIESCE_DEV);
+
+       vmxnet3_rx_flush_all(eth_dev);
+       vmxnet3_tx_flush_all(eth_dev);
 }

-/*
- * Stop device: disable rx and tx functions to allow for reconfiguring.
- */
 static void
-vmxnet3_dev_stop(struct rte_eth_dev *dev)
+vmxnet3_close(struct rte_eth_dev *eth_dev)
 {
-       struct rte_eth_link link;
-       struct vmxnet3_hw *hw =
-               VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

-       if(hw->adapter_stopped == TRUE) {
-               PMD_INIT_LOG(DEBUG, "Device already closed.\n");
-               return;
-       }
+       vmxnet3_cmd(dp, VMXNET3_CMD_RESET_DEV);
+}

-       /* disable interrupts */
-       vmxnet3_disable_intr(hw);
+static void
+vmxnet3_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;

-       /* quiesce the device first */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);
-
-       /* reset the device */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
-       PMD_INIT_LOG(DEBUG, "Device reset.\n");
-       hw->adapter_stopped = FALSE;
+       PMD_INIT_FUNC_TRACE();

-       vmxnet3_dev_clear_queues(dev);
+       vmxnet3_vlan_filter_disable(dp);

-       /* Clear recorded link status */
-       memset(&link, 0, sizeof(link));
-       rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
+       dp->rx_mode |= VMXNET3_RXM_PROMISC;
+       vmxnet3_refresh_rxfilter(dp);
 }

-/*
- * Reset and stop device.
- */
 static void
-vmxnet3_dev_close(struct rte_eth_dev *dev)
+vmxnet3_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
-       struct vmxnet3_hw *hw =
-               VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();

-       vmxnet3_dev_stop(dev);
-       hw->adapter_stopped = TRUE;
+       vmxnet3_vlan_filter_restore(dp);

+       dp->rx_mode &= ~VMXNET3_RXM_PROMISC;
+       vmxnet3_refresh_rxfilter(dp);
 }

 static void
-vmxnet3_dev_stats_get( struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+vmxnet3_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
-       unsigned int i;
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
-
-       stats->opackets = 0;
-       stats->obytes = 0;
-       stats->oerrors = 0;
-       stats->ipackets = 0;
-       stats->ibytes = 0;
-       stats->rx_nombuf = 0;
-       stats->ierrors = 0;
-       stats->imcasts  = 0;
-       stats->fdirmatch = 0;
-       stats->fdirmiss = 0;
-
-       for (i = 0; i < hw->num_tx_queues; i++) {
-               stats->opackets += hw->tqd_start[i].stats.ucastPktsTxOK +
-                               hw->tqd_start[i].stats.mcastPktsTxOK +
-                               hw->tqd_start[i].stats.bcastPktsTxOK;
-               stats->obytes   += hw->tqd_start[i].stats.ucastBytesTxOK +
-                               hw->tqd_start[i].stats.mcastBytesTxOK +
-                               hw->tqd_start[i].stats.bcastBytesTxOK;
-               stats->oerrors  += hw->tqd_start[i].stats.pktsTxError +
-                               hw->tqd_start[i].stats.pktsTxDiscard;
-       }
-
-       for (i = 0; i < hw->num_rx_queues; i++) {
-               stats->ipackets  += hw->rqd_start[i].stats.ucastPktsRxOK +
-                               hw->rqd_start[i].stats.mcastPktsRxOK +
-                               hw->rqd_start[i].stats.bcastPktsRxOK;
-               stats->ibytes    += hw->rqd_start[i].stats.ucastBytesRxOK +
-                               hw->rqd_start[i].stats.mcastBytesRxOK +
-                               hw->rqd_start[i].stats.bcastBytesRxOK;
-               stats->rx_nombuf += hw->rqd_start[i].stats.pktsRxOutOfBuf;
-               stats->ierrors   += hw->rqd_start[i].stats.pktsRxError;
-       }
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;

+       dp->rx_mode |= VMXNET3_RXM_ALL_MULTI;
+       vmxnet3_refresh_rxfilter(dp);
 }

 static void
-vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+vmxnet3_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
-       dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
-       dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
-       dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
-       dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
-       dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+
+       dp->rx_mode &= ~VMXNET3_RXM_ALL_MULTI;
+       vmxnet3_refresh_rxfilter(dp);
 }

-/* return 0 means link status changed, -1 means not changed */
+/* update link status value. */
 static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+vmxnet3_link_update(struct rte_eth_dev *dev,
+                   int wait_to_complete __rte_unused)
 {
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_link link;
-       uint32_t ret;
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       struct rte_eth_link old, link;
+       uint32_t status;

-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
-       ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+       memset(&link, 0, sizeof(link));

-       if (!ret) {
-               PMD_INIT_LOG(ERR, "Link Status Negative : %s()\n", __func__);
-               return -1;
-       }
+       rte_eth_dev_get_link(dev, &old);

-       if (ret & 0x1) {
-               link.link_status = 1;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               link.link_speed = ETH_LINK_SPEED_10000;
+       status = vmxnet3_cmd(dp, VMXNET3_CMD_GET_LINK);

-               rte_vmxnet3_dev_atomic_write_link_status(dev, &link);
+       PMD_DRV_LOG(INFO, "link status %#x", status);

-               return 0;
+       if (status & 1) {
+               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_speed = status >> 16;
+               link.link_status = 1;
        }

-       return -1;
+       rte_eth_dev_set_link(dev, &link);
+
+       return (old.link_status == link.link_status) ? -1 : 0;
 }

-/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
 static void
-vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
+vmxnet3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       unsigned i;

-       struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
-       if(set)
-               rxConf->rxMode = rxConf->rxMode | feature;
-       else
-               rxConf->rxMode = rxConf->rxMode & (~feature);
+       if (!dp->tqd_start  || !dp->rqd_start)
+               return;

-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
-}
+       /* Collect stats */
+       vmxnet3_cmd(dp, VMXNET3_CMD_GET_STATS);

-/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
-static void
-vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
-{
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
-}
+       /* NB: RTE_ETHDEV_QUEUE_STAT_CNTRS (16) > VMXNET3_MAX_TX_QUEUES (8) */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               struct UPT1_TxStats *txStats = &dp->tqd_start[i].stats;

-/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
-static void
-vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
-{
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
-}
+               stats->q_opackets[i] = txStats->ucastPktsTxOK +
+                       txStats->mcastPktsTxOK +
+                       txStats->bcastPktsTxOK;
+               stats->q_obytes[i] = txStats->ucastBytesTxOK +
+                       txStats->mcastBytesTxOK +
+                       txStats->bcastBytesTxOK;

-/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
-static void
-vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
-{
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
+               stats->opackets += stats->q_opackets[i];
+               stats->obytes += stats->q_obytes[i];
+               stats->oerrors += txStats->pktsTxError;
+       }
+
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               struct UPT1_RxStats *rxStats = &dp->rqd_start[i].stats;
+
+               stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
+                       rxStats->mcastPktsRxOK +
+                       rxStats->bcastPktsRxOK;
+
+               stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
+                       rxStats->mcastBytesRxOK +
+                       rxStats->bcastBytesRxOK;
+
+               stats->ipackets += stats->q_ipackets[i];
+               stats->ibytes += stats->q_ibytes[i];
+
+               stats->q_errors[i] = rxStats->pktsRxError;
+               stats->ierrors += rxStats->pktsRxError;
+               stats->imcasts += rxStats->mcastPktsRxOK;
+
+               struct vmxnet3_rx_queue *rxq = &dp->rx_queue[i];
+               stats->ierrors += rxq->fcs_errors + rxq->drop_errors;
+       }
 }

-/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
 static void
-vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
+vmxnet3_infos_get(struct rte_eth_dev *eth_dev __rte_unused,
+                 struct rte_eth_dev_info *dev_info)
 {
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
+       dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+       dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+       dev_info->min_rx_bufsize = VMXNET3_MIN_T0_BUF_SIZE;
+       dev_info->max_rx_pktlen = ETHER_MAX_LEN;
+       /* in future this can be VMXNET3_MAX_MTU + VMXNET3_MAX_ETH_HDR */
+       dev_info->max_mac_addrs = 1;
 }

-#if PROCESS_SYS_EVENTS == 1
 static void
-vmxnet3_process_events(struct vmxnet3_hw *hw)
+vmxnet3_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+{
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+
+       vmxnet3_write_mac(dp, mac_addr->addr_bytes);
+}
+
+static struct eth_dev_ops vmxnet3_dev_ops = {
+       .dev_configure        = vmxnet3_configure,
+       .dev_start            = vmxnet3_start,
+       .dev_stop             = vmxnet3_stop,
+       .dev_close            = vmxnet3_close,
+       .promiscuous_enable   = vmxnet3_promiscuous_enable,
+       .promiscuous_disable  = vmxnet3_promiscuous_disable,
+       .allmulticast_enable  = vmxnet3_allmulticast_enable,
+       .allmulticast_disable = vmxnet3_allmulticast_disable,
+       .link_update          = vmxnet3_link_update,
+       .stats_get            = vmxnet3_stats_get,
+       .dev_infos_get        = vmxnet3_infos_get,
+       .vlan_filter_set      = vmxnet3_vlan_filter_set,
+       .vlan_offload_set     = vmxnet3_vlan_offload_set,
+       .rx_queue_setup       = vmxnet3_rx_queue_setup,
+       .rx_queue_release     = vmxnet3_rx_queue_release,
+       .tx_queue_setup       = vmxnet3_tx_queue_setup,
+       .tx_queue_release     = vmxnet3_tx_queue_release,
+       .mac_addr_set         = vmxnet3_mac_addr_set,
+};
+
+static int
+vmxnet3_dev_init(struct eth_driver *eth_drv __rte_unused,
+                struct rte_eth_dev *eth_dev)
 {
-       uint32_t events = hw->shared->ecr;
-       if (!events){
-               PMD_INIT_LOG(ERR, "No events to process in %s()\n", __func__);
-               return;
-       }
+       struct vmxnet3_dev *dp = eth_dev->data->dev_private;
+       struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+       uint32_t ver;
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       size_t size;

-       /* 
-       * ECR bits when written with 1b are cleared. Hence write
-       * events back to ECR so that the bits which were set will be reset.
-       */
-       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
-
-       /* Check if link state has changed */
-   if (events & VMXNET3_ECR_LINK){
-          PMD_INIT_LOG(ERR, "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
-   }
-
-       /* Check if there is an error on xmit/recv queues */
-       if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
-               VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
-
-               if (hw->tqd_start->status.stopped)
-                       PMD_INIT_LOG(ERR, "tq error 0x%x\n",
-                     hw->tqd_start->status.error);
-
-               if (hw->rqd_start->status.stopped)
-                       PMD_INIT_LOG(ERR, "rq error 0x%x\n",
-                     hw->rqd_start->status.error);
+       PMD_INIT_FUNC_TRACE();

-      /* Reset the device */
-      /* Have to reset the device */
-       }
+       eth_dev->dev_ops = &vmxnet3_dev_ops;
+       eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
+       eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;

-       if (events & VMXNET3_ECR_DIC)
-               PMD_INIT_LOG(ERR, "Device implementation change event.\n");
+       dp->bar0 = pci_dev->mem_resource[0].addr;
+       if (!dp->bar0)
+               rte_panic("missing bar0 resource 0\n");
+
+       dp->bar1 = pci_dev->mem_resource[1].addr;
+       if (!dp->bar1)
+               rte_panic("missing bar1 resource 1\n");
+
+       rte_spinlock_init(&dp->cmd_lock);
+
+       /* Check the version numbers of the virtual device */
+       ver = vmxnet3_bar1_get32(dp, VMXNET3_REG_VRRS);
+       if (ver & 1)
+               vmxnet3_bar1_put32(dp, VMXNET3_REG_VRRS, 1);
+       else {
+               PMD_INIT_LOG(ERR, "incompatible h/w version %#x",
+                            ver);
+               return -EBUSY;
+       }
+
+       ver = vmxnet3_bar1_get32(dp, VMXNET3_REG_UVRS);
+       if (ver & 1)
+               vmxnet3_bar1_put32(dp, VMXNET3_REG_UVRS, 1);
+       else {
+               PMD_INIT_LOG(ERR, "incompatible upt version %#x", ver);
+               return -EBUSY;
+       }
+
+       /* Only single mac address */
+       vmxnet3_read_mac(dp, dp->mac_addr.addr_bytes);
+       if (!is_valid_assigned_ether_addr(&dp->mac_addr)) {
+               PMD_INIT_LOG(ERR, "invalid mac address");
+               return -EINVAL;
+       }
+
+       eth_dev->data->mac_addrs = &dp->mac_addr;
+
+       /* Get memory zone for shared */
+       rte_snprintf(z_name, sizeof(z_name), "vmxnet3_shared_%u",
+                    eth_dev->data->port_id);
+       dp->shared = dma_zone_reserve(z_name,
+                                     sizeof(struct Vmxnet3_DriverShared),
+                                     SOCKET_ID_ANY, CACHE_LINE_SIZE);
+       if (!dp->shared)
+               return -ENOMEM;

-       if (events & VMXNET3_ECR_DEBUG)
-               PMD_INIT_LOG(ERR, "Debug event generated by device.\n");
+       uint64_t paddr = dp->shared->phys_addr;
+       vmxnet3_bar1_put32(dp, VMXNET3_REG_DSAL, paddr);
+       vmxnet3_bar1_put32(dp, VMXNET3_REG_DSAH, paddr >> 32);
+
+       /* Reserve a worst-case sized zone for the queue descriptors,
+          since it cannot be resized later */
+       rte_snprintf(z_name, sizeof(z_name), "vmxnet3_queues_%u",
+                    eth_dev->data->port_id);
+
+       size = VMXNET3_MAX_TX_QUEUES * sizeof(struct Vmxnet3_TxQueueDesc);
+       size += VMXNET3_MAX_RX_QUEUES * sizeof(struct Vmxnet3_RxQueueDesc);
+       dp->qdesc = dma_zone_reserve(z_name, size, SOCKET_ID_ANY,
+                                    VMXNET3_QUEUE_DESC_ALIGN);
+       if (!dp->qdesc)
+               return -ENOMEM;

+       return 0;
 }
+
+static struct eth_driver rte_vmxnet3_pmd = {
+       {
+               .name = "rte_vmxnet3_pmd",
+               .id_table = pci_id_vmxnet3_map,
+#ifdef RTE_EAL_UNBIND_PORTS
+               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
 #endif
+       },
+       .eth_dev_init = vmxnet3_dev_init,
+       .dev_private_size = sizeof(struct vmxnet3_dev),
+};
+
+int rte_vmxnet3_pmd_init(void)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       rte_eth_driver_register(&rte_vmxnet3_pmd);
+       return 0;
+}
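
For context, a minimal sketch of how an application would pull this PMD in: register the driver once after EAL initialization, then use the normal ethdev calls against the probed vmxnet3 ports. This is an illustrative assumption about the DPDK 1.x init sequence, not part of the patch; the port id and queue counts below are placeholders.

/* Hypothetical application-side usage, assuming the DPDK 1.x init flow. */
#include <string.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

int rte_vmxnet3_pmd_init(void);        /* provided by this driver */

int main(int argc, char **argv)
{
        struct rte_eth_conf conf;

        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* register the vmxnet3 PMD so probed devices appear as ethdev ports */
        rte_vmxnet3_pmd_init();

        /* port 0 assumed to be a vmxnet3 device: one rx and one tx queue */
        memset(&conf, 0, sizeof(conf));
        if (rte_eth_dev_configure(0, 1, 1, &conf) < 0)
                return -1;

        /* rx/tx queue setup and rte_eth_dev_start() would follow here */
        return 0;
}
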
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.h   2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,187 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VMXNET3_ETHDEV_H_
-#define _VMXNET3_ETHDEV_H_
-
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-#define VMXNET3_ASSERT(x) do { \
-       if(!(x)) rte_panic("VMXNET3: x"); \
-} while(0)
-#endif
-
-#define VMXNET3_MAX_MAC_ADDRS 1
-
-/* UPT feature to negotiate */
-#define VMXNET3_F_RXCSUM      0x0001
-#define VMXNET3_F_RSS         0x0002
-#define VMXNET3_F_RXVLAN      0x0004
-#define VMXNET3_F_LRO         0x0008
-
-/* Hash Types supported by device */
-#define VMXNET3_RSS_HASH_TYPE_NONE      0x0
-#define VMXNET3_RSS_HASH_TYPE_IPV4      0x01
-#define VMXNET3_RSS_HASH_TYPE_TCP_IPV4  0x02
-#define VMXNET3_RSS_HASH_TYPE_IPV6      0x04
-#define VMXNET3_RSS_HASH_TYPE_TCP_IPV6  0x08
-
-#define VMXNET3_RSS_HASH_FUNC_NONE      0x0
-#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ  0x01
-
-#define VMXNET3_RSS_MAX_KEY_SIZE        40
-#define VMXNET3_RSS_MAX_IND_TABLE_SIZE  128
-
-/* RSS configuration structure - shared with device through GPA */
-typedef
-struct VMXNET3_RSSConf {
-   uint16_t   hashType;
-   uint16_t   hashFunc;
-   uint16_t   hashKeySize;
-   uint16_t   indTableSize;
-   uint8_t    hashKey[VMXNET3_RSS_MAX_KEY_SIZE];
-   /*
-    * indTable is only element that can be changed without
-    * device quiesce-reset-update-activation cycle
-    */
-   uint8_t    indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
-} VMXNET3_RSSConf;
-
-typedef
-struct vmxnet3_mf_table {
-       void          *mfTableBase; /* Multicast addresses list */
-       uint64_t      mfTablePA;    /* Physical address of the list */
-       uint16_t      num_addrs;    /* number of multicast addrs */
-} vmxnet3_mf_table_t;
-
-struct vmxnet3_hw {
-
-       uint8_t *hw_addr0;      /* BAR0: PT-Passthrough Regs    */
-       uint8_t *hw_addr1;      /* BAR1: VD-Virtual Device Regs */
-       /* BAR2: MSI-X Regs */
-       /* BAR3: Port IO    */
-       void *back;
-
-       uint16_t device_id;
-       uint16_t vendor_id;
-       uint16_t subsystem_device_id;
-       uint16_t subsystem_vendor_id;
-       bool adapter_stopped;
-
-       uint8_t perm_addr[ETHER_ADDR_LEN];
-       uint8_t num_tx_queues;
-       uint8_t num_rx_queues;
-       uint8_t bufs_per_pkt;
-       uint16_t cur_mtu;
-
-       Vmxnet3_TxQueueDesc   *tqd_start;       /* start address of all tx queue desc */
-       Vmxnet3_RxQueueDesc   *rqd_start;       /* start address of all rx queue desc */
-
-       Vmxnet3_DriverShared  *shared;
-       uint64_t              sharedPA;
-
-       uint64_t              queueDescPA;
-       uint16_t              queue_desc_len;
-
-       VMXNET3_RSSConf          *rss_conf;
-       uint64_t                         rss_confPA;
-       vmxnet3_mf_table_t   *mf_table;
-};
-
-/*
- * Structure to store private data for each driver instance (for each port).
- */
-struct vmxnet3_adapter {
-       struct vmxnet3_hw              hw;
-};
-
-#define VMXNET3_DEV_PRIVATE_TO_HW(adapter)\
-       (&((struct vmxnet3_adapter *)adapter)->hw)
-
-#define VMXNET3_GET_ADDR_LO(reg)   ((uint32_t)(reg))
-#define VMXNET3_GET_ADDR_HI(reg)   ((uint32_t)(((uint64_t)(reg)) >> 32))
-
-/* Config space read/writes */
-
-#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
-
-static inline uint32_t vmxnet3_read_addr(volatile void* addr)
-{
-       return VMXNET3_PCI_REG(addr);
-}
-
-#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
-       VMXNET3_PCI_REG((reg)) = (value); \
-} while(0)
-
-#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
-       ((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
-#define VMXNET3_READ_BAR0_REG(hw, reg) \
-       vmxnet3_read_addr(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)))
-#define VMXNET3_WRITE_BAR0_REG(hw, reg, value) \
-       VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR0_REG_ADDR((hw), (reg)), (value))
-
-#define VMXNET3_PCI_BAR1_REG_ADDR(hw, reg) \
-       ((volatile uint32_t *)((char *)(hw)->hw_addr1 + (reg)))
-#define VMXNET3_READ_BAR1_REG(hw, reg) \
-       vmxnet3_read_addr(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)))
-#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
-       VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
-
-/*
- * RX/TX function prototypes
- */
-
-void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
-
-void vmxnet3_dev_rx_queue_release(void *rxq);
-void vmxnet3_dev_tx_queue_release(void *txq);
-
-int  vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
-               uint16_t nb_rx_desc, unsigned int socket_id,
-               const struct rte_eth_rxconf *rx_conf,
-               struct rte_mempool *mb_pool);
-int  vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
-               uint16_t nb_tx_desc, unsigned int socket_id,
-               const struct rte_eth_txconf *tx_conf);
-
-int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
-
-int vmxnet3_rss_configure(struct rte_eth_dev *dev);
-int vmxnet3_vlan_configure(struct rte_eth_dev *dev);
-
-uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts);
-uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
-
-#endif /* _VMXNET3_ETHDEV_H_ */
--- a/lib/librte_pmd_vmxnet3/vmxnet3_logs.h     2014-05-14 11:46:47.129439301 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_logs.h     2014-05-14 11:46:47.125439289 -0700
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -29,6 +29,7 @@
  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
  */

 #ifndef _VMXNET3_LOGS_H_
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ring.h     2014-05-14 11:46:47.129439301 -0700
+++ /dev/null   1970-01-01 00:00:00.000000000 +0000
@@ -1,176 +0,0 @@
-/*-
- *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VMXNET3_RING_H_
-#define _VMXNET3_RING_H_
-
-#define VMXNET3_RX_CMDRING_SIZE        2
-
-#define VMXNET3_DRIVER_VERSION_NUM  0x01012000
-
-/* Default ring size */
-#define VMXNET3_DEF_TX_RING_SIZE       512
-#define VMXNET3_DEF_RX_RING_SIZE       128
-
-#define VMXNET3_SUCCESS        0
-#define VMXNET3_FAIL      -1
-
-#define TRUE  1
-#define FALSE 0
-
-
-typedef struct vmxnet3_buf_info {
-       uint16_t               len;
-       struct rte_mbuf       *m;
-       uint64_t             bufPA;
-}vmxnet3_buf_info_t;
-
-typedef struct vmxnet3_cmd_ring {
-       vmxnet3_buf_info_t     *buf_info;
-       uint32_t               size;
-       uint32_t               next2fill;
-       uint32_t               next2comp;
-       uint8_t                gen;
-       uint8_t                rid;
-       Vmxnet3_GenericDesc    *base;
-       uint64_t               basePA;
-} vmxnet3_cmd_ring_t;
-
-static inline void
-vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
-{
-       ring->next2fill++;
-       if (unlikely(ring->next2fill == ring->size)) {
-               ring->next2fill = 0;
-               ring->gen = (uint8_t)(ring->gen ^ 1);
-       }
-}
-
-static inline void
-vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
-{
-   VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
-}
-
-static inline uint32_t
-vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
-{
-       return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
-                  ring->next2comp - ring->next2fill - 1;
-}
-
-static inline bool
-vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
-{
-       return (ring->next2comp == ring->next2fill);
-}
-
-typedef struct vmxnet3_comp_ring {
-       uint32_t               size;
-       uint32_t               next2proc;
-       uint8_t                gen;
-       uint8_t                intr_idx;
-       Vmxnet3_GenericDesc    *base;
-       uint64_t               basePA;
-} vmxnet3_comp_ring_t;
-
-static inline void
-vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
-{
-       ring->next2proc++;
-       if (unlikely(ring->next2proc == ring->size)) {
-               ring->next2proc = 0;
-               ring->gen = (uint8_t)(ring->gen ^ 1);
-       }
-}
-
-struct vmxnet3_txq_stats {
-       uint64_t               drop_total; /* # of pkts dropped by the driver, the
-                                            * counters below track droppings due to
-                                            * different reasons
-                                            */
-       uint64_t               drop_oversized;
-       uint64_t               drop_hdr_inspect_err;
-       uint64_t               drop_tso;
-       uint64_t               deferred;
-       uint64_t               tx_ring_full;
-       uint64_t               linearized;  /* # of pkts linearized */
-};
-
-typedef struct vmxnet3_tx_ctx {
-       int      ip_type;
-       bool     is_vlan;
-       bool     is_cso;
-
-       uint16_t evl_tag;               /* only valid when is_vlan == TRUE */
-       uint32_t eth_hdr_size;  /* only valid for pkts requesting tso or csum
-                                                        * offloading */
-       uint32_t ip_hdr_size;
-       uint32_t l4_hdr_size;
-} vmxnet3_tx_ctx_t;
-
-typedef struct vmxnet3_tx_queue {
-       struct vmxnet3_hw                *hw;
-       struct vmxnet3_cmd_ring      cmd_ring;
-       struct vmxnet3_comp_ring     comp_ring;
-       uint32_t                     qid;
-       struct Vmxnet3_TxQueueDesc   *shared;
-       struct vmxnet3_txq_stats     stats;
-       bool                         stopped;
-       uint16_t                         queue_id;      /**< Device TX queue index. */
-       uint8_t                          port_id;       /**< Device port identifier. */
-} vmxnet3_tx_queue_t;
-
-
-struct vmxnet3_rxq_stats {
-       uint64_t                     drop_total;
-       uint64_t                     drop_err;
-       uint64_t                     drop_fcs;
-       uint64_t                     rx_buf_alloc_failure;
-};
-
-typedef struct vmxnet3_rx_queue {
-       struct rte_mempool                      *mp;
-       struct vmxnet3_hw               *hw;
-       struct vmxnet3_cmd_ring     cmd_ring[VMXNET3_RX_CMDRING_SIZE];
-       struct vmxnet3_comp_ring    comp_ring;
-       uint32_t                    qid1;
-       uint32_t                    qid2;
-       Vmxnet3_RxQueueDesc         *shared;
-       struct vmxnet3_rxq_stats    stats;
-       bool                        stopped;
-       uint16_t                                queue_id;      /**< Device RX queue index. */
-       uint8_t                         port_id;       /**< Device port identifier. */
-} vmxnet3_rx_queue_t;
-
-#endif /* _VMXNET3_RING_H_ */
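
The vmxnet3_rxtx.c changes below reserve physically contiguous memzones for the descriptor rings, since the device must be given guest-physical addresses rather than pointers from malloc. A minimal sketch of the lookup-or-reserve pattern used there (the function name and the 512-byte alignment are illustrative assumptions, not the driver's actual values):

/* Illustrative only; ring_dma_zone_reserve() below follows this pattern. */
#include <rte_memzone.h>

static const struct rte_memzone *
example_ring_zone(const char *name, uint32_t len, int socket_id)
{
        const struct rte_memzone *mz = rte_memzone_lookup(name);

        if (mz != NULL)         /* reuse the zone on queue reconfiguration */
                return mz;

        /* flags = 0, alignment of 512 bytes assumed for illustration */
        return rte_memzone_reserve_aligned(name, len, socket_id, 0, 512);
}

The returned zone's addr field is what the CPU uses, while phys_addr is what gets written into the device's queue descriptors.
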
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c     2014-05-14 11:46:47.129439301 -0700
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c     2014-05-14 11:47:23.369551831 -0700
@@ -1,9 +1,9 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright (c) 2012-2014 Brocade Communications Systems, Inc.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
@@ -32,923 +32,782 @@
  */

 #include <sys/queue.h>
-
-#include <endian.h>
 #include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
 #include <errno.h>
 #include <stdint.h>
 #include <stdarg.h>
-#include <unistd.h>
-#include <inttypes.h>
+#include <string.h>

-#include <rte_byteorder.h>
 #include <rte_common.h>
-#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
 #include <rte_log.h>
 #include <rte_debug.h>
-#include <rte_interrupts.h>
 #include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
-#include <rte_launch.h>
 #include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
 #include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
+#include <rte_spinlock.h>
 #include <rte_malloc.h>
-#include <rte_mbuf.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_prefetch.h>
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
 #include <rte_string_fns.h>
-#include <rte_errno.h>
-
-#include "vmxnet3/vmxnet3_defs.h"
-#include "vmxnet3_ring.h"

-#include "vmxnet3_logs.h"
-#include "vmxnet3_ethdev.h"
+#include "vmxnet3_dev.h"

+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_ring_prefetch(p)   rte_prefetch0(p)
+#define rte_packet_prefetch(p)  rte_prefetch1(p)
+#else
+#define rte_ring_prefetch(p)   do {} while (0)
+#define rte_packet_prefetch(p)  do {} while (0)
+#endif

-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
-       (char *)(mb)->buf_addr))
+#define RTE_MBUF_DATA_DMA_ADDR(mb)             \
+       (uint64_t) ((mb)->buf_physaddr +       \
+       (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))

 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)

-static uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
-
-static inline int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* , uint8_t);
-static inline void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
-static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
-#endif
-
-static inline struct rte_mbuf *
+/* Allocate mbuf from pool and do sanity checks */
+static struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {
        struct rte_mbuf *m;

        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
-       return (m);
+       return m;
 }

-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-static void
-vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
+/* Determine max size for mbufs allocated from the pool */
+static uint16_t rte_mbuf_buf_size(const struct rte_mempool *pool)
 {
-       uint32_t avail = 0;
-       if (rxq == NULL)
-               return;
-
-       PMD_RX_LOG(DEBUG, "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
-                       rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
-       PMD_RX_LOG(DEBUG, "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
-                               (unsigned long)rxq->cmd_ring[0].basePA, (unsigned long)rxq->cmd_ring[1].basePA,
-                       (unsigned long)rxq->comp_ring.basePA);
-
-       avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
-       PMD_RX_LOG(DEBUG, "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
-                   (uint32_t)rxq->cmd_ring[0].size, avail, rxq->comp_ring.next2proc,
-                   rxq->cmd_ring[0].size - avail);
-
-       avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
-       PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u\n",
-                       (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
-                       rxq->cmd_ring[1].size - avail);
+       const struct rte_pktmbuf_pool_private *mbp_priv
+                = (const struct rte_pktmbuf_pool_private *)
+               ((const char *)pool + sizeof(struct rte_mempool));

+       return mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 }

-static void
-vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
+/* Since this is a purely para-virtualized device, no real hardware barrier
+ * is needed, but the compiler must be kept from reordering these accesses.
+ */
+#ifdef __GNUC__
+#define mem_barrier() asm volatile("": : :"memory")
+#else
+#define mem_barrier() __memory_barrier()       /* Intel intrinsic */
+#endif
+
+/*
+ * Create memzone for HW rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+                     uint16_t queue_id, uint32_t ring_size, int socket_id)
 {
-       uint32_t avail = 0;
-       if (txq == NULL)
-               return;
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;

-       PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.\n",
-                               txq->cmd_ring.base, txq->comp_ring.base);
-       PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.\n",
-                               (unsigned long)txq->cmd_ring.basePA, (unsigned long)txq->comp_ring.basePA);
-
-       avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
-       PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u\n",
-                       (uint32_t)txq->cmd_ring.size, avail,
-                       txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+                    dev->driver->pci_drv.name, ring_name,
+                    dev->data->port_id, queue_id);
+
+       mz = rte_memzone_lookup(z_name);
+       if (mz)
+               return mz;
+
+       return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
+                                          VMXNET3_RING_BA_ALIGN);
 }
-#endif

-static inline void
-vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
+/* (Re)set dynamic tx_queue fields to defaults */
+static void
+vmxnet3_reset_tx_queue(struct vmxnet3_tx_queue *txq)
 {
-       while (ring->next2comp != ring->next2fill) {
-               /* No need to worry about tx desc ownership, device is quiesced by now. */
-               vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
-               if(buf_info->m) {
-                       rte_pktmbuf_free(buf_info->m);
-                       buf_info->m = NULL;
-                       buf_info->bufPA = 0;
-                       buf_info->len = 0;
+       txq->tx_ring.next = 0;
+       txq->next_to_clean = 0;
+       txq->tx_ring.gen = VMXNET3_INIT_GEN;
+       memset(txq->tx_ring.base, 0,
+              sizeof(Vmxnet3_GenericDesc) * txq->nb_tx_desc);
+
+       txq->comp_ring.next = 0;
+       txq->comp_ring.gen = VMXNET3_INIT_GEN;
+       memset(txq->comp_ring.base, 0,
+              sizeof(Vmxnet3_GenericDesc) * txq->nb_tx_desc);
+}
+
+static void vmxnet3_tx_queue_flush(struct vmxnet3_tx_queue *txq)
+{
+       /* Free up outstanding transmits */
+       while (txq->next_to_clean != txq->tx_ring.next) {
+               struct rte_mbuf *mb = txq->sw_ring[txq->next_to_clean];
+               if (mb) {
+                       rte_pktmbuf_free_seg(mb);
+                       txq->sw_ring[txq->next_to_clean] = NULL;
                }
-               vmxnet3_cmd_ring_adv_next2comp(ring);
+               txq->next_to_clean = (txq->next_to_clean + 1)
+                       & (txq->tx_ring.size - 1);
        }
-       rte_free(ring->buf_info);
-}

-void
-vmxnet3_dev_tx_queue_release(void *txq)
-{
-       vmxnet3_tx_queue_t *tq = txq;
-       if (txq != NULL) {
-               /* Release the cmd_ring */
-               vmxnet3_cmd_ring_release(&tq->cmd_ring);
-       }
+       vmxnet3_reset_tx_queue(txq);
 }

-void
-vmxnet3_dev_rx_queue_release(void *rxq)
+void vmxnet3_tx_flush_all(struct rte_eth_dev *dev)
 {
-       int i;
-       vmxnet3_rx_queue_t *rq = rxq;
-       if (rxq != NULL) {
-               /* Release both the cmd_rings */
-               for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) 
-                       vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
-       }
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       unsigned i;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++)
+               vmxnet3_tx_queue_flush(dp->tx_queue + i);
 }

 void
-vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
+vmxnet3_tx_queue_release(void *arg)
 {
-       unsigned i;
+       struct vmxnet3_tx_queue *txq = arg;

        PMD_INIT_FUNC_TRACE();

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
-               if (txq != NULL) {
-                       txq->stopped = TRUE;
-                       vmxnet3_dev_tx_queue_release(txq);
-               }
-       }
+       if (txq == NULL)
+               return;

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
-               if(rxq != NULL) {
-                       rxq->stopped = TRUE;
-                       vmxnet3_dev_rx_queue_release(rxq);
-               }
-       }
+       vmxnet3_tx_queue_flush(txq);
+       rte_free(txq->sw_ring);
 }

-static inline void
-vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
-{
-   int completed = 0;
-   struct rte_mbuf *mbuf;
-   vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
-   struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
-                                    (comp_ring->base + comp_ring->next2proc);
-
-   while (tcd->gen == comp_ring->gen) {
-
-          /* Release cmd_ring descriptor and free mbuf */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-           VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
-#endif
-           mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
-               if (unlikely(mbuf == NULL))
-                       rte_panic("EOP desc does not point to a valid mbuf");
-               else 
-                       rte_pktmbuf_free(mbuf);
-               
-
-               txq->cmd_ring.buf_info[tcd->txdIdx].m = NULL;
-               /* Mark the txd for which tcd was generated as completed */
-               vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
+int
+vmxnet3_tx_queue_setup(struct rte_eth_dev *dev,
+                      uint16_t queue_idx,
+                      uint16_t nb_desc,
+                      unsigned int socket_id,
+                      const struct rte_eth_txconf *tx_conf)
+{
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       struct vmxnet3_tx_queue *txq = dp->tx_queue + queue_idx;
+       const struct rte_memzone *tz;

-               vmxnet3_comp_ring_adv_next2proc(comp_ring);
-               tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
-                                                   comp_ring->next2proc);
-               completed++;
-   }
+       PMD_INIT_FUNC_TRACE();

-   PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
-}
+       /* Device does not support checksum offload (yet) */
+       if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
+           != ETH_TXQ_FLAGS_NOXSUMS) {
+               PMD_INIT_LOG(ERR, "TX checksum offload not supported yet\n");
+               return -EINVAL;
+       }

-uint16_t
-vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts)
-{
-       uint16_t nb_tx;
-       Vmxnet3_TxDesc *txd = NULL;
-       vmxnet3_buf_info_t *tbi = NULL;
-       struct vmxnet3_hw *hw;
-       struct rte_mbuf *txm;
-       vmxnet3_tx_queue_t *txq = tx_queue;
+       /* Validate number of transmit descriptors. */
+       if (nb_desc == 0 || nb_desc > VMXNET3_TX_RING_MAX_SIZE ||
+           (nb_desc & (nb_desc - 1)) != 0) {   /* power of 2 */
+               PMD_INIT_LOG(ERR, "Invalid number of TX descriptors\n");
+               return -EINVAL;
+       }

-       hw = txq->hw;
+       /* Round up to VMXNET3_RING_SIZE_ALIGN */
+       nb_desc = (nb_desc + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK;

-       if(txq->stopped) {
-               PMD_TX_LOG(DEBUG, "Tx queue is stopped.\n");
-               return 0;
+       /* Free memory prior to re-allocation if needed... */
+       vmxnet3_tx_queue_release(dev->data->tx_queues[queue_idx]);
+       dev->data->tx_queues[queue_idx] = NULL;
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                          sizeof(Vmxnet3_TxDesc) * VMXNET3_TX_RING_MAX_SIZE,
+                          socket_id);
+       if (tz == NULL) {
+               vmxnet3_tx_queue_release(txq);
+               return -ENOMEM;
        }

-       /* Free up the comp_descriptors aggressively */
-       vmxnet3_tq_tx_complete(txq);
-
-       nb_tx = 0;
-       while(nb_tx < nb_pkts) {
+       txq->queue_id = queue_idx;
+       txq->port_id = dev->data->port_id;
+       txq->nb_tx_desc = nb_desc;
+       txq->tx_free_thresh = tx_conf->tx_free_thresh ? : 32;
+       txq->tx_prod = dp->bar0 + VMXNET3_REG_TXPROD + txq->queue_id * 8;
+
+       txq->tx_ring.base = tz->addr;
+       txq->tx_ring.size = nb_desc;
+       txq->tx_ring.phys_addr = tz->phys_addr;
+
+       tz = ring_dma_zone_reserve(dev, "tx_comp", queue_idx,
+                          sizeof(Vmxnet3_TxCompDesc) * VMXNET3_TX_RING_MAX_SIZE,
+                          socket_id);
+       if (tz == NULL) {
+               vmxnet3_tx_queue_release(txq);
+               return -ENOMEM;
+       }
+       txq->comp_ring.base = tz->addr;
+       txq->comp_ring.size = nb_desc;
+       txq->comp_ring.phys_addr = tz->phys_addr;
+
+       /* Allocate software ring
+        * Only needs to be sized based on request, since it is in
+        * freeable memory.
+        */
+       txq->sw_ring = rte_zmalloc_socket("txq_sw_ring",
+                                         sizeof(struct rte_mbuf *) * nb_desc,
+                                         CACHE_LINE_SIZE, socket_id);
+       if (txq->sw_ring == NULL) {
+               vmxnet3_tx_queue_release(txq);
+               return -ENOMEM;
+       }

-               if(vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+                    txq->sw_ring, txq->tx_ring.base, txq->tx_ring.phys_addr);

-                       txm = tx_pkts[nb_tx];
-                       /* Don't support scatter packets yet, free them if met */
-                       if (txm->pkt.nb_segs != 1) {
-                               PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!\n");
-                               rte_pktmbuf_free(tx_pkts[nb_tx]);
-                               txq->stats.drop_total++;
+       vmxnet3_reset_tx_queue(txq);
+       dev->data->tx_queues[queue_idx] = txq;

-                               nb_tx++;
-                               continue;
-                       }
+       return 0;
+}

-                       /* Needs to minus ether header len */
-                       if(txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
-                               PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU\n");
-                               rte_pktmbuf_free(tx_pkts[nb_tx]);
-                               txq->stats.drop_total++;
+/*
+ * Read transmit completion ring and free up mbufs from completed
+ * transmits.
+ */
+static void vmxnet3_xmit_cleanup(struct vmxnet3_tx_queue *txq)
+{
+       const Vmxnet3_TxCompDesc *tcd;
+       uint16_t last_completed;

-                               nb_tx++;
-                               continue;
-                       }
+       tcd = (Vmxnet3_TxCompDesc *)
+               (txq->comp_ring.base + txq->comp_ring.next);

-                       txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
+       if (tcd->gen != txq->comp_ring.gen)
+               return;

-                       /* Fill the tx descriptor */
-                       tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
-                       tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
-                       txd->addr = tbi->bufPA;
-                       txd->len = txm->pkt.data_len;
-
-                       /* Mark the last descriptor as End of Packet. */
-                       txd->cq = 1;
-                       txd->eop = 1;
-
-                       /* Record current mbuf for freeing it later in tx complete */
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-                       VMXNET3_ASSERT(txm);
-#endif
-                       tbi->m = txm;
+       mem_barrier();

-                       /* Set the offloading mode to default */
-                       txd->hlen = 0;
-                       txd->om = VMXNET3_OM_NONE;
-                       txd->msscof = 0;
-
-                       /* finally flip the GEN bit of the SOP desc  */
-                       txd->gen = txq->cmd_ring.gen;
-                       txq->shared->ctrl.txNumDeferred++;
-
-                       /* move to the next2fill descriptor */
-                       vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
-                       nb_tx++;
+       do {
+               last_completed = tcd->txdIdx;
+               if (last_completed > txq->tx_ring.size)
+                       rte_panic("%s(): index %u out of tx ring\n",
+                                 __func__, last_completed);
+
+               vmxnet3_ring_advance(&txq->comp_ring);
+               tcd = (Vmxnet3_TxCompDesc *)
+                       (txq->comp_ring.base + txq->comp_ring.next);
+       } while (tcd->gen == txq->comp_ring.gen);
+
+       PMD_TX_FREE_LOG(DEBUG,
+                       "Cleaning TX descriptors: %4u to %4u "
+                       "(port=%d queue=%d)",
+                       txq->next_to_clean, last_completed,
+                       txq->port_id, txq->queue_id);
+
+       struct rte_mbuf **sw_ring = txq->sw_ring;
+       uint16_t i = txq->next_to_clean;
+       for (;;) {
+               rte_pktmbuf_free_seg(sw_ring[i]);
+               sw_ring[i] = NULL;

-               } else {
-                       PMD_TX_LOG(DEBUG, "No free tx cmd desc(s)\n");
-                       txq->stats.drop_total += (nb_pkts - nb_tx);
+               if (i == last_completed)
                        break;
-               }
-       }
-
-       PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", txq->shared->ctrl.txThreshold);

-       if (txq->shared->ctrl.txNumDeferred >= txq->shared->ctrl.txThreshold) {
-
-               txq->shared->ctrl.txNumDeferred = 0;
-               /* Notify vSwitch that packets are available. */
-               VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
-                               txq->cmd_ring.next2fill);
+               i = (i + 1) & (txq->tx_ring.size - 1);
        }
+       txq->next_to_clean = (i + 1) & (txq->tx_ring.size - 1);
+}

-       return (nb_tx);
+static inline unsigned
+vmxnet3_tx_used(const struct vmxnet3_tx_queue *txq)
+{
+       return (txq->tx_ring.next - txq->next_to_clean)
+               & (txq->tx_ring.size - 1);
+}
+
+static inline unsigned
+vmxnet3_tx_avail(const struct vmxnet3_tx_queue *txq)
+{
+       return txq->tx_ring.size - vmxnet3_tx_used(txq) - 1;
 }

 /*
- *  Allocates mbufs and clusters. Post rx descriptors with buffer details
- *  so that device can receive packets in those buffers.
- *     Ring layout:
- *      Among the two rings, 1st ring contains buffers of type 0 and type1.
- *      bufs_per_pkt is set such that for non-LRO cases all the buffers required
- *      by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
- *      2nd ring contains buffers of type 1 alone. Second ring mostly be used
- *      only for LRO.
- *
+ * Transmit a block of packets
  */
-static inline int
-vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* rxq, uint8_t ring_id)
+uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                          uint16_t nb_pkts)
 {
-   int err = 0;
-   uint32_t i = 0, val = 0;
-   struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
-
-   while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
-
-               struct Vmxnet3_RxDesc *rxd;
-               struct rte_mbuf *mbuf;
-               vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
-               rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
-
-               if (ring->rid == 0) {
-                        /* Usually: One HEAD type buf per packet
-                          * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ? 
-                          * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
-                          */
-
-                       /* We use single packet buffer so all heads here */
-                       val = VMXNET3_RXD_BTYPE_HEAD;
-               } else {
-                       /* All BODY type buffers for 2nd ring; which won't be 
used at all by ESXi */
-                       val = VMXNET3_RXD_BTYPE_BODY;
-               }
+       struct vmxnet3_tx_queue *txq = tx_queue;
+       Vmxnet3_TxQueueCtrl *txqCtrl = txq->shared;
+       unsigned nb_tx, num_desc = 0;
+       uint32_t tx_id;
+
+       if (vmxnet3_tx_used(txq) >= txq->tx_free_thresh)
+               vmxnet3_xmit_cleanup(txq);
+
+       /* TX loop */
+       tx_id = txq->tx_ring.next;
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               struct rte_mbuf *tx_pkt = *tx_pkts++;
+               uint32_t tx_first = tx_id;
+               Vmxnet3_GenericDesc *txd;

-               /* Allocate blank mbuf for the current Rx Descriptor */
-               mbuf = rte_rxmbuf_alloc(rxq->mp);
-               if (mbuf == NULL) {
-                       PMD_RX_LOG(ERR, "Error allocating mbuf in %s\n", __func__);
-                       rxq->stats.rx_buf_alloc_failure++;
-                       err = ENOMEM;
+               /*
+                * The number of descriptors that must be allocated for a
+                * packet is the number of segments of that packet.
+                */
+               if (tx_pkt->pkt.nb_segs > vmxnet3_tx_avail(txq))
                        break;
+
+               PMD_TX_LOG(DEBUG,
+                          "port_id=%u queue_id=%u pktlen=%u segs=%u first=%u",
+                          txq->port_id, txq->queue_id,
+                          tx_pkt->pkt.pkt_len, tx_pkt->pkt.nb_segs, tx_id);
+
+               struct rte_mbuf *m_seg = tx_pkt;
+               uint32_t cur_gen = !txq->tx_ring.gen;
+               do {
+                       /* Remember the transmit buffer for cleanup */
+                       txq->sw_ring[tx_id] = m_seg;
+
+                       /* NB: the following assumes that the VMXNET3 maximum
+                          transmit buffer size (16K) is greater than the
+                          maximum size of an mbuf segment (2K). */
+                       uint32_t dw2;
+                       dw2 = m_seg->pkt.data_len;
+                       dw2 |= cur_gen << VMXNET3_TXD_GEN_SHIFT;
+
+                       txd = &txq->tx_ring.base[tx_id];
+                       txd->txd.addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       txd->dword[2] = rte_cpu_to_le_32(dw2);
+                       txd->dword[3] = 0;
+
+                       m_seg = m_seg->pkt.next;
+                       tx_id = vmxnet3_ring_advance(&txq->tx_ring);
+                       cur_gen = txq->tx_ring.gen;
+
+                       ++num_desc;
+               } while (m_seg != NULL);
+
+               /* Update the EOP descriptor */
+               txd->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
+
+               /* Update the SOP descriptor. Must be done last */
+               txd = txq->tx_ring.base + tx_first;
+               if (tx_pkt->ol_flags & PKT_TX_VLAN_PKT) {
+                       txd->txd.ti = 1;
+                       txd->txd.tci = tx_pkt->pkt.vlan_macip.f.vlan_tci;
                }

-               /*
-                * Load mbuf pointer into buf_info[ring_size]
-                * buf_info structure is equivalent to cookie for virtio-virtqueue
-                */
-               buf_info->m = mbuf;
-               buf_info->len = (uint16_t)(mbuf->buf_len -
-                       RTE_PKTMBUF_HEADROOM);
-               buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
-
-               /* Load Rx Descriptor with the buffer's GPA */
-               rxd->addr = buf_info->bufPA;
-
-               /* After this point rxd->addr MUST not be NULL */
-               rxd->btype = val;
-               rxd->len = buf_info->len;
-               /* Flip gen bit at the end to change ownership */
-               rxd->gen = ring->gen;
-
-               vmxnet3_cmd_ring_adv_next2fill(ring);
-               i++;
-   }
-
-   /* Return error only if no buffers are posted at present */
-   if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size -1))
-      return -err;
-   else
-      return i;
-}
+               /* TODO: Add transmit checksum offload here */

-/*
- * Process the Rx Completion Ring of given vmxnet3_rx_queue
- * for nb_pkts burst and return the number of packets received
- */
-uint16_t
-vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
-       uint16_t nb_rx;
-       uint32_t nb_rxd, idx;
-       uint8_t ring_idx;
-       vmxnet3_rx_queue_t *rxq;
-       Vmxnet3_RxCompDesc *rcd;
-       vmxnet3_buf_info_t *rbi;
-       Vmxnet3_RxDesc *rxd;
-       struct rte_mbuf *rxm = NULL;
-       struct vmxnet3_hw *hw;
+               /* flip the GEN bit on the SOP */
+               mem_barrier();
+               txd->dword[2] ^= rte_cpu_to_le_32(VMXNET3_TXD_GEN);
+       }

-       nb_rx = 0;
-       ring_idx = 0;
-       nb_rxd = 0;
-       idx = 0;
+       /* avoid ring update if no packets sent */
+       if (likely(num_desc > 0)) {
+               /* Increment pending value */
+               uint32_t defer = rte_le_to_cpu_32(txqCtrl->txNumDeferred);
+               defer += num_desc;
+               if (defer >= rte_le_to_cpu_32(txqCtrl->txThreshold)) {
+                       txqCtrl->txNumDeferred = 0;
+                       vmxnet3_write_reg(txq->tx_prod, tx_id);
+               } else
+                       txqCtrl->txNumDeferred = rte_cpu_to_le_32(defer);
+       }

-       rxq = rx_queue;
-       hw = rxq->hw;
+       return nb_tx;
+}

-       rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
+static void vmxnet3_rx_queue_flush(struct vmxnet3_rx_queue *rxq)
+{
+       unsigned i;

-       if(rxq->stopped) {
-               PMD_RX_LOG(DEBUG, "Rx queue is stopped.\n");
-               return 0;
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               struct rte_mbuf *mb = rxq->sw_ring[i];
+               if (mb) {
+                       rte_pktmbuf_free_seg(mb);
+                       rxq->sw_ring[i] = NULL;
+               }
        }
+}

-       while (rcd->gen == rxq->comp_ring.gen) {
+void vmxnet3_rx_flush_all(struct rte_eth_dev *dev)
+{
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       unsigned i;

-               if(nb_rx >= nb_pkts)
-                       break;
-               idx = rcd->rxdIdx;
-               ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
-               rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
-               rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
+       for (i = 0; i < dev->data->nb_rx_queues; i++)
+               vmxnet3_rx_queue_flush(dp->rx_queue + i);
+}

-               if(rcd->sop !=1 || rcd->eop != 1) {
-                       rte_pktmbuf_free_seg(rbi->m);
+void
+vmxnet3_rx_queue_release(void *arg)
+{
+       struct vmxnet3_rx_queue *rxq = arg;

-                       PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
-                       goto rcd_done;
+       PMD_INIT_FUNC_TRACE();

-               } else {
+       if (rxq == NULL)
+               return;

-                       PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.\n", idx, ring_idx);
+       vmxnet3_rx_queue_flush(rxq);
+       rte_free(rxq->sw_ring);
+}

-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-                       VMXNET3_ASSERT(rcd->len <= rxd->len);
-                       VMXNET3_ASSERT(rbi->m);
-#endif
-                       if (rcd->len == 0) {
-                               PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
-                                                        ring_idx, idx);
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-                               VMXNET3_ASSERT(rcd->sop && rcd->eop);
-#endif
-                               rte_pktmbuf_free_seg(rbi->m);
+int
+vmxnet3_rx_queue_setup(struct rte_eth_dev *dev,
+                      uint16_t queue_idx,
+                      uint16_t nb_desc,
+                      unsigned int socket_id,
+                      const struct rte_eth_rxconf *rx_conf __rte_unused,
+                      struct rte_mempool *mp)
+{
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       struct vmxnet3_rx_queue *rxq = dp->rx_queue + queue_idx;
+       const struct rte_memzone *rz;

-                               goto rcd_done;
-                       }
+       PMD_INIT_FUNC_TRACE();

-                       /* Assuming a packet is coming in a single packet buffer */
-                       if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
-                               PMD_RX_LOG(DEBUG, "Alert : Misbehaving device, incorrect "
-                                                 " buffer type used. iPacket dropped.\n");
-                               rte_pktmbuf_free_seg(rbi->m);
-                               goto rcd_done;
-                       }
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-                       VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
-#endif
-                       /* Get the packet buffer pointer from buf_info */
-                       rxm = rbi->m;
+       /* Validate number of receive descriptors. */
+       if (nb_desc == 0 || nb_desc > VMXNET3_RX_RING_MAX_SIZE ||
+           (nb_desc & (nb_desc - 1)) != 0) {   /* power of 2 */
+               PMD_INIT_LOG(ERR, "Invalid number of RX descriptors\n");
+               return -EINVAL;
+       }

-                       /* Clear descriptor associated buf_info to be reused */
-                       rbi->m = NULL;
-                       rbi->bufPA = 0;
-
-                       /* Update the index that we received a packet */
-                       rxq->cmd_ring[ring_idx].next2comp = idx;
-
-                       /* For RCD with EOP set, check if there is frame error */
-                       if (rcd->err) {
-                               rxq->stats.drop_total++;
-                               rxq->stats.drop_err++;
-
-                               if(!rcd->fcs) {
-                                       rxq->stats.drop_fcs++;
-                                       PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.\n");
-                               }
-                               PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d\n",
-                                                (int)(rcd - (struct Vmxnet3_RxCompDesc *)
-                                                           rxq->comp_ring.base), rcd->rxdIdx);
-                               rte_pktmbuf_free_seg(rxm);
+       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > rte_mbuf_buf_size(mp)) {
+               PMD_INIT_LOG(ERR, "VMXNET3 doesn't support scatter packets yet\n");
+               return -EINVAL;
+       }

-                               goto rcd_done;
-                       }
+       /* Round up to VMXNET3_RING_SIZE_ALIGN */
+       nb_desc = (nb_desc + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK;

-                       /* Check for hardware stripped VLAN tag */
-                       if (rcd->ts) {
+       /* Free memory from previous allocation */
+       vmxnet3_rx_queue_release(dev->data->rx_queues[queue_idx]);
+       dev->data->rx_queues[queue_idx] = NULL;
+
+       /* First allocate the rx queue data structure */
+       rxq->mb_pool = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->queue_id = queue_idx;
+       rxq->port_id = dev->data->port_id;

-                               PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.\n",
-                                                rcd->tci);
-                               rxm->ol_flags = PKT_RX_VLAN_PKT;
-
-#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
-                               VMXNET3_ASSERT(rxm &&
-                                       rte_pktmbuf_mtod(rxm, void *));
-#endif
-                               //Copy vlan tag in packet buffer
-                               rxm->pkt.vlan_macip.f.vlan_tci =
-                                       rte_le_to_cpu_16((uint16_t)rcd->tci);
-
-                       } else
-                               rxm->ol_flags = 0;
-
-                       /* Initialize newly received packet buffer */
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.nb_segs = 1;
-                       rxm->pkt.next = NULL;
-                       rxm->pkt.pkt_len = (uint16_t)rcd->len;
-                       rxm->pkt.data_len = (uint16_t)rcd->len;
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.vlan_macip.f.vlan_tci = 0;
-                       rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-
-                       rx_pkts[nb_rx++] = rxm;
-
-rcd_done:
-                       rxq->cmd_ring[ring_idx].next2comp = idx;
-                       VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
-
-                       /* It's time to allocate some new buf and renew descriptors */
-                       vmxnet3_post_rx_bufs(rxq, ring_idx);
-                       if (unlikely(rxq->shared->ctrl.updateRxProd)) {
-                               VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
-                                                                 rxq->cmd_ring[ring_idx].next2fill);
-                       }
+       /*
+        * Allocate RX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+                       VMXNET3_RX_RING_MAX_SIZE * sizeof(Vmxnet3_RxDesc),
+                       socket_id);
+       if (rz == NULL) {
+               vmxnet3_rx_queue_release(rxq);
+               return -ENOMEM;
+       }
+
+       rxq->rx_ring.base = rz->addr;
+       rxq->rx_ring.size = nb_desc;
+       rxq->rx_ring.phys_addr = rz->phys_addr;
+       rxq->rx_prod = dp->bar0 + VMXNET3_REG_RXPROD
+               + rxq->queue_id * VMXNET3_REG_ALIGN;
+
+       rz = ring_dma_zone_reserve(dev, "rx_comp", queue_idx,
+                       VMXNET3_RX_RING_MAX_SIZE * sizeof(Vmxnet3_RxCompDesc),
+                       socket_id);
+       if (rz == NULL) {
+               vmxnet3_rx_queue_release(rxq);
+               return -ENOMEM;
+       }
+
+       rxq->comp_ring.base = rz->addr;
+       rxq->comp_ring.size = nb_desc;
+       rxq->comp_ring.phys_addr = rz->phys_addr;
+
+       /* Allocate software ring */
+       rxq->sw_ring = rte_zmalloc_socket("rxq_sw_ring",
+                                         sizeof(struct rte_mbuf *) * nb_desc,
+                                         CACHE_LINE_SIZE, socket_id);
+       if (rxq->sw_ring == NULL) {
+               vmxnet3_rx_queue_release(rxq);
+               return -ENOMEM;
+       }

-                       /* Advance to the next descriptor in comp_ring */
-                       vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+                    rxq->sw_ring, rxq->rx_ring.base, rxq->rx_ring.phys_addr);

-                       rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
-                       nb_rxd++;
-                       if (nb_rxd > rxq->cmd_ring[0].size) {
-                               PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
-                                                " relinquish control.\n");
-                               break;
-                       }
-               }
-       }
+       dev->data->rx_queues[queue_idx] = rxq;

-       return (nb_rx);
+       return 0;
 }

 /*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
+ * Setup receive queue ring.
+ * All the slots are filled, but the last one is not marked
+ * as available, to avoid the problem where full == empty.
  */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-                     uint16_t queue_id, uint32_t ring_size, int socket_id)
+static int vmxnet3_rxqueue_init(struct vmxnet3_rx_queue *rxq)
 {
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
+       unsigned i;
+       uint16_t buf_size = rte_mbuf_buf_size(rxq->mb_pool);

-       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                       dev->data->port_id, queue_id);
+       for (i = 0; i < rxq->rx_ring.size; i++) {
+               struct rte_mbuf *m;

-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
+               m = rxq->sw_ring[i];
+               if (!m) {
+                       m = rte_rxmbuf_alloc(rxq->mb_pool);
+                       if (!m) {
+                               PMD_INIT_LOG(ERR,
+                                            "RX mbuf alloc failed qid=%hu",
+                                            rxq->queue_id);
+                               return -ENOMEM;
+                       }
+                       rxq->sw_ring[i] = m;
+               }
+
+               Vmxnet3_GenericDesc *gd = rxq->rx_ring.base + i;
+               uint64_t pa = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(m);
+               gd->rxd.addr = rte_cpu_to_le_64(pa);
+
+               uint32_t dw2 = !VMXNET3_INIT_GEN << VMXNET3_RXD_GEN_SHIFT |
+                       VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT |
+                       buf_size;
+               gd->dword[2] = rte_cpu_to_le_32(dw2);
+
+               if (i == rxq->rx_ring.size - 1)
+                       break;

-       return rte_memzone_reserve_aligned(z_name, ring_size,
-                       socket_id, 0, VMXNET3_RING_BA_ALIGN);
+               gd->rxd.gen = VMXNET3_INIT_GEN;
+               vmxnet3_ring_advance(&rxq->rx_ring);
+       }
+
+       return 0;
 }

-int
-vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
-                        uint16_t queue_idx,
-                        uint16_t nb_desc,
-                        unsigned int socket_id,
-                        __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+int vmxnet3_rx_init(struct rte_eth_dev *dev)
 {
-       const struct rte_memzone *mz;
-       struct vmxnet3_tx_queue *txq;
-       struct vmxnet3_hw     *hw;
-    struct vmxnet3_cmd_ring *ring;
-    struct vmxnet3_comp_ring *comp_ring;
-    int size;
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       unsigned i;
+       int ret;

        PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-       if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
-               ETH_TXQ_FLAGS_NOMULTSEGS) {
-               PMD_INIT_LOG(ERR, "TX Multi segment not support yet\n");
-               return (-EINVAL);
-       }
+       /* Configure and enable each RX queue. */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct vmxnet3_rx_queue *rxq = dp->rx_queue + i;

-       if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) != 
-               ETH_TXQ_FLAGS_NOOFFLOADS) {
-               PMD_INIT_LOG(ERR, "TX not support offload function yet\n");
-               return (-EINVAL);
+               rxq->rx_ring.next = 0;
+               memset(rxq->rx_ring.base, 0,
+                      rxq->rx_ring.size * sizeof(Vmxnet3_RxDesc));
+               rxq->rx_ring.gen = VMXNET3_INIT_GEN;
+               rxq->start_seg = NULL;
+               rxq->prev_seg = &rxq->start_seg;
+
+               /* Allocate buffers for descriptor rings and set up queue */
+               ret = vmxnet3_rxqueue_init(rxq);
+               if (ret)
+                       return ret;
+
+               rxq->comp_ring.next = 0;
+               memset(rxq->comp_ring.base, 0,
+                      rxq->comp_ring.size * sizeof(Vmxnet3_RxCompDesc));
+               rxq->comp_ring.gen = VMXNET3_INIT_GEN;
        }

-       txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
-       if (txq == NULL) {
-               PMD_INIT_LOG(ERR, "Can not allocate tx queue structure\n");
-               return (-ENOMEM);
-       }
+       return 0;
+}

-       txq->queue_id = queue_idx;
-       txq->port_id = dev->data->port_id;
-       txq->shared = &hw->tqd_start[queue_idx];
-    txq->hw = hw;
-    txq->qid = queue_idx;
-    txq->stopped = TRUE;
-
-    ring = &txq->cmd_ring;
-    comp_ring = &txq->comp_ring;
-
-    /* Tx vmxnet ring length should be between 512-4096 */
-    if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
-       PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u\n",
-                                       VMXNET3_DEF_TX_RING_SIZE);
-       return -EINVAL;
-       } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u\n",
-                                       VMXNET3_TX_RING_MAX_SIZE);
-               return -EINVAL;
-    } else {
-       ring->size = nb_desc;
-       ring->size &= ~VMXNET3_RING_SIZE_MASK;
-    }
-    comp_ring->size = ring->size;
-
-    /* Tx vmxnet rings structure initialization*/
-    ring->next2fill = 0;
-    ring->next2comp = 0;
-    ring->gen = VMXNET3_INIT_GEN;
-    comp_ring->next2proc = 0;
-    comp_ring->gen = VMXNET3_INIT_GEN;
-
-    size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
-    size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
-
-    mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-               return (-ENOMEM);
-       }
-       memset(mz->addr, 0, mz->len);
-
-       /* cmd_ring initialization */
-       ring->base = mz->addr;
-       ring->basePA = mz->phys_addr;
-
-       /* comp_ring initialization */
-    comp_ring->base = ring->base + ring->size;
-    comp_ring->basePA = ring->basePA +
-                                       (sizeof(struct Vmxnet3_TxDesc) * ring->size);
-
-    /* cmd_ring0 buf_info allocation */
-       ring->buf_info = rte_zmalloc("tx_ring_buf_info",
-                                               ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
-       if (ring->buf_info == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure\n");
-               return (-ENOMEM);
+void vmxnet3_rx_start(struct rte_eth_dev *dev)
+{
+       struct vmxnet3_dev *dp = dev->data->dev_private;
+       unsigned i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct vmxnet3_rx_queue *rxq = dp->rx_queue + i;
+               vmxnet3_write_reg(rxq->rx_prod, rxq->rx_ring.next);
        }
+}

-       /* Update the data portion with txq */
-       dev->data->tx_queues[queue_idx] = txq;
+/* Partial packets with error are dropped. */
+static void vmxnet3_rx_error(struct vmxnet3_rx_queue *rxq,
+                            const Vmxnet3_RxCompDesc *rcd)
+{
+       ++rxq->drop_errors;
+       if (rcd->fcs)
+               ++rxq->fcs_errors;

-       return 0;
+       if (rxq->start_seg) {
+               rte_pktmbuf_free(rxq->start_seg);
+               rxq->start_seg = NULL;
+               rxq->prev_seg = &rxq->start_seg;
+       }
 }

-int
-vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
-                        uint16_t queue_idx,
-                        uint16_t nb_desc,
-                        unsigned int socket_id,
-                        __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
-                        struct rte_mempool *mp)
+/* TODO optimize this with a table */
+static uint32_t vmxnet3_pkt_flags(const Vmxnet3_RxCompDesc *rcd)
 {
-       const struct rte_memzone *mz;
-       struct vmxnet3_rx_queue *rxq;
-       struct vmxnet3_hw     *hw;
-       struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
-       struct vmxnet3_comp_ring *comp_ring;
-       int size;
-       uint8_t i;
-       char mem_name[32];
-       uint16_t buf_size;
-       struct rte_pktmbuf_pool_private *mbp_priv;

-       PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t pkt_flags = 0;

-       mbp_priv = (struct rte_pktmbuf_pool_private *)
-                               rte_mempool_get_priv(mp);
-       buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
-                                  RTE_PKTMBUF_HEADROOM);
+       if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE)
+               pkt_flags |= PKT_RX_RSS_HASH;
+       if (rcd->ts)
+               pkt_flags |= PKT_RX_VLAN_PKT;
+       if (rcd->v4)
+               pkt_flags |= PKT_RX_IPV4_HDR;
+       if (rcd->v6)
+               pkt_flags |= PKT_RX_IPV6_HDR;

-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
-               PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
-                               "VMXNET3 don't support scatter packets yet\n",
-                               buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
-               return (-EINVAL);
-       }
+       /* if Checksum has been checked */
+       if (!rcd->cnc) {
+               /* is IP header good */
+               if (!rcd->ipc)
+                       pkt_flags |= PKT_RX_IP_CKSUM_BAD;

-       rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
-       if (rxq == NULL) {
-               PMD_INIT_LOG(ERR, "Can not allocate rx queue structure\n");
-               return (-ENOMEM);
-       }
-
-       rxq->mp = mp;
-       rxq->queue_id = queue_idx;
-       rxq->port_id = dev->data->port_id;
-       rxq->shared = &hw->rqd_start[queue_idx];
-       rxq->hw = hw;
-       rxq->qid1 = queue_idx;
-       rxq->qid2 = queue_idx + hw->num_rx_queues;
-       rxq->stopped = TRUE;
-
-       ring0 = &rxq->cmd_ring[0];
-       ring1 = &rxq->cmd_ring[1];
-       comp_ring = &rxq->comp_ring;
-
-       /* Rx vmxnet rings length should be between 256-4096 */
-       if(nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256\n");
-               return -EINVAL;
-       } else if(nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
-               PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096\n");
-               return -EINVAL;
-       } else {
-               ring0->size = nb_desc;
-               ring0->size &= ~VMXNET3_RING_SIZE_MASK;
-               ring1->size = ring0->size;
-       }
-
-       comp_ring->size = ring0->size + ring1->size;
-
-       /* Rx vmxnet rings structure initialization */
-       ring0->next2fill = 0;
-       ring1->next2fill = 0;
-       ring0->next2comp = 0;
-       ring1->next2comp = 0;
-       ring0->gen = VMXNET3_INIT_GEN;
-       ring1->gen = VMXNET3_INIT_GEN;
-       comp_ring->next2proc = 0;
-       comp_ring->gen = VMXNET3_INIT_GEN;
-
-       size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
-       size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
-
-       mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
-               return (-ENOMEM);
-       }
-       memset(mz->addr, 0, mz->len);
-
-       /* cmd_ring0 initialization */
-       ring0->base = mz->addr;
-       ring0->basePA = mz->phys_addr;
-
-       /* cmd_ring1 initialization */
-       ring1->base = ring0->base + ring0->size;
-       ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
-
-       /* comp_ring initialization */
-       comp_ring->base = ring1->base +  ring1->size;
-       comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
-                                          ring1->size;
-
-       /* cmd_ring0-cmd_ring1 buf_info allocation */
-       for(i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
-
-         ring = &rxq->cmd_ring[i];
-         ring->rid = i;
-         rte_snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
-
-         ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
-         if (ring->buf_info == NULL) {
-                 PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
-                 return (-ENOMEM);
-         }
+               /* is Transport header good? */
+               if ((rcd->v4 || rcd->v6) && !rcd->tuc)
+                       pkt_flags |= PKT_RX_L4_CKSUM_BAD;
        }

-    /* Update the data portion with rxq */
-    dev->data->rx_queues[queue_idx] = rxq;
-
-       return 0;
+       return pkt_flags;
 }

+static void vmxnet3_rx_ring_advance(struct vmxnet3_cmd_ring *rx_ring,
+                                   uint16_t idx)
+{
+       while (rx_ring->next != idx) {
+               Vmxnet3_GenericDesc *rxd = rx_ring->base + rx_ring->next;
+               rxd->rxd.gen = rx_ring->gen;
+               vmxnet3_ring_advance(rx_ring);
+       }
+}
 /*
- * Initializes Receive Unit
- * Load mbufs in rx queue in advance
+ * Receive burst of packets
  */
-int
-vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
+uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+                          uint16_t nb_pkts)
 {
-       struct vmxnet3_hw *hw;
-       int i, ret;
-       uint8_t j;
+       struct vmxnet3_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
+       uint16_t idx = 0, num_rxd = 0;
+       Vmxnet3_GenericDesc *rxd;
+
+       while (num_rxd < nb_pkts) {
+               struct rte_mbuf *mb, *nmb;
+               const Vmxnet3_RxCompDesc *rcd;

-       PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               rcd = (Vmxnet3_RxCompDesc *)
+                       (rxq->comp_ring.base + rxq->comp_ring.next);

-       for (i = 0; i < hw->num_rx_queues; i++) {
+               /* If buffer is not filled yet */
+               if (rcd->gen != rxq->comp_ring.gen)
+                       break;

-               vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
-               for(j = 0;j < VMXNET3_RX_CMDRING_SIZE;j++) {
-                       /* Passing 0 as alloc_num will allocate full ring */
-                       ret = vmxnet3_post_rx_bufs(rxq, j);
-                       if (ret <= 0) {
-                         PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
-                         return (-ret);
-                       }
-                       /* Updating device with the index:next2fill to fill the mbufs for coming packets */
-                       if (unlikely(rxq->shared->ctrl.updateRxProd)) {
-                               VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
-                                               rxq->cmd_ring[j].next2fill);
-                       }
-               }
-               rxq->stopped = FALSE;
-       }
+               mem_barrier();

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
-               txq->stopped = FALSE;
-       }
+               idx = rcd->rxdIdx;
+               PMD_RX_LOG(DEBUG,
+                          "port=%u queue=%u id=%u len=%u%s%s%s",
+                          rxq->port_id, rxq->queue_id, idx, rcd->len,
+                          rcd->sop ? " sop" : "",
+                          rcd->eop ? " eop" : "",
+                          rcd->err ? " err" : "");
+
+               /* error detected? */
+               if (unlikely(rcd->eop && rcd->err)) {
+                       PMD_DRV_LOG(ERR, "receive error");
+                       vmxnet3_rx_error(rxq, rcd);
+                       goto next_completion;
+               }

-       return 0;
-}
+               /* Bogus zero length packet?? */
+               if (unlikely(rcd->len == 0)) {
+                       PMD_DRV_LOG(ERR, "zero length packet");
+                       goto next_completion;
+               }

-static uint8_t rss_intel_key[40] = {
-       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
-       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
-       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
-       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
-       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
-};
+               /* Check host data */
+               if (unlikely(rcd->rqID != rxq->queue_id))
+                       rte_panic("%s(): completion qid %u != %u\n",
+                               __func__, rcd->rqID, rxq->queue_id);
+
+               if (unlikely(idx > rxq->rx_ring.size))
+                       rte_panic("%s(): id %u out of range %u\n",
+                               __func__, idx, rxq->rx_ring.size);
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (unlikely(nmb == NULL)) {
+                       PMD_DRV_LOG(ERR,
+                                  "RX alloc failed port=%u queue=%u",
+                                  rxq->port_id, rxq->queue_id);
+                       dev->data->rx_mbuf_alloc_failed++;
+                       goto next_completion;
+               }

-/*
- * Configure RSS feature
- */
-int
-vmxnet3_rss_configure(struct rte_eth_dev *dev)
-{
-       struct vmxnet3_hw *hw;
-       struct VMXNET3_RSSConf *dev_rss_conf;
-       struct rte_eth_rss_conf *port_rss_conf;
-       uint8_t i, j;
+               /* Refill receive descriptor
+                * Length of new mbuf is same as old mbuf
+                */
+               rxd = rxq->rx_ring.base + idx;
+               uint64_t pa = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb);
+               rxd->rxd.addr = rte_cpu_to_le_64(pa);
+
+               /* Update mbuf */
+               mb = rxq->sw_ring[idx];
+               if (unlikely(mb == NULL))
+                       rte_panic("%s(): no mbuf in ring[%u]\n",
+                                 __func__, idx);
+
+               rxq->sw_ring[idx] = nmb;
+
+               /* assume pkt.data is correct from pktmbuf_reset */
+               mb->pkt.data_len = rcd->len;
+               *rxq->prev_seg = mb;
+               rxq->prev_seg = &mb->pkt.next;
+
+               /* First part of a possibly fragmented packet */
+               if (rcd->sop) {
+                       rxq->start_seg = mb;
+
+                       rte_packet_prefetch(mb->pkt.data);
+                       mb->pkt.pkt_len = rcd->len;
+                       mb->pkt.nb_segs = 1;
+                       mb->pkt.next = NULL;
+
+                       mb->pkt.in_port = rxq->port_id;
+
+                       /* Set offload values, only valid if flag is set. */
+                       mb->pkt.vlan_macip.f.vlan_tci = rcd->tci;
+                       mb->pkt.hash.rss = rcd->rssHash;
+                       mb->ol_flags = vmxnet3_pkt_flags(rcd);
+               } else {
+                       rxq->start_seg->pkt.pkt_len += rcd->len;
+                       rxq->start_seg->pkt.nb_segs++;
+               }

-       PMD_INIT_FUNC_TRACE();
-       hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       dev_rss_conf = hw->rss_conf;
-       port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
-
-       /* loading hashFunc */
-       dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
-       /* loading hashKeySize */
-       dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
-       /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
-       dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
-
-       if (port_rss_conf->rss_key == NULL) {
-               /* Default hash key */
-               port_rss_conf->rss_key = rss_intel_key;
-       }
-
-       /* loading hashKey */
-       memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
-
-       /* loading indTable */
-       for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
-               if (j == dev->data->nb_rx_queues)
-                       j = 0;
-               dev_rss_conf->indTable[i] = j;
-       }
-
-       /* loading hashType */
-       dev_rss_conf->hashType = 0;
-       if (port_rss_conf->rss_hf & ETH_RSS_IPV4)
-               dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-       if (port_rss_conf->rss_hf & ETH_RSS_IPV4_TCP)
-               dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-       if (port_rss_conf->rss_hf & ETH_RSS_IPV6)
-               dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-       if (port_rss_conf->rss_hf & ETH_RSS_IPV6_TCP)
-               dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
+               if (rcd->eop) {
+                       rx_pkts[num_rxd++] = rxq->start_seg;

-       return VMXNET3_SUCCESS;
-}
+                       rxq->start_seg = NULL;
+                       rxq->prev_seg = &rxq->start_seg;
+               }

-/*
- * Configure VLAN Filter feature
- */
-int
-vmxnet3_vlan_configure(struct rte_eth_dev *dev)
-{
-       uint8_t i;
-       struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+       next_completion:
+               /* force updates to rxd before advancing */
+               mem_barrier();

-       PMD_INIT_FUNC_TRACE();
+               /* Advance receive ring (re-enable rx descriptor) */
+               vmxnet3_rx_ring_advance(&rxq->rx_ring, idx);

-       /* Verify if this tag is already set */
-       for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
-               /* Filter all vlan tags out by default */
-               vf_table[i] = 0;
-               /* To-Do: Provide another routine in dev_ops for user config */
+               if (unlikely(rxq->shared->updateRxProd))
+                       vmxnet3_write_reg(rxq->rx_prod, rxq->rx_ring.next);

-               PMD_INIT_LOG(DEBUG, "Registering VLAN portid: %"PRIu8" tag %u\n",
-                                       dev->data->port_id, vf_table[i]);
+               /* advance to next completion */
+               vmxnet3_ring_advance(&rxq->comp_ring);
        }

-       return VMXNET3_SUCCESS;
+       return num_rxd;
 }
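
The receive and transmit paths above share one ring convention: every descriptor carries a
generation bit, and the driver's expected generation flips each time an index wraps, so a
completion entry is valid only while its gen field matches the ring's. The helper
vmxnet3_ring_advance() that the code relies on is declared in vmxnet3_dev.h (added by this
patch but not shown in this hunk); the following is only a sketch of the assumed behaviour,
inferred from the call sites above, not the actual implementation:

    /* Sketch only: advance a ring index, flip the generation bit on wrap
     * (ring sizes are powers of two), and return the new index, matching
     * how the rx/tx code above uses the return value. */
    static inline uint32_t
    vmxnet3_ring_advance(struct vmxnet3_cmd_ring *ring)
    {
            ring->next = (ring->next + 1) & (ring->size - 1);
            if (ring->next == 0)
                    ring->gen ^= 1;     /* descriptor ownership flips each wrap */
            return ring->next;
    }
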
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ b/lib/librte_pmd_vmxnet3/README     2014-05-14 11:46:47.129439301 -0700
@@ -0,0 +1,20 @@
+Brocade VMXNET3 driver
+======================
+
+This directory contains source code based on the OpenSolaris driver.
+
+Some design notes about the Vmxnet3 driver:
+
+1. Link state interrupt is not possible since the IRQ is shared between
+   TX/RX and events. It might be possible to use MSI-X, but that would
+   not help when MSI-X is unavailable.
+
+2. Transmit completion is done in a lazy fashion by looking back at the
+   transmit completion ring (there is no completion interrupt); see the
+   condensed example after this list.
+
+3. The receiver supports RSS, but VMware does not support the extensions
+   that Intel has for UDP.
+
+4. The receiver does not use LRO, since LRO is broken for routing
+   and forwarding.
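
Note 2 maps directly onto the tx_free_thresh logic in vmxnet3_rxtx.c: completed descriptors
are reclaimed only when enough of the ring is in use, not per packet. Condensed from the
transmit path in this patch:

    /* From vmxnet3_xmit_pkts(): reap completions lazily, only when the
     * number of in-flight descriptors crosses the free threshold. */
    if (vmxnet3_tx_used(txq) >= txq->tx_free_thresh)
            vmxnet3_xmit_cleanup(txq);  /* walk completion ring, free mbufs */
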
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_defs.h     2014-05-14 11:46:47.129439301 -0700
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2007, 2011 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ *
+ */
+
+/*
+ * The contents of this file are subject to the terms of the Common
+ * Development and Distribution License (the "License") version 1.0
+ * and no later version.  You may not use this file except in
+ * compliance with the License.
+ *
+ * You can obtain a copy of the License at
+ *         http://www.opensource.org/licenses/cddl1.php
+ *
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ */
+
+/*
+ * vmxnet3_defs.h --
+ *
+ *      Definitions shared by device emulation and guest drivers for
+ *      VMXNET3 NIC
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define        _VMXNET3_DEFS_H_
+
+/*
+ *      Definitions for UPTv1
+ *
+ *      Some of the defs are duplicated in vmkapi_net_upt.h, because
+ *      vmkapi_net_upt.h cannot distribute with OSS yet and vmkapi headers can
+ *      only include vmkapi headers. Make sure they are kept in sync!
+ */
+
+#define        UPT1_MAX_TX_QUEUES  64
+#define        UPT1_MAX_RX_QUEUES  64
+
+#define        UPT1_MAX_INTRS  (UPT1_MAX_TX_QUEUES + UPT1_MAX_RX_QUEUES)
+
+typedef struct UPT1_TxStats {
+       uint64 TSOPktsTxOK;  /* TSO pkts post-segmentation */
+       uint64 TSOBytesTxOK;
+       uint64 ucastPktsTxOK;
+       uint64 ucastBytesTxOK;
+       uint64 mcastPktsTxOK;
+       uint64 mcastBytesTxOK;
+       uint64 bcastPktsTxOK;
+       uint64 bcastBytesTxOK;
+       uint64 pktsTxError;
+       uint64 pktsTxDiscard;
+} __attribute__((__packed__)) UPT1_TxStats;
+
+typedef struct UPT1_RxStats {
+       uint64 LROPktsRxOK;    /* LRO pkts */
+       uint64 LROBytesRxOK;   /* bytes from LRO pkts */
+       /* the following counters are for pkts from the wire, i.e., pre-LRO */
+       uint64 ucastPktsRxOK;
+       uint64 ucastBytesRxOK;
+       uint64 mcastPktsRxOK;
+       uint64 mcastBytesRxOK;
+       uint64 bcastPktsRxOK;
+       uint64 bcastBytesRxOK;
+       uint64 pktsRxOutOfBuf;
+       uint64 pktsRxError;
+} __attribute__((__packed__)) UPT1_RxStats;
+
+/* interrupt moderation level */
+#define        UPT1_IML_NONE           0 /* no interrupt moderation */
+#define        UPT1_IML_HIGHEST        7 /* least intr generated */
+#define        UPT1_IML_ADAPTIVE       8 /* adaptive intr moderation */
+
+/* values for UPT1_RSSConf.hashFunc */
+#define        UPT1_RSS_HASH_TYPE_NONE 0x0
+#define        UPT1_RSS_HASH_TYPE_IPV4 0x01
+#define        UPT1_RSS_HASH_TYPE_TCP_IPV4 0x02
+#define        UPT1_RSS_HASH_TYPE_IPV6 0x04
+#define        UPT1_RSS_HASH_TYPE_TCP_IPV6 0x08
+
+#define        UPT1_RSS_HASH_FUNC_NONE         0x0
+#define        UPT1_RSS_HASH_FUNC_TOEPLITZ     0x01
+
+#define        UPT1_RSS_MAX_KEY_SIZE           40
+#define        UPT1_RSS_MAX_IND_TABLE_SIZE     128
+
+typedef struct UPT1_RSSConf {
+       uint16   hashType;
+       uint16   hashFunc;
+       uint16   hashKeySize;
+       uint16   indTableSize;
+       uint8    hashKey[UPT1_RSS_MAX_KEY_SIZE];
+       uint8    indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+} __attribute__((__packed__)) UPT1_RSSConf;
+
+/* features */
+#define        UPT1_F_RXCSUM 0x0001   /* rx csum verification */
+#define        UPT1_F_RSS 0x0002
+#define        UPT1_F_RXVLAN 0x0004   /* VLAN tag stripping */
+#define        UPT1_F_LRO 0x0008
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+#define        VMXNET3_REG_VRRS  0x0    /* Vmxnet3 Revision Report Selection */
+#define        VMXNET3_REG_UVRS  0x8    /* UPT Version Report Selection */
+#define        VMXNET3_REG_DSAL  0x10   /* Driver Shared Address Low */
+#define        VMXNET3_REG_DSAH  0x18   /* Driver Shared Address High */
+#define        VMXNET3_REG_CMD   0x20   /* Command */
+#define        VMXNET3_REG_MACL  0x28   /* MAC Address Low */
+#define        VMXNET3_REG_MACH  0x30   /* MAC Address High */
+#define        VMXNET3_REG_ICR   0x38   /* Interrupt Cause Register */
+#define        VMXNET3_REG_ECR   0x40   /* Event Cause Register */
+
+#define        VMXNET3_REG_WSAL  0xF00  /* Wireless Shared Address Lo  */
+#define        VMXNET3_REG_WSAH  0xF08  /* Wireless Shared Address Hi  */
+#define        VMXNET3_REG_WCMD  0xF18  /* Wireless Command */
+
+/* BAR 0 */
+#define        VMXNET3_REG_IMR 0x0   /* Interrupt Mask Register */
+#define        VMXNET3_REG_TXPROD 0x600 /* Tx Producer Index */
+#define        VMXNET3_REG_RXPROD 0x800 /* Rx Producer Index for ring 1 */
+#define        VMXNET3_REG_RXPROD2 0xA00 /* Rx Producer Index for ring 2 */
+
+#define        VMXNET3_PT_REG_SIZE 4096    /* BAR 0 */
+#define        VMXNET3_VD_REG_SIZE 4096    /* BAR 1 */
+
+/*
+ * The two Vmxnet3 MMIO Register PCI BARs (BAR 0 at offset 10h and BAR 1 at
+ * offset 14h)  as well as the MSI-X BAR are combined into one PhysMem region:
+ * <-VMXNET3_PT_REG_SIZE-><-VMXNET3_VD_REG_SIZE-><-VMXNET3_MSIX_BAR_SIZE-->
+ * -------------------------------------------------------------------------
+ * |Pass Thru Registers  | Virtual Dev Registers | MSI-X Vector/PBA Table  |
+ * -------------------------------------------------------------------------
+ * VMXNET3_MSIX_BAR_SIZE is defined in "vmxnet3Int.h"
+ */
+#define        VMXNET3_PHYSMEM_PAGES 4
+
+#define        VMXNET3_REG_ALIGN 8  /* All registers are 8-byte aligned. */
+#define        VMXNET3_REG_ALIGN_MASK  0x7
+
+/* I/O Mapped access to registers */
+#define        VMXNET3_IO_TYPE_PT 0
+#define        VMXNET3_IO_TYPE_VD 1
+#define        VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define        VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define        VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
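
A quick round-trip sketch (not part of the patch) of the encoding above, which packs the register-space selector into the top byte and the register offset into the low 24 bits:

    static inline int
    vmxnet3_io_addr_roundtrip(void)
    {
        uint32 io = VMXNET3_IO_ADDR(VMXNET3_IO_TYPE_VD, VMXNET3_REG_CMD);

        /* the two fields unpack to exactly what was packed */
        return VMXNET3_IO_TYPE(io) == VMXNET3_IO_TYPE_VD &&
               VMXNET3_IO_REG(io) == VMXNET3_REG_CMD;
    }
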
+
+#ifndef __le16
+#define        __le16 uint16
+#endif
+#ifndef __le32
+#define        __le32 uint32
+#endif
+#ifndef __le64
+#define        __le64 uint64
+#endif
+
+#define        VMXNET3_CMD_FIRST_SET           0xCAFE0000u
+#define        VMXNET3_CMD_ACTIVATE_DEV        VMXNET3_CMD_FIRST_SET
+#define        VMXNET3_CMD_QUIESCE_DEV         (VMXNET3_CMD_FIRST_SET + 1)
+#define        VMXNET3_CMD_RESET_DEV           (VMXNET3_CMD_FIRST_SET + 2)
+#define        VMXNET3_CMD_UPDATE_RX_MODE      (VMXNET3_CMD_FIRST_SET + 3)
+#define        VMXNET3_CMD_UPDATE_MAC_FILTERS  (VMXNET3_CMD_FIRST_SET + 4)
+#define        VMXNET3_CMD_UPDATE_VLAN_FILTERS (VMXNET3_CMD_FIRST_SET + 5)
+#define        VMXNET3_CMD_UPDATE_RSSIDT       (VMXNET3_CMD_FIRST_SET + 6)
+#define        VMXNET3_CMD_UPDATE_IML          (VMXNET3_CMD_FIRST_SET + 7)
+#define        VMXNET3_CMD_UPDATE_PMCFG        (VMXNET3_CMD_FIRST_SET + 8)
+#define        VMXNET3_CMD_UPDATE_FEATURE      (VMXNET3_CMD_FIRST_SET + 9)
+#define        VMXNET3_CMD_STOP_EMULATION      (VMXNET3_CMD_FIRST_SET + 10)
+#define        VMXNET3_CMD_LOAD_PLUGIN         (VMXNET3_CMD_FIRST_SET + 11)
+#define        VMXNET3_CMD_ACTIVATE_VF         (VMXNET3_CMD_FIRST_SET + 12)
+
+#define        VMXNET3_CMD_FIRST_GET           0xF00D0000u
+#define        VMXNET3_CMD_GET_QUEUE_STATUS    VMXNET3_CMD_FIRST_GET
+#define        VMXNET3_CMD_GET_STATS           (VMXNET3_CMD_FIRST_GET + 1)
+#define        VMXNET3_CMD_GET_LINK            (VMXNET3_CMD_FIRST_GET + 2)
+#define        VMXNET3_CMD_GET_PERM_MAC_LO     (VMXNET3_CMD_FIRST_GET + 3)
+#define        VMXNET3_CMD_GET_PERM_MAC_HI     (VMXNET3_CMD_FIRST_GET + 4)
+#define        VMXNET3_CMD_GET_DID_LO          (VMXNET3_CMD_FIRST_GET + 5)
+#define        VMXNET3_CMD_GET_DID_HI          (VMXNET3_CMD_FIRST_GET + 6)
+#define        VMXNET3_CMD_GET_DEV_EXTRA_INFO  (VMXNET3_CMD_FIRST_GET + 7)
+#define        VMXNET3_CMD_GET_CONF_INTR       (VMXNET3_CMD_FIRST_GET + 8)
+#define        VMXNET3_CMD_GET_ADAPTIVE_RING_INFO (VMXNET3_CMD_FIRST_GET + 9)
+
+/* Adaptive Ring Info Flags */
+#define        VMXNET3_DISABLE_ADAPTIVE_RING 1
+
+/*
+ *     Little Endian layout of bitfields -
+ *     Byte 0 :        7.....len.....0
+ *     Byte 1 :        rsvd gen 13.len.8
+ *     Byte 2 :        5.msscof.0 ext1  dtype
+ *     Byte 3 :        13...msscof...6
+ *
+ *     Big Endian layout of bitfields -
+ *     Byte 0:         13...msscof...6
+ *     Byte 1 :        5.msscof.0 ext1  dtype
+ *     Byte 2 :        rsvd gen 13.len.8
+ *     Byte 3 :        7.....len.....0
+ *
+ *     Thus, le32_to_cpu on the dword will allow the big endian driver to read
+ *     the bit fields correctly. And cpu_to_le32 will convert bit fields
+ *     written by the big endian driver to the format required by the device.
+ */
+
+typedef struct Vmxnet3_TxDesc {
+       __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 msscof:14;  /* MSS, checksum offset, flags */
+       uint32 ext1:1;
+       uint32 dtype:1;    /* descriptor type */
+       uint32 rsvd:1;
+       uint32 gen:1; /* generation bit */
+       uint32 len:14;
+#else
+       uint32 len:14;
+       uint32 gen:1; /* generation bit */
+       uint32 rsvd:1;
+       uint32 dtype:1;    /* descriptor type */
+       uint32 ext1:1;
+       uint32 msscof:14;  /* MSS, checksum offset, flags */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 tci:16; /* Tag to Insert */
+       uint32 ti:1; /* VLAN Tag Insertion */
+       uint32 ext2:1;
+       uint32 cq:1; /* completion request */
+       uint32 eop:1; /* End Of Packet */
+       uint32 om:2; /* offload mode */
+       uint32 hlen:10; /* header len */
+#else
+       uint32 hlen:10; /* header len */
+       uint32 om:2; /* offload mode */
+       uint32 eop:1; /* End Of Packet */
+       uint32 cq:1; /* completion request */
+       uint32 ext2:1;
+       uint32 ti:1; /* VLAN Tag Insertion */
+       uint32 tci:16; /* Tag to Insert */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+} __attribute__((__packed__)) Vmxnet3_TxDesc;
+
+/* TxDesc.OM values */
+#define        VMXNET3_OM_NONE  0
+#define        VMXNET3_OM_CSUM  2
+#define        VMXNET3_OM_TSO   3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define        VMXNET3_TXD_EOP_SHIFT 12
+#define        VMXNET3_TXD_CQ_SHIFT  13
+#define        VMXNET3_TXD_GEN_SHIFT 14
+#define        VMXNET3_TXD_EOP_DWORD_SHIFT 3
+#define        VMXNET3_TXD_GEN_DWORD_SHIFT 2
+
+#define        VMXNET3_TXD_CQ  (1 << VMXNET3_TXD_CQ_SHIFT)
+#define        VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define        VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
+
+#define        VMXNET3_TXD_GEN_SIZE 1
+#define        VMXNET3_TXD_EOP_SIZE 1
+
+#define        VMXNET3_HDR_COPY_SIZE   128
+
+typedef struct Vmxnet3_TxDataDesc {
+       uint8 data[VMXNET3_HDR_COPY_SIZE];
+} __attribute__((__packed__)) Vmxnet3_TxDataDesc;
+
+#define        VMXNET3_TCD_GEN_SHIFT   31
+#define        VMXNET3_TCD_GEN_SIZE    1
+#define        VMXNET3_TCD_TXIDX_SHIFT 0
+#define        VMXNET3_TCD_TXIDX_SIZE  12
+#define        VMXNET3_TCD_GEN_DWORD_SHIFT     3
+
+typedef struct Vmxnet3_TxCompDesc {
+       uint32 txdIdx:12;    /* Index of the EOP TxDesc */
+       uint32 ext1:20;
+
+       __le32 ext2;
+       __le32 ext3;
+
+       uint32 rsvd:24;
+       uint32 type:7; /* completion type */
+       uint32 gen:1; /* generation bit */
+} __attribute__((__packed__)) Vmxnet3_TxCompDesc;
+
+typedef struct Vmxnet3_RxDesc {
+       __le64 addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 gen:1; /* Generation bit */
+       uint32 rsvd:15;
+       uint32 dtype:1; /* Descriptor type */
+       uint32 btype:1; /* Buffer Type */
+       uint32 len:14;
+#else
+       uint32 len:14;
+       uint32 btype:1; /* Buffer Type */
+       uint32 dtype:1; /* Descriptor type */
+       uint32 rsvd:15;
+       uint32 gen:1; /* Generation bit */
+#endif
+       __le32 ext1;
+} __attribute__((__packed__)) Vmxnet3_RxDesc;
+
+/* values of RXD.BTYPE */
+#define        VMXNET3_RXD_BTYPE_HEAD   0    /* head only */
+#define        VMXNET3_RXD_BTYPE_BODY   1    /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define        VMXNET3_RXD_BTYPE_SHIFT  14
+#define        VMXNET3_RXD_GEN_SHIFT    31
+
+typedef struct Vmxnet3_RxCompDesc {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 ext2:1;
+       uint32 cnc:1; /* Checksum Not Calculated */
+       uint32 rssType:4; /* RSS hash type used */
+       uint32 rqID:10; /* rx queue/ring ID */
+       uint32 sop:1; /* Start of Packet */
+       uint32 eop:1; /* End of Packet */
+       uint32 ext1:2;
+       uint32 rxdIdx:12; /* Index of the RxDesc */
+#else
+       uint32 rxdIdx:12; /* Index of the RxDesc */
+       uint32 ext1:2;
+       uint32 eop:1; /* End of Packet */
+       uint32 sop:1; /* Start of Packet */
+       uint32 rqID:10; /* rx queue/ring ID */
+       uint32 rssType:4; /* RSS hash type used */
+       uint32 cnc:1; /* Checksum Not Calculated */
+       uint32 ext2:1;
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+       __le32 rssHash; /* RSS hash value */
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 tci:16; /* Tag stripped */
+       uint32 ts:1; /* Tag is stripped */
+       uint32 err:1; /* Error */
+       uint32 len:14; /* data length */
+#else
+       uint32 len:14; /* data length */
+       uint32 err:1; /* Error */
+       uint32 ts:1; /* Tag is stripped */
+       uint32 tci:16; /* Tag stripped */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+
+
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32 gen:1; /* generation bit */
+       uint32 type:7; /* completion type */
+       uint32 fcs:1; /* Frame CRC correct */
+       uint32 frg:1; /* IP Fragment */
+       uint32 v4:1; /* IPv4 */
+       uint32 v6:1; /* IPv6 */
+       uint32 ipc:1; /* IP Checksum Correct */
+       uint32 tcp:1; /* TCP packet */
+       uint32 udp:1; /* UDP packet */
+       uint32 tuc:1; /* TCP/UDP Checksum Correct */
+       uint32 csum:16;
+#else
+       uint32 csum:16;
+       uint32 tuc:1; /* TCP/UDP Checksum Correct */
+       uint32 udp:1; /* UDP packet */
+       uint32 tcp:1; /* TCP packet */
+       uint32 ipc:1; /* IP Checksum Correct */
+       uint32 v6:1; /* IPv6 */
+       uint32 v4:1; /* IPv4 */
+       uint32 frg:1; /* IP Fragment */
+       uint32 fcs:1; /* Frame CRC correct */
+       uint32 type:7; /* completion type */
+       uint32 gen:1; /* generation bit */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+} __attribute__((__packed__)) Vmxnet3_RxCompDesc;
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define        VMXNET3_RCD_TUC_SHIFT  16
+#define        VMXNET3_RCD_IPC_SHIFT  19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define        VMXNET3_RCD_TYPE_SHIFT 56
+#define        VMXNET3_RCD_GEN_SHIFT  63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define        VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
+                       1 << VMXNET3_RCD_IPC_SHIFT)
+
+/* value of RxCompDesc.rssType */
+#define        VMXNET3_RCD_RSS_TYPE_NONE 0
+#define        VMXNET3_RCD_RSS_TYPE_IPV4 1
+#define        VMXNET3_RCD_RSS_TYPE_TCPIPV4  2
+#define        VMXNET3_RCD_RSS_TYPE_IPV6 3
+#define        VMXNET3_RCD_RSS_TYPE_TCPIPV6 4
+
+/* a union for accessing all cmd/completion descriptors */
+typedef union Vmxnet3_GenericDesc {
+       __le64 qword[2];
+       __le32 dword[4];
+       __le16 word[8];
+       Vmxnet3_TxDesc txd;
+       Vmxnet3_RxDesc rxd;
+       Vmxnet3_TxCompDesc tcd;
+       Vmxnet3_RxCompDesc rcd;
+} Vmxnet3_GenericDesc;
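
To illustrate how the union and the shift constants above cooperate, a sketch only (not part of the patch), assuming a little-endian guest so no byte swapping is needed:

    static inline int
    vmxnet3_rcd_csum_ok(const Vmxnet3_GenericDesc *gd)
    {
        /* tuc (bit 16) and ipc (bit 19) of an Rx completion live in
         * dword[3]; VMXNET3_RCD_CSUM_OK has both bits set. */
        return (gd->dword[3] & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK;
    }
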
+
+#define        VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define        VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define        VMXNET3_TXD_NEEDED(size) \
+       (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / VMXNET3_MAX_TX_BUF_SIZE)
+
+/* max # of tx descs for a non-tso pkt */
+#define        VMXNET3_MAX_TXD_PER_PKT 16
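
A worked example of the descriptor arithmetic (illustration only): with the 16 KB per-descriptor limit, a 40000-byte TSO buffer needs ceil(40000 / 16384) = 3 descriptors, while a non-TSO packet may never use more than 16:

    static inline uint32
    vmxnet3_txd_needed_example(void)
    {
        /* (40000 + 16383) / 16384 == 3; non-TSO packets are further
         * capped at VMXNET3_MAX_TXD_PER_PKT (16) descriptors. */
        return VMXNET3_TXD_NEEDED(40000);
    }
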
+
+/* Max size of a single rx buffer */
+#define        VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define        VMXNET3_MIN_T0_BUF_SIZE  128
+#define        VMXNET3_MAX_CSUM_OFFSET  1024
+
+/* Ring base address alignment */
+#define        VMXNET3_RING_BA_ALIGN   512
+#define        VMXNET3_RING_BA_MASK    (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define        VMXNET3_RING_SIZE_ALIGN 32
+#define        VMXNET3_RING_SIZE_MASK  (VMXNET3_RING_SIZE_ALIGN - 1)
+
+/* Max ring size */
+#define        VMXNET3_TX_RING_MAX_SIZE   4096
+#define        VMXNET3_TC_RING_MAX_SIZE   4096
+#define        VMXNET3_RX_RING_MAX_SIZE   4096
+#define        VMXNET3_RC_RING_MAX_SIZE   8192
+
+/* a list of reasons for queue stop */
+
+#define        VMXNET3_ERR_NOEOP 0x80000000  /* cannot find the EOP desc of a pkt */
+#define        VMXNET3_ERR_TXD_REUSE 0x80000001 /* reuse TxDesc before tx completion */
+#define        VMXNET3_ERR_BIG_PKT 0x80000002  /* too many TxDesc for a pkt */
+#define        VMXNET3_ERR_DESC_NOT_SPT 0x80000003  /* descriptor type not supported */
+#define        VMXNET3_ERR_SMALL_BUF 0x80000004  /* type 0 buffer too small */
+#define        VMXNET3_ERR_STRESS 0x80000005  /* stress option firing in vmkernel */
+#define        VMXNET3_ERR_SWITCH 0x80000006  /* mode switch failure */
+#define        VMXNET3_ERR_TXD_INVALID 0x80000007  /* invalid TxDesc */
+
+/* completion descriptor types */
+#define        VMXNET3_CDTYPE_TXCOMP 0    /* Tx Completion Descriptor */
+#define        VMXNET3_CDTYPE_RXCOMP 3    /* Rx Completion Descriptor */
+
+#define        VMXNET3_GOS_BITS_UNK 0   /* unknown */
+#define        VMXNET3_GOS_BITS_32 1
+#define        VMXNET3_GOS_BITS_64 2
+
+#define        VMXNET3_GOS_TYPE_UNK 0 /* unknown */
+#define        VMXNET3_GOS_TYPE_LINUX 1
+#define        VMXNET3_GOS_TYPE_WIN 2
+#define        VMXNET3_GOS_TYPE_SOLARIS 3
+#define        VMXNET3_GOS_TYPE_FREEBSD 4
+#define        VMXNET3_GOS_TYPE_PXE 5
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+
+typedef struct Vmxnet3_GOSInfo {
+#ifdef __BIG_ENDIAN_BITFIELD
+       uint32   gosMisc: 10; /* other info about gos */
+       uint32   gosVer:  16; /* gos version */
+       uint32   gosType: 4; /* which guest */
+       uint32   gosBits: 2; /* 32-bit or 64-bit? */
+#else
+       uint32   gosBits: 2; /* 32-bit or 64-bit? */
+       uint32   gosType: 4; /* which guest */
+       uint32   gosVer:  16; /* gos version */
+       uint32   gosMisc: 10; /* other info about gos */
+#endif  /* __BIG_ENDIAN_BITFIELD */
+} __attribute__((__packed__)) Vmxnet3_GOSInfo;
+
+typedef struct Vmxnet3_DriverInfo {
+       __le32 version; /* driver version */
+       Vmxnet3_GOSInfo gos;
+       __le32 vmxnet3RevSpt; /* vmxnet3 revision supported */
+       __le32 uptVerSpt; /* upt version supported */
+} __attribute__((__packed__)) Vmxnet3_DriverInfo;
+
+#define        VMXNET3_REV1_MAGIC 0xbabefee1
+
+/*
+ * QueueDescPA must be 128 bytes aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define        VMXNET3_QUEUE_DESC_ALIGN 128
+
+typedef struct Vmxnet3_MiscConf {
+       Vmxnet3_DriverInfo driverInfo;
+       __le64 uptFeatures;
+       __le64 ddPA; /* driver data PA */
+       __le64 queueDescPA; /* queue descriptor table PA */
+       __le32 ddLen; /* driver data len */
+       __le32 queueDescLen; /* queue descriptor table len, in bytes */
+       __le32 mtu;
+       __le16 maxNumRxSG;
+       uint8 numTxQueues;
+       uint8 numRxQueues;
+       __le32 reserved[4];
+} __attribute__((__packed__)) Vmxnet3_MiscConf;
+
+typedef struct Vmxnet3_TxQueueConf {
+       __le64 txRingBasePA;
+       __le64 dataRingBasePA;
+       __le64 compRingBasePA;
+       __le64 ddPA; /* driver data */
+       __le64 reserved;
+       __le32 txRingSize; /* # of tx desc */
+       __le32 dataRingSize; /* # of data desc */
+       __le32 compRingSize; /* # of comp desc */
+       __le32 ddLen; /* size of driver data */
+       uint8 intrIdx;
+       uint8 _pad[7];
+} __attribute__((__packed__)) Vmxnet3_TxQueueConf;
+
+typedef struct Vmxnet3_RxQueueConf {
+       __le64 rxRingBasePA[2];
+       __le64 compRingBasePA;
+       __le64 ddPA; /* driver data */
+       __le64 reserved;
+       __le32 rxRingSize[2]; /* # of rx desc */
+       __le32 compRingSize; /* # of rx comp desc */
+       __le32 ddLen; /* size of driver data */
+       uint8 intrIdx;
+       uint8 _pad[7];
+} __attribute__((__packed__)) Vmxnet3_RxQueueConf;
+
+enum vmxnet3_intr_mask_mode {
+       VMXNET3_IMM_AUTO = 0,
+       VMXNET3_IMM_ACTIVE = 1,
+       VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+       VMXNET3_IT_AUTO = 0,
+       VMXNET3_IT_INTX = 1,
+       VMXNET3_IT_MSI  = 2,
+       VMXNET3_IT_MSIX = 3
+};
+
+#define        VMXNET3_MAX_TX_QUEUES   8
+#define        VMXNET3_MAX_RX_QUEUES   16
+/* additional 1 for events */
+#define        VMXNET3_MAX_INTRS       25
+
+/* value of intrCtrl */
+#define        VMXNET3_IC_DISABLE_ALL  0x1   /* bit 0 */
+
+typedef struct Vmxnet3_IntrConf {
+       Bool autoMask;
+       uint8 numIntrs; /* # of interrupts */
+       uint8 eventIntrIdx;
+       uint8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for each intr */
+       __le32 intrCtrl;
+       __le32 reserved[2];
+} __attribute__((__packed__)) Vmxnet3_IntrConf;
+
+/* one bit per VLAN ID, the size is in the units of uint32 */
+#define        VMXNET3_VFT_SIZE  (4096 / (sizeof (uint32) * 8))
+
+typedef struct Vmxnet3_QueueStatus {
+       Bool stopped;
+       uint8 _pad[3];
+       __le32 error;
+} __attribute__((__packed__)) Vmxnet3_QueueStatus;
+
+typedef struct Vmxnet3_TxQueueCtrl {
+       __le32 txNumDeferred;
+       __le32 txThreshold;
+       __le64 reserved;
+} __attribute__((__packed__)) Vmxnet3_TxQueueCtrl;
+
+typedef struct Vmxnet3_RxQueueCtrl {
+       Bool updateRxProd;
+       uint8 _pad[7];
+       __le64 reserved;
+} __attribute__((__packed__)) Vmxnet3_RxQueueCtrl;
+
+#define        VMXNET3_RXM_UCAST 0x01  /* unicast only */
+#define        VMXNET3_RXM_MCAST 0x02  /* multicast passing the filters */
+#define        VMXNET3_RXM_BCAST 0x04  /* broadcast only */
+#define        VMXNET3_RXM_ALL_MULTI 0x08  /* all multicast */
+#define        VMXNET3_RXM_PROMISC 0x10  /* promiscuous */
+
+typedef struct Vmxnet3_RxFilterConf {
+       __le32 rxMode; /* VMXNET3_RXM_xxx */
+       __le16 mfTableLen; /* size of the multicast filter table */
+       __le16 _pad1;
+       __le64 mfTablePA; /* PA of the multicast filters table */
+       __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+} __attribute__((__packed__)) Vmxnet3_RxFilterConf;
+
+#define        VMXNET3_PM_MAX_FILTERS 6
+#define        VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define        VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define        VMXNET3_PM_WAKEUP_MAGIC 0x01  /* wake up on magic pkts */
+#define        VMXNET3_PM_WAKEUP_FILTER 0x02  /* wake up on pkts matching filters */
+
+typedef struct Vmxnet3_PM_PktFilter {
+       uint8 maskSize;
+       uint8 patternSize;
+       uint8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+       uint8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+       uint8 pad[6];
+} __attribute__((__packed__)) Vmxnet3_PM_PktFilter;
+
+typedef struct Vmxnet3_PMConf {
+       __le16 wakeUpEvents;  /* VMXNET3_PM_WAKEUP_xxx */
+       uint8 numFilters;
+       uint8 pad[5];
+       Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+} __attribute__((__packed__)) Vmxnet3_PMConf;
+
+typedef struct Vmxnet3_VariableLenConfDesc {
+       __le32 confVer;
+       __le32 confLen;
+       __le64 confPA;
+} __attribute__((__packed__)) Vmxnet3_VariableLenConfDesc;
+
+typedef struct Vmxnet3_DSDevRead {
+       /* read-only region for device, read by dev in response to a SET cmd */
+       Vmxnet3_MiscConf misc;
+       Vmxnet3_IntrConf intrConf;
+       Vmxnet3_RxFilterConf rxFilterConf;
+       Vmxnet3_VariableLenConfDesc rssConfDesc;
+       Vmxnet3_VariableLenConfDesc pmConfDesc;
+       Vmxnet3_VariableLenConfDesc pluginConfDesc;
+} __attribute__((__packed__)) Vmxnet3_DSDevRead;
+
+typedef struct Vmxnet3_TxQueueDesc {
+       Vmxnet3_TxQueueCtrl ctrl;
+       Vmxnet3_TxQueueConf conf;
+       /* Driver read after a GET command */
+       Vmxnet3_QueueStatus status;
+       UPT1_TxStats stats;
+       uint8 _pad[88]; /* 128 aligned */
+} __attribute__((__packed__)) Vmxnet3_TxQueueDesc;
+
+typedef struct Vmxnet3_RxQueueDesc {
+       Vmxnet3_RxQueueCtrl ctrl;
+       Vmxnet3_RxQueueConf conf;
+       /* Driver read after a GET command */
+       Vmxnet3_QueueStatus status;
+       UPT1_RxStats stats;
+       uint8 _pad[88]; /* 128 aligned */
+} __attribute__((__packed__)) Vmxnet3_RxQueueDesc;
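
Given the QueueDescPA layout described earlier (numTxQueues tx queue descriptors followed by numRxQueues rx queue descriptors, on a 128-byte boundary), the size of the table a driver must reserve works out as in this sketch (not part of the patch):

    static inline uint32
    vmxnet3_queue_desc_table_size(uint8 num_tx, uint8 num_rx)
    {
        /* each descriptor is padded so its size is a multiple of
         * VMXNET3_QUEUE_DESC_ALIGN (128) */
        return (uint32)num_tx * sizeof(Vmxnet3_TxQueueDesc) +
               (uint32)num_rx * sizeof(Vmxnet3_RxQueueDesc);
    }
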
+
+typedef struct Vmxnet3_DriverShared {
+       __le32 magic;
+       __le32 pad; /* make devRead start at 64-bit boundaries */
+       Vmxnet3_DSDevRead devRead;
+       __le32 ecr;
+       __le32 reserved[5];
+} __attribute__((__packed__)) Vmxnet3_DriverShared;
+
+#define        VMXNET3_ECR_RQERR (1 << 0)
+#define        VMXNET3_ECR_TQERR (1 << 1)
+#define        VMXNET3_ECR_LINK (1 << 2)
+#define        VMXNET3_ECR_DIC (1 << 3)
+#define        VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define        VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define        VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+do {\
+       (idx)++;                                \
+       if (UNLIKELY((idx) == (ring_size))) {   \
+               (idx) = 0;                      \
+       }                                       \
+} while (0)
+
+#define        VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+       vfTable[vid >> 5] |= (1 << (vid & 31))
+#define        VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+       vfTable[vid >> 5] &= ~(1 << (vid & 31))
+
+#define        VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+       ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
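
A small usage sketch for the VLAN filter helpers above (illustration only): each VLAN ID maps to one bit, with vid >> 5 selecting the 32-bit word and vid & 31 the bit within it:

    static inline int
    vmxnet3_vftable_example(uint32 vfTable[VMXNET3_VFT_SIZE])
    {
        VMXNET3_SET_VFTABLE_ENTRY(vfTable, 100);        /* admit VLAN 100 */

        if (!VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, 100))
            return -1;

        VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, 100);      /* drop it again */
        return 0;
    }
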
+
+#define        VMXNET3_MAX_MTU 9000
+#define        VMXNET3_MIN_MTU 60
+
+#define        VMXNET3_LINK_UP (10000 << 16 | 1)    /* 10 Gbps, up */
+#define        VMXNET3_LINK_DOWN 0
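
Assuming the encoding implied by VMXNET3_LINK_UP (speed in Mbps in the upper 16 bits, link-up flag in bit 0), a link word can be decoded as in this sketch (not part of the patch):

    static inline int
    vmxnet3_link_decode(uint32 link, uint32 *speed_mbps)
    {
        *speed_mbps = link >> 16;       /* VMXNET3_LINK_UP -> 10000 */
        return link & 1;                /* 1 = up, 0 = down */
    }
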
+
+#define        VMXWIFI_DRIVER_SHARED_LEN 8192
+
+#define        VMXNET3_DID_PASSTHRU    0xFFFF
+
+#endif /* _VMXNET3_DEFS_H_ */
--- /dev/null   1970-01-01 00:00:00.000000000 +0000
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_dev.h      2014-05-14 11:46:47.129439301 -0700
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2012-2014 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+/* typedefs to make CDDL header happy */
+typedef uint8_t Bool;
+typedef uint8_t uint8;
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+
+/* header file from OpenSolaris */
+#include "vmxnet3_defs.h"
+#include "vmxnet3_logs.h"
+
+#define VMXNET3_MAX_ETH_HDR    22
+
+/*
+ * Device specific data and access functions
+ */
+struct vmxnet3_cmd_ring {
+       union Vmxnet3_GenericDesc *base;
+       uint32_t size;
+       uint32_t next;
+       uint8_t  gen;
+       uint64_t phys_addr;
+};
+
+static inline uint32_t
+vmxnet3_ring_advance(struct vmxnet3_cmd_ring *ring)
+{
+       if (++ring->next == ring->size) {
+               ring->next = 0;
+               VMXNET3_FLIP_RING_GEN(ring->gen);
+       }
+       return ring->next;
+}
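
A sketch of the consumption pattern this helper supports (not part of the patch, little-endian assumed): a completion descriptor belongs to the driver only while its gen bit matches the ring's current generation, and advancing past the end of the ring flips that generation.

    static inline void
    vmxnet3_comp_ring_drain(struct vmxnet3_cmd_ring *comp)
    {
        union Vmxnet3_GenericDesc *gd = &comp->base[comp->next];

        /* stop as soon as the device has not yet written an entry */
        while (gd->tcd.gen == comp->gen) {
            /* ... handle the completed tx descriptor gd->tcd here ... */
            vmxnet3_ring_advance(comp);
            gd = &comp->base[comp->next];
        }
    }
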
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct vmxnet3_tx_queue {
+       struct vmxnet3_cmd_ring tx_ring;        /* Transmit pending */
+       struct vmxnet3_cmd_ring comp_ring;      /* Transmit complete */
+       Vmxnet3_TxQueueCtrl     *shared;        /* Queue thresholds */
+       void                    *tx_prod;       /* Tx producer register */
+       struct rte_mbuf         **sw_ring;      /* Associated buffers */
+       uint16_t                nb_tx_desc;     /* Max Tx descriptors */
+       uint16_t                tx_free_thresh; /* Transmit cleanup level */
+       uint16_t                next_to_clean;  /* Next ring entry to clean */
+       uint16_t                queue_id;
+       uint8_t                 port_id;
+} __rte_cache_aligned;
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct vmxnet3_rx_queue {
+       struct vmxnet3_cmd_ring rx_ring;        /* Receive pending */
+       struct vmxnet3_cmd_ring comp_ring;      /* Receive completed */
+       struct rte_mempool      *mb_pool;       /* Receive buffer pool */
+       struct rte_mbuf         **sw_ring;      /* Associated buffers */
+       struct rte_mbuf         *start_seg;     /* Start of current Rx packet */
+       struct rte_mbuf         **prev_seg;     /* Ptr to last segment's next */
+       Vmxnet3_RxQueueCtrl     *shared;        /* Queue thresholds */
+       void                    *rx_prod;       /* Rx producer register */
+
+       uint64_t                drop_errors;    /* Rx packet errors */
+       uint64_t                fcs_errors;
+
+       uint16_t                nb_rx_desc;     /* Max Rx descriptors */
+       uint16_t                queue_id;
+       uint8_t                 port_id;
+} __rte_cache_aligned;
+
+/**
+ * Structure associated with each interface
+ */
+struct vmxnet3_dev {
+       uint8_t  *bar0, *bar1;
+       uint32_t rx_mode;
+       struct ether_addr mac_addr;
+       rte_spinlock_t cmd_lock;
+       const struct rte_memzone *shared;
+       const struct rte_memzone *qdesc;
+       Vmxnet3_TxQueueDesc *tqd_start;
+       Vmxnet3_RxQueueDesc *rqd_start;
+       uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+
+       struct vmxnet3_tx_queue tx_queue[VMXNET3_MAX_TX_QUEUES];
+       struct vmxnet3_rx_queue rx_queue[VMXNET3_MAX_RX_QUEUES];
+};
+
+static inline uint32_t vmxnet3_read_reg(void *addr)
+{
+       return *((volatile uint32_t *)addr);
+}
+
+static inline void vmxnet3_write_reg(void *addr, uint32_t val)
+{
+       *((volatile uint32_t *)addr) = val;
+}
+
+static inline uint32_t vmxnet3_bar0_get32(struct vmxnet3_dev *dp, unsigned reg)
+{
+       return vmxnet3_read_reg(dp->bar0 + reg);
+}
+
+static inline void vmxnet3_bar0_put32(struct vmxnet3_dev *dp,
+                                     unsigned reg, uint32_t value)
+{
+       vmxnet3_write_reg(dp->bar0 + reg, value);
+}
+
+static inline uint32_t vmxnet3_bar1_get32(struct vmxnet3_dev *dp, unsigned reg)
+{
+       return vmxnet3_read_reg(dp->bar1 + reg);
+}
+
+static inline void vmxnet3_bar1_put32(struct vmxnet3_dev *dp,
+                                     unsigned reg, uint32_t value)
+{
+       vmxnet3_write_reg(dp->bar1 + reg, value);
+}
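
For illustration (not part of the patch), these accessors are typically combined with cmd_lock to issue a device command; the sketch below assumes the usual vmxnet3 convention that reading VMXNET3_REG_CMD back returns the result of the command just written, and the rte_spinlock.h include that the full header pulls in:

    static inline uint32_t
    vmxnet3_cmd(struct vmxnet3_dev *dp, uint32_t cmd)
    {
        uint32_t ret;

        /* the command register is shared state: serialize writers and
         * read the result back from the same register */
        rte_spinlock_lock(&dp->cmd_lock);
        vmxnet3_bar1_put32(dp, VMXNET3_REG_CMD, cmd);
        ret = vmxnet3_bar1_get32(dp, VMXNET3_REG_CMD);
        rte_spinlock_unlock(&dp->cmd_lock);

        return ret;
    }

For example, vmxnet3_cmd(dp, VMXNET3_CMD_GET_LINK) would return the link word whose encoding is described in vmxnet3_defs.h.
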
+
+extern int
+vmxnet3_tx_queue_setup(struct rte_eth_dev *dev,
+                      uint16_t queue_idx,
+                      uint16_t nb_desc,
+                      unsigned int socket_id,
+                      const struct rte_eth_txconf *tx_conf);
+extern void vmxnet3_tx_queue_release(void *rxq);
+extern void vmxnet3_tx_flush_all(struct rte_eth_dev *dev);
+extern uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                 uint16_t nb_pkts);
+
+extern int
+vmxnet3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                      uint16_t nb_desc, unsigned int socket_id,
+                      const struct rte_eth_rxconf *rx_conf,
+                      struct rte_mempool *mp);
+extern void vmxnet3_rx_queue_release(void *rxq);
+extern int vmxnet3_rx_init(struct rte_eth_dev *dev);
+extern void vmxnet3_rx_start(struct rte_eth_dev *dev);
+extern void vmxnet3_rx_flush_all(struct rte_eth_dev *dev);
+extern uint16_t vmxnet3_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts);
