Module Name:    src
Committed By:   yamaguchi
Date:           Thu Mar 23 02:57:54 UTC 2023

Modified Files:
        src/sys/dev/pci: if_vioif.c

Log Message:
vioif(4): reorganize functions

This change moves and renames functions; there is no functional change.
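
As the diff below shows, the forward declarations are regrouped by role;
a rough map of the new sections (function names taken from the diff):

  /* tx & rx */          vioif_netqueue_init/_teardown,
                         vioif_net_intr_enable/_disable,
                         vioif_net_sched_handle
  /* rx */               vioif_populate_rx_mbufs_locked, vioif_rx_intr,
                         vioif_rx_handle, vioif_rx_queue_clear
  /* tx */               vioif_start_locked, vioif_transmit_locked,
                         vioif_deferred_transmit, vioif_tx_intr,
                         vioif_tx_handle, vioif_tx_queue_clear
  /* controls */         vioif_ctrl_intr, vioif_ctrl_rx, vioif_set_promisc,
                         vioif_set_allmulti, vioif_set_rx_filter,
                         vioif_rx_filter, vioif_set_mac_addr,
                         vioif_ctrl_mq_vq_pairs_set
  /* config interrupt */ vioif_config_change, vioif_cfg_softint,
                         vioif_update_link_status
  /* others */           vioif_alloc_queues, vioif_free_queues,
                         vioif_alloc_mems, vioif_workq_*/vioif_work_*,
                         vioif_setup_sysctl, vioif_setup_stats

The rename is sc_ctl_softint -> sc_cfg_softint and
vioif_ctl_softint -> vioif_cfg_softint, matching the new
"config interrupt" grouping.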


To generate a diff of this commit:
cvs rdiff -u -r1.100 -r1.101 src/sys/dev/pci/if_vioif.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_vioif.c
diff -u src/sys/dev/pci/if_vioif.c:1.100 src/sys/dev/pci/if_vioif.c:1.101
--- src/sys/dev/pci/if_vioif.c:1.100	Thu Mar 23 02:52:29 2023
+++ src/sys/dev/pci/if_vioif.c	Thu Mar 23 02:57:54 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_vioif.c,v 1.100 2023/03/23 02:52:29 yamaguchi Exp $	*/
+/*	$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $	*/
 
 /*
  * Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.100 2023/03/23 02:52:29 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.101 2023/03/23 02:57:54 yamaguchi Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -330,7 +330,7 @@ struct vioif_softc {
 	void			*sc_dmamem;
 	void			*sc_kmem;
 
-	void			*sc_ctl_softint;
+	void			*sc_cfg_softint;
 
 	struct workqueue	*sc_txrx_workqueue;
 	bool			 sc_txrx_workqueue_sysctl;
@@ -360,1171 +360,1213 @@ static int	vioif_finalize_teardown(devic
 static int	vioif_init(struct ifnet *);
 static void	vioif_stop(struct ifnet *, int);
 static void	vioif_start(struct ifnet *);
-static void	vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
 static int	vioif_transmit(struct ifnet *, struct mbuf *);
-static void	vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
 static int	vioif_ioctl(struct ifnet *, u_long, void *);
 static void	vioif_watchdog(struct ifnet *);
+static int	vioif_ifflags(struct vioif_softc *);
 static int	vioif_ifflags_cb(struct ethercom *);
 
 /* tx & rx */
-static void	vioif_net_sched_handle(struct vioif_softc *,
-		    struct vioif_netqueue *);
-static int	vioif_net_load_mbuf(struct virtio_softc *,
-		    struct vioif_net_map *, struct mbuf *, int);
-static void	vioif_net_unload_mbuf(struct virtio_softc *,
-		    struct vioif_net_map *);
-static int	vioif_net_enqueue_tx(struct virtio_softc *, struct virtqueue *,
-		    int, struct vioif_net_map *);
-static int	vioif_net_enqueue_rx(struct virtio_softc *, struct virtqueue *,
-		    int, struct vioif_net_map *);
-static struct mbuf *
-		vioif_net_dequeue_commit(struct virtio_softc *,
-		    struct virtqueue *, int, struct vioif_net_map *, int);
+static int	vioif_netqueue_init(struct vioif_softc *,
+		    struct virtio_softc *, size_t, u_int);
+static void	vioif_netqueue_teardown(struct vioif_softc *,
+		    struct virtio_softc *, size_t);
 static void	vioif_net_intr_enable(struct vioif_softc *,
 		    struct virtio_softc *);
 static void	vioif_net_intr_disable(struct vioif_softc *,
 		    struct virtio_softc *);
+static void	vioif_net_sched_handle(struct vioif_softc *,
+		    struct vioif_netqueue *);
 
 /* rx */
 static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *,
 		    struct vioif_netqueue *);
-static void	vioif_rx_queue_clear(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_netqueue *);
-static bool	vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_netqueue *, u_int, size_t *);
 static int	vioif_rx_intr(void *);
 static void	vioif_rx_handle(void *);
+static void	vioif_rx_queue_clear(struct vioif_softc *,
+		    struct virtio_softc *, struct vioif_netqueue *);
 
 /* tx */
+static void	vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
+static void	vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
+static void	vioif_deferred_transmit(void *);
 static int	vioif_tx_intr(void *);
 static void	vioif_tx_handle(void *);
 static void	vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *,
 		    struct vioif_netqueue *);
-static bool	vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_netqueue *, u_int);
-static void	vioif_deferred_transmit(void *);
-
-/* workqueue */
-static struct workqueue*
-		vioif_workq_create(const char *, pri_t, int, int);
-static void	vioif_workq_destroy(struct workqueue *);
-static void	vioif_workq_work(struct work *, void *);
-static void	vioif_work_set(struct vioif_work *, void(*)(void *), void *);
-static void	vioif_work_add(struct workqueue *, struct vioif_work *);
-static void	vioif_work_wait(struct workqueue *, struct vioif_work *);
 
-/* other control */
-static int	vioif_get_link_status(struct vioif_softc *);
-static void	vioif_update_link_status(struct vioif_softc *);
+/* controls */
+static int	vioif_ctrl_intr(void *);
 static int	vioif_ctrl_rx(struct vioif_softc *, int, bool);
 static int	vioif_set_promisc(struct vioif_softc *, bool);
 static int	vioif_set_allmulti(struct vioif_softc *, bool);
 static int	vioif_set_rx_filter(struct vioif_softc *);
 static int	vioif_rx_filter(struct vioif_softc *);
 static int	vioif_set_mac_addr(struct vioif_softc *);
-static int	vioif_ctrl_intr(void *);
-static int	vioif_config_change(struct virtio_softc *);
-static void	vioif_ctl_softint(void *);
 static int	vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *, int);
+
+/* config interrupt */
+static int	vioif_config_change(struct virtio_softc *);
+static void	vioif_cfg_softint(void *);
+static void	vioif_update_link_status(struct vioif_softc *);
+
+/* others */
+static void	vioif_alloc_queues(struct vioif_softc *);
+static void	vioif_free_queues(struct vioif_softc *);
+static int	vioif_alloc_mems(struct vioif_softc *);
+static struct workqueue*
+		vioif_workq_create(const char *, pri_t, int, int);
+static void	vioif_workq_destroy(struct workqueue *);
+static void	vioif_work_set(struct vioif_work *, void(*)(void *), void *);
+static void	vioif_work_add(struct workqueue *, struct vioif_work *);
+static void	vioif_work_wait(struct workqueue *, struct vioif_work *);
 static int	vioif_setup_sysctl(struct vioif_softc *);
 static void	vioif_setup_stats(struct vioif_softc *);
-static int	vioif_ifflags(struct vioif_softc *);
-static void	vioif_intr_barrier(void);
-static void	vioif_notify(struct virtio_softc *, struct virtqueue *);
 
 CFATTACH_DECL_NEW(vioif, sizeof(struct vioif_softc),
 		  vioif_match, vioif_attach, NULL, NULL);
 
-static int
-vioif_match(device_t parent, cfdata_t match, void *aux)
-{
-	struct virtio_attach_args *va = aux;
-
-	if (va->sc_childdevid == VIRTIO_DEVICE_ID_NETWORK)
-		return 1;
-
-	return 0;
-}
-
-static int
-vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
-    bus_size_t size, int nsegs, const char *usage)
+static void
+vioif_intr_barrier(void)
 {
-	int r;
-
-	r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
-	    nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
-
-	if (r != 0) {
-		aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
-		    "error code %d\n", usage, r);
-	}
 
-	return r;
+	/* wait for finish all interrupt handler */
+	xc_barrier(0);
 }
 
 static void
-vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
+vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq)
 {
 
-	if (*map) {
-		bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
-		*map = NULL;
-	}
+	virtio_enqueue_commit(vsc, vq, -1, true);
 }
 
 static int
-vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
-    void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
+vioif_match(device_t parent, cfdata_t match, void *aux)
 {
-	int r;
+	struct virtio_attach_args *va = aux;
 
-	r = vioif_dmamap_create(sc, map, size, nsegs, usage);
-	if (r != 0)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_NETWORK)
 		return 1;
 
-	r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
-	    size, NULL, rw | BUS_DMA_NOWAIT);
-	if (r != 0) {
-		vioif_dmamap_destroy(sc, map);
-		aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
-		    "error code %d\n", usage, r);
-	}
-
-	return r;
+	return 0;
 }
 
-static void *
-vioif_assign_mem(intptr_t *p, size_t size)
+static void
+vioif_attach(device_t parent, device_t self, void *aux)
 {
-	intptr_t rv;
+	struct vioif_softc *sc = device_private(self);
+	struct virtio_softc *vsc = device_private(parent);
+	struct vioif_netqueue *txq0;
+	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
+	uint64_t features, req_features;
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	u_int softint_flags;
+	int r, i, req_flags;
+	char xnamebuf[MAXCOMLEN];
+	size_t netq_num;
 
-	rv = *p;
-	*p += size;
+	if (virtio_child(vsc) != NULL) {
+		aprint_normal(": child already attached for %s; "
+		    "something wrong...\n", device_xname(parent));
+		return;
+	}
 
-	return (void *)rv;
-}
+	sc->sc_dev = self;
+	sc->sc_virtio = vsc;
+	sc->sc_link_state = LINK_STATE_UNKNOWN;
 
-static void
-vioif_alloc_queues(struct vioif_softc *sc)
-{
-	int nvq_pairs = sc->sc_max_nvq_pairs;
-	size_t nvqs, netq_num;
+	sc->sc_max_nvq_pairs = 1;
+	sc->sc_req_nvq_pairs = 1;
+	sc->sc_act_nvq_pairs = 1;
+	sc->sc_txrx_workqueue_sysctl = true;
+	sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
+	sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
+	sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
+	sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;
 
-	KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
+	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
 
-	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
-	if (sc->sc_has_ctrl)
-		nvqs++;
+	snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
+	sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
+	    IPL_NET, WQ_PERCPU | WQ_MPSAFE);
+	if (sc->sc_txrx_workqueue == NULL)
+		goto err;
 
-	sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
-	sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * netq_num,
-	    KM_SLEEP);
-}
+	req_flags = 0;
 
-static void
-vioif_free_queues(struct vioif_softc *sc)
-{
-	size_t nvqs, netq_num;
+#ifdef VIOIF_MPSAFE
+	req_flags |= VIRTIO_F_INTR_MPSAFE;
+#endif
+	req_flags |= VIRTIO_F_INTR_MSIX;
 
-	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
-	if (sc->sc_ctrlq.ctrlq_vq)
-		nvqs++;
+	req_features =
+	    VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
+	    VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
+	req_features |= VIRTIO_F_RING_EVENT_IDX;
+	req_features |= VIRTIO_NET_F_CTRL_MAC_ADDR;
+#ifdef VIOIF_MULTIQ
+	req_features |= VIRTIO_NET_F_MQ;
+#endif
+	virtio_child_attach_start(vsc, self, IPL_NET, NULL,
+	    vioif_config_change, virtio_vq_intrhand, req_flags,
+	    req_features, VIRTIO_NET_FLAG_BITS);
 
-	kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num);
-	kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
-	sc->sc_netqs = NULL;
-	sc->sc_vqs = NULL;
-}
+	features = virtio_features(vsc);
+	if (features == 0)
+		goto err;
 
-static int
-vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc,
-    size_t qid, u_int softint_flags)
-{
-	static const struct {
-		const char	*dirname;
-		int		 segsize;
-		int		 nsegs;
-		int 		(*intrhand)(void *);
-		void		(*sihand)(void *);
-	} params[VIOIF_NETQ_IDX] = {
-		[VIOIF_NETQ_RX] = {
-			.dirname	= "rx",
-			.segsize	= MCLBYTES,
-			.nsegs		= 2,
-			.intrhand	= vioif_rx_intr,
-			.sihand		= vioif_rx_handle,
-		},
-		[VIOIF_NETQ_TX] = {
-			.dirname	= "tx",
-			.segsize	= ETHER_MAX_LEN - ETHER_HDR_LEN,
-			.nsegs		= 2,
-			.intrhand	= vioif_tx_intr,
-			.sihand		= vioif_tx_handle,
+	if (features & VIRTIO_NET_F_MAC) {
+		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
+			sc->sc_mac[i] = virtio_read_device_config_1(vsc,
+			    VIRTIO_NET_CONFIG_MAC + i);
 		}
-	};
+	} else {
+		/* code stolen from sys/net/if_tap.c */
+		struct timeval tv;
+		uint32_t ui;
+		getmicrouptime(&tv);
+		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
+		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
+		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
+			virtio_write_device_config_1(vsc,
+			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
+		}
+	}
 
-	struct virtqueue *vq;
-	struct vioif_netqueue *netq;
-	struct vioif_tx_context *txc;
-	struct vioif_rx_context *rxc;
-	char qname[32];
-	int r, dir;
+	/* 'Ethernet' with capital follows other ethernet driver attachment */
+	aprint_normal_dev(self, "Ethernet address %s\n",
+	    ether_sprintf(sc->sc_mac));
 
-	txc = NULL;
-	rxc = NULL;
-	netq = &sc->sc_netqs[qid];
-	vq = &sc->sc_vqs[qid];
-	dir = VIOIF_NETQ_DIR(qid);
+	if (features & (VIRTIO_NET_F_MRG_RXBUF | VIRTIO_F_VERSION_1)) {
+		sc->sc_hdr_size = sizeof(struct virtio_net_hdr);
+	} else {
+		sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers);
+	}
 
-	netq->netq_vq = &sc->sc_vqs[qid];
-	netq->netq_stopping = false;
-	netq->netq_running_handle = false;
+	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
+	    (features & VIRTIO_NET_F_CTRL_RX)) {
+		sc->sc_has_ctrl = true;
 
-	snprintf(qname, sizeof(qname), "%s%zu",
-	    params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid));
-	snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
-	    "%s-%s", device_xname(sc->sc_dev), qname);
+		cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
+		mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
+		ctrlq->ctrlq_inuse = FREE;
+	} else {
+		sc->sc_has_ctrl = false;
+	}
 
-	mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
-	r = virtio_alloc_vq(vsc, vq, qid,
-	    params[dir].segsize + sc->sc_hdr_size,
-	    params[dir].nsegs, qname);
-	if (r != 0)
-		goto err;
-	netq->netq_vq = vq;
+	if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
+		sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
+		    VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);
 
-	netq->netq_vq->vq_intrhand = params[dir].intrhand;
-	netq->netq_vq->vq_intrhand_arg = netq;
-	netq->netq_softint = softint_establish(softint_flags,
-	    params[dir].sihand, netq);
-	if (netq->netq_softint == NULL) {
-		aprint_error_dev(sc->sc_dev,
-		    "couldn't establish %s softint\n",
-		    params[dir].dirname);
-		goto err;
+		if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
+			goto err;
+
+		/* Limit the number of queue pairs to use */
+		sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
 	}
-	vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
 
-	switch (dir) {
-	case VIOIF_NETQ_RX:
-		rxc = kmem_zalloc(sizeof(*rxc), KM_SLEEP);
-		netq->netq_ctx = rxc;
-		/* nothing to do */
-		break;
-	case VIOIF_NETQ_TX:
-		txc = kmem_zalloc(sizeof(*txc), KM_SLEEP);
-		netq->netq_ctx = (void *)txc;
-		txc->txc_deferred_transmit = softint_establish(softint_flags,
-		    vioif_deferred_transmit, netq);
-		if (txc->txc_deferred_transmit == NULL) {
-			aprint_error_dev(sc->sc_dev,
-			    "couldn't establish softint for "
-			    "tx deferred transmit\n");
+	vioif_alloc_queues(sc);
+	virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);
+
+#ifdef VIOIF_MPSAFE
+	softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
+#else
+	softint_flags = SOFTINT_NET;
+#endif
+
+	/*
+	 * Initialize network queues
+	 */
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		r = vioif_netqueue_init(sc, vsc, i, softint_flags);
+		if (r != 0)
 			goto err;
+	}
+
+	if (sc->sc_has_ctrl) {
+		int ctrlq_idx = sc->sc_max_nvq_pairs * 2;
+		/*
+		 * Allocating a virtqueue for control channel
+		 */
+		sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx];
+		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx,
+		    NBPG, 1, "control");
+		if (r != 0) {
+			aprint_error_dev(self, "failed to allocate "
+			    "a virtqueue for control channel, error code %d\n",
+			    r);
+
+			sc->sc_has_ctrl = false;
+			cv_destroy(&ctrlq->ctrlq_wait);
+			mutex_destroy(&ctrlq->ctrlq_wait_lock);
+		} else {
+			ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
+			ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
 		}
-		txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc);
-		txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP);
-		break;
 	}
 
-	return 0;
+	sc->sc_cfg_softint = softint_establish(softint_flags,
+	    vioif_cfg_softint, sc);
+	if (sc->sc_cfg_softint == NULL) {
+		aprint_error_dev(self, "cannot establish ctl softint\n");
+		goto err;
+	}
 
-err:
-	netq->netq_ctx = NULL;
+	if (vioif_alloc_mems(sc) < 0)
+		goto err;
 
-	if (rxc != NULL) {
-		kmem_free(rxc, sizeof(*rxc));
+	if (virtio_child_attach_finish(vsc) != 0)
+		goto err;
+
+	if (vioif_setup_sysctl(sc) != 0) {
+		aprint_error_dev(self, "unable to create sysctl node\n");
+		/* continue */
 	}
 
-	if (txc != NULL) {
-		if (txc->txc_deferred_transmit != NULL)
-			softint_disestablish(txc->txc_deferred_transmit);
-		if (txc->txc_intrq != NULL)
-			pcq_destroy(txc->txc_intrq);
-		kmem_free(txc, sizeof(txc));
+	vioif_setup_stats(sc);
+
+	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+#ifdef VIOIF_MPSAFE
+	ifp->if_extflags = IFEF_MPSAFE;
+#endif
+	ifp->if_start = vioif_start;
+	if (sc->sc_req_nvq_pairs > 1)
+		ifp->if_transmit = vioif_transmit;
+	ifp->if_ioctl = vioif_ioctl;
+	ifp->if_init = vioif_init;
+	ifp->if_stop = vioif_stop;
+	ifp->if_capabilities = 0;
+	ifp->if_watchdog = vioif_watchdog;
+	txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
+	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq0->netq_vq->vq_num, IFQ_MAXLEN));
+	IFQ_SET_READY(&ifp->if_snd);
+
+	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
+
+	if_attach(ifp);
+	if_deferred_start_init(ifp, NULL);
+	ether_ifattach(ifp, sc->sc_mac);
+	ether_set_ifflags_cb(&sc->sc_ethercom, vioif_ifflags_cb);
+
+	return;
+
+err:
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		vioif_netqueue_teardown(sc, vsc, i);
 	}
 
-	vioif_work_set(&netq->netq_work, NULL, NULL);
-	if (netq->netq_softint != NULL) {
-		softint_disestablish(netq->netq_softint);
-		netq->netq_softint = NULL;
+	if (sc->sc_has_ctrl) {
+		cv_destroy(&ctrlq->ctrlq_wait);
+		mutex_destroy(&ctrlq->ctrlq_wait_lock);
+		virtio_free_vq(vsc, ctrlq->ctrlq_vq);
+		ctrlq->ctrlq_vq = NULL;
 	}
-	netq->netq_vq->vq_intrhand = NULL;
-	netq->netq_vq->vq_intrhand_arg = NULL;
 
-	virtio_free_vq(vsc, vq);
-	mutex_destroy(&netq->netq_lock);
-	netq->netq_vq = NULL;
+	vioif_free_queues(sc);
+	mutex_destroy(&sc->sc_lock);
+	virtio_child_attach_failed(vsc);
+	config_finalize_register(self, vioif_finalize_teardown);
 
-	return -1;
+	return;
 }
 
-static void
-vioif_netqueue_teardown(struct vioif_softc *sc, struct virtio_softc *vsc,
-    size_t qid)
+static int
+vioif_finalize_teardown(device_t self)
 {
-	struct vioif_netqueue *netq;
-	struct vioif_rx_context *rxc;
-	struct vioif_tx_context *txc;
-	int dir;
-
-	netq = &sc->sc_netqs[qid];
-
-	if (netq->netq_vq == NULL)
-		return;
+	struct vioif_softc *sc = device_private(self);
 
-	netq = &sc->sc_netqs[qid];
-	dir = VIOIF_NETQ_DIR(qid);
-	switch (dir) {
-	case VIOIF_NETQ_RX:
-		rxc = netq->netq_ctx;
-		netq->netq_ctx = NULL;
-		kmem_free(rxc, sizeof(*rxc));
-		break;
-	case VIOIF_NETQ_TX:
-		txc = netq->netq_ctx;
-		netq->netq_ctx = NULL;
-		softint_disestablish(txc->txc_deferred_transmit);
-		pcq_destroy(txc->txc_intrq);
-		kmem_free(txc, sizeof(*txc));
-		break;
+	if (sc->sc_txrx_workqueue != NULL) {
+		vioif_workq_destroy(sc->sc_txrx_workqueue);
+		sc->sc_txrx_workqueue = NULL;
 	}
 
-	softint_disestablish(netq->netq_softint);
-	virtio_free_vq(vsc, netq->netq_vq);
-	mutex_destroy(&netq->netq_lock);
-	netq->netq_vq = NULL;
+	return 0;
 }
 
-/* allocate memory */
 /*
- * dma memory is used for:
- *   netq_maps_kva:	 metadata array for received frames (READ) and
- *			 sent frames (WRITE)
- *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
- *   ctrlq_status:	 return value for a command via ctrl vq (READ)
- *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
- *			 (WRITE)
- *   ctrlq_mac_tbl_uc:	 unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
- *			 class command (WRITE)
- *   ctrlq_mac_tbl_mc:	 multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
- *			 class command (WRITE)
- * ctrlq_* structures are allocated only one each; they are protected by
- * ctrlq_inuse variable and ctrlq_wait condvar.
+ * Interface functions for ifnet
  */
 static int
-vioif_alloc_mems(struct vioif_softc *sc)
+vioif_init(struct ifnet *ifp)
 {
+	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
 	struct vioif_netqueue *netq;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	struct vioif_net_map *maps;
-	unsigned int vq_num;
-	int r, rsegs;
-	bus_size_t dmamemsize;
-	size_t qid, i, netq_num, kmemsize;
-	void *vaddr;
-	intptr_t p;
-
-	netq_num = sc->sc_max_nvq_pairs * 2;
+	int r, i;
 
-	/* allocate DMA memory */
-	dmamemsize = 0;
+	vioif_stop(ifp, 0);
 
-	for (qid = 0; qid < netq_num; qid++) {
-		maps = sc->sc_netqs[qid].netq_maps;
-		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
-		dmamemsize += sizeof(*maps[0].vnm_hdr) * vq_num;
+	r = virtio_reinit_start(vsc);
+	if (r != 0) {
+		log(LOG_ERR, "%s: reset failed\n", ifp->if_xname);
+		return EIO;
 	}
 
-	if (sc->sc_has_ctrl) {
-		dmamemsize += sizeof(struct virtio_net_ctrl_cmd);
-		dmamemsize += sizeof(struct virtio_net_ctrl_status);
-		dmamemsize += sizeof(struct virtio_net_ctrl_rx);
-		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
-		    + ETHER_ADDR_LEN;
-		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
-		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
-		dmamemsize += sizeof(struct virtio_net_ctrl_mac_addr);
-		dmamemsize += sizeof(struct virtio_net_ctrl_mq);
-	}
+	virtio_negotiate_features(vsc, virtio_features(vsc));
 
-	r = bus_dmamem_alloc(virtio_dmat(vsc), dmamemsize, 0, 0,
-	    &sc->sc_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
-	if (r != 0) {
-		aprint_error_dev(sc->sc_dev,
-		    "DMA memory allocation failed, size %zu, "
-		    "error code %d\n", dmamemsize, r);
-		goto err_none;
-	}
-	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_segs[0], 1,
-	    dmamemsize, &vaddr, BUS_DMA_NOWAIT);
-	if (r != 0) {
-		aprint_error_dev(sc->sc_dev,
-		    "DMA memory map failed, error code %d\n", r);
-		goto err_dmamem_alloc;
+	for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
+		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
+
+		mutex_enter(&netq->netq_lock);
+		vioif_populate_rx_mbufs_locked(sc, netq);
+		mutex_exit(&netq->netq_lock);
 	}
 
-	/* assign DMA memory */
-	memset(vaddr, 0, dmamemsize);
-	sc->sc_dmamem = vaddr;
-	p = (intptr_t) vaddr;
+	virtio_reinit_end(vsc);
 
-	for (qid = 0; qid < netq_num; qid++) {
-		netq = &sc->sc_netqs[qid];
-		maps = netq->netq_maps;
-		vq_num = netq->netq_vq->vq_num;
+	if (sc->sc_has_ctrl)
+		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
 
-		netq->netq_maps_kva = vioif_assign_mem(&p,
-		    sizeof(*maps[0].vnm_hdr) * vq_num);
-	}
+	r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
+	if (r == 0)
+		sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
+	else
+		sc->sc_act_nvq_pairs = 1;
 
-	if (sc->sc_has_ctrl) {
-		ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_cmd));
-		ctrlq->ctrlq_status = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_status));
-		ctrlq->ctrlq_rx = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_rx));
-		ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
-		    + ETHER_ADDR_LEN);
-		ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
-		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
-		ctrlq->ctrlq_mac_addr = vioif_assign_mem(&p,
-		    sizeof(*ctrlq->ctrlq_mac_addr));
-		ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
-	}
+	SET(ifp->if_flags, IFF_RUNNING);
+	CLR(ifp->if_flags, IFF_OACTIVE);
 
-	/* allocate kmem */
-	kmemsize = 0;
+	vioif_net_intr_enable(sc, vsc);
 
-	for (qid = 0; qid < netq_num; qid++) {
-		netq = &sc->sc_netqs[qid];
-		vq_num = netq->netq_vq->vq_num;
+	vioif_update_link_status(sc);
+	r = vioif_rx_filter(sc);
 
-		kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
-	}
+	return r;
+}
 
-	vaddr = kmem_zalloc(kmemsize, KM_SLEEP);
-	sc->sc_kmem = vaddr;
+static void
+vioif_stop(struct ifnet *ifp, int disable)
+{
+	struct vioif_softc *sc = ifp->if_softc;
+	struct virtio_softc *vsc = sc->sc_virtio;
+	struct vioif_netqueue *netq;
+	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
+	size_t i, act_qnum;
 
-	/* assign allocated kmem */
-	p = (intptr_t) vaddr;
+	act_qnum = sc->sc_act_nvq_pairs * 2;
 
-	for (qid = 0; qid < netq_num; qid++) {
-		netq = &sc->sc_netqs[qid];
-		vq_num = netq->netq_vq->vq_num;
+	CLR(ifp->if_flags, IFF_RUNNING);
+	for (i = 0; i < act_qnum; i++) {
+		netq = &sc->sc_netqs[i];
 
-		netq->netq_maps = vioif_assign_mem(&p,
-		    sizeof(netq->netq_maps[0]) * vq_num);
+		mutex_enter(&netq->netq_lock);
+		netq->netq_stopping = true;
+		mutex_exit(&netq->netq_lock);
 	}
 
-	/* prepare dmamaps */
-	for (qid = 0; qid < netq_num; qid++) {
-		static const struct {
-			const char	*msg_hdr;
-			const char	*msg_payload;
-			int		 dma_flag;
-			bus_size_t	 dma_size;
-			int		 dma_nsegs;
-		} dmaparams[VIOIF_NETQ_IDX] = {
-			[VIOIF_NETQ_RX] = {
-				.msg_hdr	= "rx header",
-				.msg_payload	= "rx payload",
-				.dma_flag	= BUS_DMA_READ,
-				.dma_size	= MCLBYTES - ETHER_ALIGN,
-				.dma_nsegs	= 1,
-			},
-			[VIOIF_NETQ_TX] = {
-				.msg_hdr	= "tx header",
-				.msg_payload	= "tx payload",
-				.dma_flag	= BUS_DMA_WRITE,
-				.dma_size	= ETHER_MAX_LEN,
-				.dma_nsegs	= VIRTIO_NET_TX_MAXNSEGS,
-			}
-		};
-
-		struct virtio_net_hdr *hdrs;
-		int dir;
+	/* disable interrupts */
+	vioif_net_intr_disable(sc, vsc);
+	if (sc->sc_has_ctrl)
+		virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);
 
-		dir = VIOIF_NETQ_DIR(qid);
-		netq = &sc->sc_netqs[qid];
-		vq_num = netq->netq_vq->vq_num;
-		maps = netq->netq_maps;
-		hdrs = netq->netq_maps_kva;
+	/*
+	 * only way to stop interrupt, I/O and DMA is resetting...
+	 *
+	 * NOTE: Devices based on VirtIO draft specification can not
+	 * stop interrupt completely even if virtio_stop_vq_intr() is called.
+	 */
+	virtio_reset(vsc);
 
-		for (i = 0; i < vq_num; i++) {
-			maps[i].vnm_hdr = &hdrs[i];
-	
-			r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map,
-			    maps[i].vnm_hdr, sc->sc_hdr_size, 1,
-			    dmaparams[dir].dma_flag, dmaparams[dir].msg_hdr);
-			if (r != 0)
-				goto err_reqs;
+	vioif_intr_barrier();
 
-			r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map,
-			    dmaparams[dir].dma_size, dmaparams[dir].dma_nsegs,
-			    dmaparams[dir].msg_payload);
-			if (r != 0)
-				goto err_reqs;
-		}
+	for (i = 0; i < act_qnum; i++) {
+		netq = &sc->sc_netqs[i];
+		vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
 	}
 
-	if (sc->sc_has_ctrl) {
-		/* control vq class & command */
-		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
-		    ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
-		    BUS_DMA_WRITE, "control command");
-		if (r != 0)
-			goto err_reqs;
-
-		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
-		    ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
-		    BUS_DMA_READ, "control status");
-		if (r != 0)
-			goto err_reqs;
-
-		/* control vq rx mode command parameter */
-		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
-		    ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
-		    BUS_DMA_WRITE, "rx mode control command");
-		if (r != 0)
-			goto err_reqs;
-
-		/* multiqueue set command */
-		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
-		    ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
-		    BUS_DMA_WRITE, "multiqueue set command");
-		if (r != 0)
-			goto err_reqs;
-
-		/* control vq MAC filter table for unicast */
-		/* do not load now since its length is variable */
-		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
-		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
-		    + ETHER_ADDR_LEN, 1,
-		    "unicast MAC address filter command");
-		if (r != 0)
-			goto err_reqs;
-
-		/* control vq MAC filter table for multicast */
-		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
-		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
-		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
-		    "multicast MAC address filter command");
-		if (r != 0)
-			goto err_reqs;
+	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
+		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
+		vioif_rx_queue_clear(sc, vsc, netq);
 
-		/* control vq MAC address set command */
-		r = vioif_dmamap_create_load(sc,
-		    &ctrlq->ctrlq_mac_addr_dmamap,
-		    ctrlq->ctrlq_mac_addr,
-		    sizeof(*ctrlq->ctrlq_mac_addr), 1,
-		    BUS_DMA_WRITE, "mac addr set command");
-		if (r != 0)
-			goto err_reqs;
+		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
+		vioif_tx_queue_clear(sc, vsc, netq);
 	}
 
-	return 0;
-
-err_reqs:
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
-	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_mac_addr_dmamap);
-	for (qid = 0; qid < netq_num; qid++) {
-		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
-		maps = sc->sc_netqs[qid].netq_maps;
+	/* all packet processing is stopped */
+	for (i = 0; i < act_qnum; i++) {
+		netq = &sc->sc_netqs[i];
 
-		for (i = 0; i < vq_num; i++) {
-			vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map);
-			vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map);
-		}
-	}
-	if (sc->sc_kmem) {
-		kmem_free(sc->sc_kmem, kmemsize);
-		sc->sc_kmem = NULL;
+		mutex_enter(&netq->netq_lock);
+		netq->netq_stopping = false;
+		mutex_exit(&netq->netq_lock);
 	}
-	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, dmamemsize);
-err_dmamem_alloc:
-	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_segs[0], 1);
-err_none:
-	return -1;
 }
 
 static void
-vioif_attach(device_t parent, device_t self, void *aux)
+vioif_start(struct ifnet *ifp)
 {
-	struct vioif_softc *sc = device_private(self);
-	struct virtio_softc *vsc = device_private(parent);
-	struct vioif_netqueue *txq0;
-	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	uint64_t features, req_features;
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
-	u_int softint_flags;
-	int r, i, req_flags;
-	char xnamebuf[MAXCOMLEN];
-	size_t netq_num;
-
-	if (virtio_child(vsc) != NULL) {
-		aprint_normal(": child already attached for %s; "
-		    "something wrong...\n", device_xname(parent));
-		return;
-	}
+	struct vioif_softc *sc = ifp->if_softc;
+	struct vioif_netqueue *txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
 
-	sc->sc_dev = self;
-	sc->sc_virtio = vsc;
-	sc->sc_link_state = LINK_STATE_UNKNOWN;
+#ifdef VIOIF_MPSAFE
+	KASSERT(if_is_mpsafe(ifp));
+#endif
 
-	sc->sc_max_nvq_pairs = 1;
-	sc->sc_req_nvq_pairs = 1;
-	sc->sc_act_nvq_pairs = 1;
-	sc->sc_txrx_workqueue_sysctl = true;
-	sc->sc_tx_intr_process_limit = VIOIF_TX_INTR_PROCESS_LIMIT;
-	sc->sc_tx_process_limit = VIOIF_TX_PROCESS_LIMIT;
-	sc->sc_rx_intr_process_limit = VIOIF_RX_INTR_PROCESS_LIMIT;
-	sc->sc_rx_process_limit = VIOIF_RX_PROCESS_LIMIT;
+	mutex_enter(&txq0->netq_lock);
+	vioif_start_locked(ifp, txq0);
+	mutex_exit(&txq0->netq_lock);
+}
 
-	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
+static inline int
+vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
+{
+	struct vioif_softc *sc = ifp->if_softc;
+	u_int cpuid = cpu_index(curcpu());
 
-	snprintf(xnamebuf, sizeof(xnamebuf), "%s_txrx", device_xname(self));
-	sc->sc_txrx_workqueue = vioif_workq_create(xnamebuf, VIOIF_WORKQUEUE_PRI,
-	    IPL_NET, WQ_PERCPU | WQ_MPSAFE);
-	if (sc->sc_txrx_workqueue == NULL)
-		goto err;
+	return VIOIF_NETQ_TXQID(cpuid % sc->sc_act_nvq_pairs);
+}
 
-	req_flags = 0;
+static int
+vioif_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+	struct vioif_softc *sc = ifp->if_softc;
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
+	int qid;
 
-#ifdef VIOIF_MPSAFE
-	req_flags |= VIRTIO_F_INTR_MPSAFE;
-#endif
-	req_flags |= VIRTIO_F_INTR_MSIX;
+	qid = vioif_select_txqueue(ifp, m);
+	netq = &sc->sc_netqs[qid];
+	txc = netq->netq_ctx;
 
-	req_features =
-	    VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
-	    VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
-	req_features |= VIRTIO_F_RING_EVENT_IDX;
-	req_features |= VIRTIO_NET_F_CTRL_MAC_ADDR;
-#ifdef VIOIF_MULTIQ
-	req_features |= VIRTIO_NET_F_MQ;
-#endif
-	virtio_child_attach_start(vsc, self, IPL_NET, NULL,
-	    vioif_config_change, virtio_vq_intrhand, req_flags,
-	    req_features, VIRTIO_NET_FLAG_BITS);
+	if (__predict_false(!pcq_put(txc->txc_intrq, m))) {
+		m_freem(m);
+		return ENOBUFS;
+	}
 
-	features = virtio_features(vsc);
-	if (features == 0)
-		goto err;
+	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
+	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
+	if (m->m_flags & M_MCAST)
+		if_statinc_ref(nsr, if_omcasts);
+	IF_STAT_PUTREF(ifp);
 
-	if (features & VIRTIO_NET_F_MAC) {
-		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
-			sc->sc_mac[i] = virtio_read_device_config_1(vsc,
-			    VIRTIO_NET_CONFIG_MAC + i);
-		}
-	} else {
-		/* code stolen from sys/net/if_tap.c */
-		struct timeval tv;
-		uint32_t ui;
-		getmicrouptime(&tv);
-		ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
-		memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
-		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
-			virtio_write_device_config_1(vsc,
-			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
-		}
+	if (mutex_tryenter(&netq->netq_lock)) {
+		vioif_transmit_locked(ifp, netq);
+		mutex_exit(&netq->netq_lock);
 	}
 
-	/* 'Ethernet' with capital follows other ethernet driver attachment */
-	aprint_normal_dev(self, "Ethernet address %s\n",
-	    ether_sprintf(sc->sc_mac));
+	return 0;
+}
 
-	if (features & (VIRTIO_NET_F_MRG_RXBUF | VIRTIO_F_VERSION_1)) {
-		sc->sc_hdr_size = sizeof(struct virtio_net_hdr);
-	} else {
-		sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers);
-	}
+void
+vioif_watchdog(struct ifnet *ifp)
+{
+	struct vioif_softc *sc = ifp->if_softc;
+	struct vioif_netqueue *netq;
+	int i;
 
-	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
-	    (features & VIRTIO_NET_F_CTRL_RX)) {
-		sc->sc_has_ctrl = true;
+	if (ifp->if_flags & IFF_RUNNING) {
+		for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
+			netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
 
-		cv_init(&ctrlq->ctrlq_wait, "ctrl_vq");
-		mutex_init(&ctrlq->ctrlq_wait_lock, MUTEX_DEFAULT, IPL_NET);
-		ctrlq->ctrlq_inuse = FREE;
-	} else {
-		sc->sc_has_ctrl = false;
+			mutex_enter(&netq->netq_lock);
+			if (!netq->netq_running_handle) {
+				netq->netq_running_handle = true;
+				vioif_net_sched_handle(sc, netq);
+			}
+			mutex_exit(&netq->netq_lock);
+		}
 	}
+}
 
-	if (sc->sc_has_ctrl && (features & VIRTIO_NET_F_MQ)) {
-		sc->sc_max_nvq_pairs = virtio_read_device_config_2(vsc,
-		    VIRTIO_NET_CONFIG_MAX_VQ_PAIRS);
+static int
+vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
+{
+	int s, r;
 
-		if (sc->sc_max_nvq_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
-			goto err;
+	s = splnet();
 
-		/* Limit the number of queue pairs to use */
-		sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu);
+	r = ether_ioctl(ifp, cmd, data);
+	if (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)) {
+		if (ifp->if_flags & IFF_RUNNING) {
+			r = vioif_rx_filter(ifp->if_softc);
+		} else {
+			r = 0;
+		}
 	}
 
-	vioif_alloc_queues(sc);
-	virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs);
+	splx(s);
 
-#ifdef VIOIF_MPSAFE
-	softint_flags = SOFTINT_NET | SOFTINT_MPSAFE;
-#else
-	softint_flags = SOFTINT_NET;
-#endif
+	return r;
+}
 
-	/*
-	 * Initialize network queues
-	 */
-	netq_num = sc->sc_max_nvq_pairs * 2;
-	for (i = 0; i < netq_num; i++) {
-		r = vioif_netqueue_init(sc, vsc, i, softint_flags);
-		if (r != 0)
-			goto err;
-	}
+static int
+vioif_ifflags(struct vioif_softc *sc)
+{
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	bool onoff;
+	int r;
 
-	if (sc->sc_has_ctrl) {
-		int ctrlq_idx = sc->sc_max_nvq_pairs * 2;
-		/*
-		 * Allocating a virtqueue for control channel
-		 */
-		sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx];
-		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx,
-		    NBPG, 1, "control");
-		if (r != 0) {
-			aprint_error_dev(self, "failed to allocate "
-			    "a virtqueue for control channel, error code %d\n",
-			    r);
+	if (!sc->sc_has_ctrl) {
+		/* no ctrl vq; always promisc and allmulti */
+		ifp->if_flags |= (IFF_PROMISC | IFF_ALLMULTI);
+		return 0;
+	}
 
-			sc->sc_has_ctrl = false;
-			cv_destroy(&ctrlq->ctrlq_wait);
-			mutex_destroy(&ctrlq->ctrlq_wait_lock);
-		} else {
-			ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
-			ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
+	onoff = ifp->if_flags & IFF_ALLMULTI ? true : false;
+	r = vioif_set_allmulti(sc, onoff);
+	if (r != 0) {
+		log(LOG_WARNING,
+		    "%s: couldn't %sable ALLMULTI\n",
+		    ifp->if_xname, onoff ? "en" : "dis");
+		if (onoff == false) {
+			ifp->if_flags |= IFF_ALLMULTI;
 		}
 	}
 
-	sc->sc_ctl_softint = softint_establish(softint_flags,
-	    vioif_ctl_softint, sc);
-	if (sc->sc_ctl_softint == NULL) {
-		aprint_error_dev(self, "cannot establish ctl softint\n");
-		goto err;
+	onoff = ifp->if_flags & IFF_PROMISC ? true : false;
+	r = vioif_set_promisc(sc, onoff);
+	if (r != 0) {
+		log(LOG_WARNING,
+		    "%s: couldn't %sable PROMISC\n",
+		    ifp->if_xname, onoff ? "en" : "dis");
+		if (onoff == false) {
+			ifp->if_flags |= IFF_PROMISC;
+		}
 	}
 
-	if (vioif_alloc_mems(sc) < 0)
-		goto err;
+	return 0;
+}
 
-	if (virtio_child_attach_finish(vsc) != 0)
-		goto err;
+static int
+vioif_ifflags_cb(struct ethercom *ec)
+{
+	struct ifnet *ifp = &ec->ec_if;
+	struct vioif_softc *sc = ifp->if_softc;
 
-	if (vioif_setup_sysctl(sc) != 0) {
-		aprint_error_dev(self, "unable to create sysctl node\n");
-		/* continue */
-	}
+	return vioif_ifflags(sc);
+}
 
-	vioif_setup_stats(sc);
+static int
+vioif_setup_sysctl(struct vioif_softc *sc)
+{
+	const char *devname;
+	struct sysctllog **log;
+	const struct sysctlnode *rnode, *rxnode, *txnode;
+	int error;
+
+	log = &sc->sc_sysctllog;
+	devname = device_xname(sc->sc_dev);
+
+	error = sysctl_createv(log, 0, NULL, &rnode,
+	    0, CTLTYPE_NODE, devname,
+	    SYSCTL_DESCR("virtio-net information and settings"),
+	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
+
+	error = sysctl_createv(log, 0, &rnode, NULL,
+	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
+	    SYSCTL_DESCR("Use workqueue for packet processing"),
+	    NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
-	ifp->if_softc = sc;
-	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
-#ifdef VIOIF_MPSAFE
-	ifp->if_extflags = IFEF_MPSAFE;
-#endif
-	ifp->if_start = vioif_start;
-	if (sc->sc_req_nvq_pairs > 1)
-		ifp->if_transmit = vioif_transmit;
-	ifp->if_ioctl = vioif_ioctl;
-	ifp->if_init = vioif_init;
-	ifp->if_stop = vioif_stop;
-	ifp->if_capabilities = 0;
-	ifp->if_watchdog = vioif_watchdog;
-	txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
-	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq0->netq_vq->vq_num, IFQ_MAXLEN));
-	IFQ_SET_READY(&ifp->if_snd);
+	error = sysctl_createv(log, 0, &rnode, &rxnode,
+	    0, CTLTYPE_NODE, "rx",
+	    SYSCTL_DESCR("virtio-net information and settings for Rx"),
+	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
+	error = sysctl_createv(log, 0, &rxnode, NULL,
+	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
+	    SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
+	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-	if_attach(ifp);
-	if_deferred_start_init(ifp, NULL);
-	ether_ifattach(ifp, sc->sc_mac);
-	ether_set_ifflags_cb(&sc->sc_ethercom, vioif_ifflags_cb);
+	error = sysctl_createv(log, 0, &rxnode, NULL,
+	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
+	    SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
+	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-	return;
+	error = sysctl_createv(log, 0, &rnode, &txnode,
+	    0, CTLTYPE_NODE, "tx",
+	    SYSCTL_DESCR("virtio-net information and settings for Tx"),
+	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-err:
-	netq_num = sc->sc_max_nvq_pairs * 2;
-	for (i = 0; i < netq_num; i++) {
-		vioif_netqueue_teardown(sc, vsc, i);
-	}
+	error = sysctl_createv(log, 0, &txnode, NULL,
+	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
+	    SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
+	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
+	if (error)
+		goto out;
 
-	if (sc->sc_has_ctrl) {
-		cv_destroy(&ctrlq->ctrlq_wait);
-		mutex_destroy(&ctrlq->ctrlq_wait_lock);
-		virtio_free_vq(vsc, ctrlq->ctrlq_vq);
-		ctrlq->ctrlq_vq = NULL;
-	}
+	error = sysctl_createv(log, 0, &txnode, NULL,
+	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
+	    SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
+	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
 
-	vioif_free_queues(sc);
-	mutex_destroy(&sc->sc_lock);
-	virtio_child_attach_failed(vsc);
-	config_finalize_register(self, vioif_finalize_teardown);
+out:
+	if (error)
+		sysctl_teardown(log);
 
-	return;
+	return error;
 }
 
-static int
-vioif_finalize_teardown(device_t self)
+static void
+vioif_setup_stats(struct vioif_softc *sc)
 {
-	struct vioif_softc *sc = device_private(self);
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
+	struct vioif_rx_context *rxc;
+	size_t i, netq_num;
 
-	if (sc->sc_txrx_workqueue != NULL) {
-		vioif_workq_destroy(sc->sc_txrx_workqueue);
-		sc->sc_txrx_workqueue = NULL;
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		netq = &sc->sc_netqs[i];
+		evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
+		    NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
+		evcnt_attach_dynamic(&netq->netq_enqueue_failed,
+		    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+		    "virtqueue enqueue failed failed");
+
+		switch (VIOIF_NETQ_DIR(i)) {
+		case VIOIF_NETQ_RX:
+			rxc = netq->netq_ctx;
+			evcnt_attach_dynamic(&rxc->rxc_mbuf_enobufs,
+			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+			    "no receive buffer");
+			break;
+		case VIOIF_NETQ_TX:
+			txc = netq->netq_ctx;
+			evcnt_attach_dynamic(&txc->txc_defrag_failed,
+			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+			    "m_defrag() failed");
+			break;
+		}
 	}
 
-	return 0;
+	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC,
+	    NULL, device_xname(sc->sc_dev), "control command dmamap load failed");
+	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_failed, EVCNT_TYPE_MISC,
+	    NULL, device_xname(sc->sc_dev), "control command failed");
 }
 
 /*
- * Interface functions for ifnet
+ * allocate memory
  */
 static int
-vioif_init(struct ifnet *ifp)
+vioif_dmamap_create(struct vioif_softc *sc, bus_dmamap_t *map,
+    bus_size_t size, int nsegs, const char *usage)
 {
-	struct vioif_softc *sc = ifp->if_softc;
-	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_netqueue *netq;
-	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	int r, i;
+	int r;
 
-	vioif_stop(ifp, 0);
+	r = bus_dmamap_create(virtio_dmat(sc->sc_virtio), size,
+	    nsegs, size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, map);
 
-	r = virtio_reinit_start(vsc);
 	if (r != 0) {
-		log(LOG_ERR, "%s: reset failed\n", ifp->if_xname);
-		return EIO;
+		aprint_error_dev(sc->sc_dev, "%s dmamap creation failed, "
+		    "error code %d\n", usage, r);
 	}
 
-	virtio_negotiate_features(vsc, virtio_features(vsc));
+	return r;
+}
 
-	for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
-		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
+static void
+vioif_dmamap_destroy(struct vioif_softc *sc, bus_dmamap_t *map)
+{
 
-		mutex_enter(&netq->netq_lock);
-		vioif_populate_rx_mbufs_locked(sc, netq);
-		mutex_exit(&netq->netq_lock);
+	if (*map) {
+		bus_dmamap_destroy(virtio_dmat(sc->sc_virtio), *map);
+		*map = NULL;
 	}
+}
 
-	virtio_reinit_end(vsc);
+static int
+vioif_dmamap_create_load(struct vioif_softc *sc, bus_dmamap_t *map,
+    void *buf, bus_size_t size, int nsegs, int rw, const char *usage)
+{
+	int r;
 
-	if (sc->sc_has_ctrl)
-		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
+	r = vioif_dmamap_create(sc, map, size, nsegs, usage);
+	if (r != 0)
+		return 1;
 
-	r = vioif_ctrl_mq_vq_pairs_set(sc, sc->sc_req_nvq_pairs);
-	if (r == 0)
-		sc->sc_act_nvq_pairs = sc->sc_req_nvq_pairs;
-	else
-		sc->sc_act_nvq_pairs = 1;
+	r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), *map, buf,
+	    size, NULL, rw | BUS_DMA_NOWAIT);
+	if (r != 0) {
+		vioif_dmamap_destroy(sc, map);
+		aprint_error_dev(sc->sc_dev, "%s dmamap load failed. "
+		    "error code %d\n", usage, r);
+	}
 
-	SET(ifp->if_flags, IFF_RUNNING);
-	CLR(ifp->if_flags, IFF_OACTIVE);
+	return r;
+}
 
-	vioif_net_intr_enable(sc, vsc);
+static void *
+vioif_assign_mem(intptr_t *p, size_t size)
+{
+	intptr_t rv;
 
-	vioif_update_link_status(sc);
-	r = vioif_rx_filter(sc);
+	rv = *p;
+	*p += size;
 
-	return r;
+	return (void *)rv;
 }
 
-static void
-vioif_stop(struct ifnet *ifp, int disable)
+/*
+ * dma memory is used for:
+ *   netq_maps_kva:	 metadata array for received frames (READ) and
+ *			 sent frames (WRITE)
+ *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
+ *   ctrlq_status:	 return value for a command via ctrl vq (READ)
+ *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
+ *			 (WRITE)
+ *   ctrlq_mac_tbl_uc:	 unicast MAC address filter for a VIRTIO_NET_CTRL_MAC
+ *			 class command (WRITE)
+ *   ctrlq_mac_tbl_mc:	 multicast MAC address filter for a VIRTIO_NET_CTRL_MAC
+ *			 class command (WRITE)
+ * ctrlq_* structures are allocated only one each; they are protected by
+ * ctrlq_inuse variable and ctrlq_wait condvar.
+ */
+static int
+vioif_alloc_mems(struct vioif_softc *sc)
 {
-	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
 	struct vioif_netqueue *netq;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	size_t i, act_qnum;
+	struct vioif_net_map *maps;
+	unsigned int vq_num;
+	int r, rsegs;
+	bus_size_t dmamemsize;
+	size_t qid, i, netq_num, kmemsize;
+	void *vaddr;
+	intptr_t p;
+
+	netq_num = sc->sc_max_nvq_pairs * 2;
+
+	/* allocate DMA memory */
+	dmamemsize = 0;
+
+	for (qid = 0; qid < netq_num; qid++) {
+		maps = sc->sc_netqs[qid].netq_maps;
+		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
+		dmamemsize += sizeof(*maps[0].vnm_hdr) * vq_num;
+	}
+
+	if (sc->sc_has_ctrl) {
+		dmamemsize += sizeof(struct virtio_net_ctrl_cmd);
+		dmamemsize += sizeof(struct virtio_net_ctrl_status);
+		dmamemsize += sizeof(struct virtio_net_ctrl_rx);
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
+		    + ETHER_ADDR_LEN;
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
+		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_addr);
+		dmamemsize += sizeof(struct virtio_net_ctrl_mq);
+	}
+
+	r = bus_dmamem_alloc(virtio_dmat(vsc), dmamemsize, 0, 0,
+	    &sc->sc_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
+	if (r != 0) {
+		aprint_error_dev(sc->sc_dev,
+		    "DMA memory allocation failed, size %zu, "
+		    "error code %d\n", dmamemsize, r);
+		goto err_none;
+	}
+	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_segs[0], 1,
+	    dmamemsize, &vaddr, BUS_DMA_NOWAIT);
+	if (r != 0) {
+		aprint_error_dev(sc->sc_dev,
+		    "DMA memory map failed, error code %d\n", r);
+		goto err_dmamem_alloc;
+	}
 
-	act_qnum = sc->sc_act_nvq_pairs * 2;
+	/* assign DMA memory */
+	memset(vaddr, 0, dmamemsize);
+	sc->sc_dmamem = vaddr;
+	p = (intptr_t) vaddr;
 
-	CLR(ifp->if_flags, IFF_RUNNING);
-	for (i = 0; i < act_qnum; i++) {
-		netq = &sc->sc_netqs[i];
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		maps = netq->netq_maps;
+		vq_num = netq->netq_vq->vq_num;
 
-		mutex_enter(&netq->netq_lock);
-		netq->netq_stopping = true;
-		mutex_exit(&netq->netq_lock);
+		netq->netq_maps_kva = vioif_assign_mem(&p,
+		    sizeof(*maps[0].vnm_hdr) * vq_num);
 	}
 
-	/* disable interrupts */
-	vioif_net_intr_disable(sc, vsc);
-	if (sc->sc_has_ctrl)
-		virtio_stop_vq_intr(vsc, ctrlq->ctrlq_vq);
+	if (sc->sc_has_ctrl) {
+		ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_cmd));
+		ctrlq->ctrlq_status = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_status));
+		ctrlq->ctrlq_rx = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_rx));
+		ctrlq->ctrlq_mac_tbl_uc = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
+		    + ETHER_ADDR_LEN);
+		ctrlq->ctrlq_mac_tbl_mc = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
+		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES);
+		ctrlq->ctrlq_mac_addr = vioif_assign_mem(&p,
+		    sizeof(*ctrlq->ctrlq_mac_addr));
+		ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
+	}
 
-	/*
-	 * only way to stop interrupt, I/O and DMA is resetting...
-	 *
-	 * NOTE: Devices based on VirtIO draft specification can not
-	 * stop interrupt completely even if virtio_stop_vq_intr() is called.
-	 */
-	virtio_reset(vsc);
+	/* allocate kmem */
+	kmemsize = 0;
 
-	vioif_intr_barrier();
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
 
-	for (i = 0; i < act_qnum; i++) {
-		netq = &sc->sc_netqs[i];
-		vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
+		kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
 	}
 
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
-		vioif_rx_queue_clear(sc, vsc, netq);
+	vaddr = kmem_zalloc(kmemsize, KM_SLEEP);
+	sc->sc_kmem = vaddr;
 
-		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
-		vioif_tx_queue_clear(sc, vsc, netq);
-	}
+	/* assign allocated kmem */
+	p = (intptr_t) vaddr;
 
-	/* all packet processing is stopped */
-	for (i = 0; i < act_qnum; i++) {
-		netq = &sc->sc_netqs[i];
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
 
-		mutex_enter(&netq->netq_lock);
-		netq->netq_stopping = false;
-		mutex_exit(&netq->netq_lock);
+		netq->netq_maps = vioif_assign_mem(&p,
+		    sizeof(netq->netq_maps[0]) * vq_num);
 	}
-}
 
-static void
-vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
-    bool is_transmit)
-{
-	struct vioif_softc *sc = ifp->if_softc;
-	struct virtio_softc *vsc = sc->sc_virtio;
-	struct virtqueue *vq = netq->netq_vq;
-	struct vioif_tx_context *txc;
-	struct vioif_net_map *map;
-	struct mbuf *m;
-	int queued = 0;
-
-	KASSERT(mutex_owned(&netq->netq_lock));
+	/* prepare dmamaps */
+	for (qid = 0; qid < netq_num; qid++) {
+		static const struct {
+			const char	*msg_hdr;
+			const char	*msg_payload;
+			int		 dma_flag;
+			bus_size_t	 dma_size;
+			int		 dma_nsegs;
+		} dmaparams[VIOIF_NETQ_IDX] = {
+			[VIOIF_NETQ_RX] = {
+				.msg_hdr	= "rx header",
+				.msg_payload	= "rx payload",
+				.dma_flag	= BUS_DMA_READ,
+				.dma_size	= MCLBYTES - ETHER_ALIGN,
+				.dma_nsegs	= 1,
+			},
+			[VIOIF_NETQ_TX] = {
+				.msg_hdr	= "tx header",
+				.msg_payload	= "tx payload",
+				.dma_flag	= BUS_DMA_WRITE,
+				.dma_size	= ETHER_MAX_LEN,
+				.dma_nsegs	= VIRTIO_NET_TX_MAXNSEGS,
+			}
+		};
 
-	if (netq->netq_stopping ||
-	    !ISSET(ifp->if_flags, IFF_RUNNING))
-		return;
+		struct virtio_net_hdr *hdrs;
+		int dir;
 
-	txc = netq->netq_ctx;
+		dir = VIOIF_NETQ_DIR(qid);
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
+		maps = netq->netq_maps;
+		hdrs = netq->netq_maps_kva;
 
-	if (!txc->txc_link_active)
-		return;
+		for (i = 0; i < vq_num; i++) {
+			maps[i].vnm_hdr = &hdrs[i];
+	
+			r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map,
+			    maps[i].vnm_hdr, sc->sc_hdr_size, 1,
+			    dmaparams[dir].dma_flag, dmaparams[dir].msg_hdr);
+			if (r != 0)
+				goto err_reqs;
 
-	if (!is_transmit &&
-	    ISSET(ifp->if_flags, IFF_OACTIVE))
-		return;
+			r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map,
+			    dmaparams[dir].dma_size, dmaparams[dir].dma_nsegs,
+			    dmaparams[dir].msg_payload);
+			if (r != 0)
+				goto err_reqs;
+		}
+	}
 
-	for (;;) {
-		int slot, r;
-		r = virtio_enqueue_prep(vsc, vq, &slot);
-		if (r == EAGAIN)
-			break;
-		if (__predict_false(r != 0))
-			panic("enqueue_prep for tx buffers");
+	if (sc->sc_has_ctrl) {
+		/* control vq class & command */
+		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_cmd_dmamap,
+		    ctrlq->ctrlq_cmd, sizeof(*ctrlq->ctrlq_cmd), 1,
+		    BUS_DMA_WRITE, "control command");
+		if (r != 0)
+			goto err_reqs;
 
-		if (is_transmit)
-			m = pcq_get(txc->txc_intrq);
-		else
-			IFQ_DEQUEUE(&ifp->if_snd, m);
+		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_status_dmamap,
+		    ctrlq->ctrlq_status, sizeof(*ctrlq->ctrlq_status), 1,
+		    BUS_DMA_READ, "control status");
+		if (r != 0)
+			goto err_reqs;
 
-		if (m == NULL) {
-			virtio_enqueue_abort(vsc, vq, slot);
-			break;
-		}
+		/* control vq rx mode command parameter */
+		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_rx_dmamap,
+		    ctrlq->ctrlq_rx, sizeof(*ctrlq->ctrlq_rx), 1,
+		    BUS_DMA_WRITE, "rx mode control command");
+		if (r != 0)
+			goto err_reqs;
 
-		map = &netq->netq_maps[slot];
-		KASSERT(map->vnm_mbuf == NULL);
+		/* multiqueue set command */
+		r = vioif_dmamap_create_load(sc, &ctrlq->ctrlq_mq_dmamap,
+		    ctrlq->ctrlq_mq, sizeof(*ctrlq->ctrlq_mq), 1,
+		    BUS_DMA_WRITE, "multiqueue set command");
+		if (r != 0)
+			goto err_reqs;
 
-		r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_WRITE);
-		if (r != 0) {
-			/* maybe just too fragmented */
-			struct mbuf *newm;
+		/* control vq MAC filter table for unicast */
+		/* do not load now since its length is variable */
+		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_uc_dmamap,
+		    sizeof(*ctrlq->ctrlq_mac_tbl_uc)
+		    + ETHER_ADDR_LEN, 1,
+		    "unicast MAC address filter command");
+		if (r != 0)
+			goto err_reqs;
 
-			newm = m_defrag(m, M_NOWAIT);
-			if (newm != NULL) {
-				m = newm;
-				r = vioif_net_load_mbuf(vsc, map, m,
-				    BUS_DMA_WRITE);
-			} else {
-				txc->txc_defrag_failed.ev_count++;
-				r = -1;
-			}
+		/* control vq MAC filter table for multicast */
+		r = vioif_dmamap_create(sc, &ctrlq->ctrlq_tbl_mc_dmamap,
+		    sizeof(*ctrlq->ctrlq_mac_tbl_mc)
+		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES, 1,
+		    "multicast MAC address filter command");
+		if (r != 0)
+			goto err_reqs;
 
-			if (r != 0) {
-				netq->netq_mbuf_load_failed.ev_count++;
-				m_freem(m);
-				if_statinc(ifp, if_oerrors);
-				virtio_enqueue_abort(vsc, vq, slot);
-				continue;
-			}
-		}
+		/* control vq MAC address set command */
+		r = vioif_dmamap_create_load(sc,
+		    &ctrlq->ctrlq_mac_addr_dmamap,
+		    ctrlq->ctrlq_mac_addr,
+		    sizeof(*ctrlq->ctrlq_mac_addr), 1,
+		    BUS_DMA_WRITE, "mac addr set command");
+		if (r != 0)
+			goto err_reqs;
+	}
 
-		memset(map->vnm_hdr, 0, sc->sc_hdr_size);
+	return 0;
 
-		r = vioif_net_enqueue_tx(vsc, vq, slot, map);
-		if (r != 0) {
-			netq->netq_enqueue_failed.ev_count++;
-			vioif_net_unload_mbuf(vsc, map);
-			m_freem(m);
-			/* slot already freed by vioif_net_enqueue_tx */
+err_reqs:
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_mc_dmamap);
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_tbl_uc_dmamap);
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_rx_dmamap);
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
+	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_mac_addr_dmamap);
+	for (qid = 0; qid < netq_num; qid++) {
+		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
+		maps = sc->sc_netqs[qid].netq_maps;
 
-			if_statinc(ifp, if_oerrors);
-			continue;
+		for (i = 0; i < vq_num; i++) {
+			vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map);
+			vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map);
 		}
-
-		queued++;
-		bpf_mtap(ifp, m, BPF_D_OUT);
 	}
-
-	if (queued > 0) {
-		vioif_notify(vsc, vq);
-		ifp->if_timer = 5;
+	if (sc->sc_kmem) {
+		kmem_free(sc->sc_kmem, kmemsize);
+		sc->sc_kmem = NULL;
 	}
+	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, dmamemsize);
+err_dmamem_alloc:
+	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_segs[0], 1);
+err_none:
+	return -1;
 }
 
 static void
-vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
-{
-
-	/*
-	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
-	 */
-	vioif_send_common_locked(ifp, netq, false);
-
-}
-
-static void
-vioif_start(struct ifnet *ifp)
+vioif_alloc_queues(struct vioif_softc *sc)
 {
-	struct vioif_softc *sc = ifp->if_softc;
-	struct vioif_netqueue *txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
-
-#ifdef VIOIF_MPSAFE
-	KASSERT(if_is_mpsafe(ifp));
-#endif
+	int nvq_pairs = sc->sc_max_nvq_pairs;
+	size_t nvqs, netq_num;
 
-	mutex_enter(&txq0->netq_lock);
-	vioif_start_locked(ifp, txq0);
-	mutex_exit(&txq0->netq_lock);
-}
+	KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
 
-static inline int
-vioif_select_txqueue(struct ifnet *ifp, struct mbuf *m)
-{
-	struct vioif_softc *sc = ifp->if_softc;
-	u_int cpuid = cpu_index(curcpu());
+	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
+	if (sc->sc_has_ctrl)
+		nvqs++;
 
-	return VIOIF_NETQ_TXQID(cpuid % sc->sc_act_nvq_pairs);
+	sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
+	sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_netqs[0]) * netq_num,
+	    KM_SLEEP);
 }
 
 static void
-vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
+vioif_free_queues(struct vioif_softc *sc)
 {
+	size_t nvqs, netq_num;
 
-	vioif_send_common_locked(ifp, netq, true);
+	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
+	if (sc->sc_ctrlq.ctrlq_vq)
+		nvqs++;
+
+	kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num);
+	kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
+	sc->sc_netqs = NULL;
+	sc->sc_vqs = NULL;
 }
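
vioif_alloc_queues() and vioif_free_queues() above must agree on the
element size of each array; note the allocation uses sizeof(sc->sc_vqs[0])
for sc_vqs and sizeof(sc->sc_netqs[0]) for sc_netqs, matching the frees.
A minimal userland sketch of the same paired-sizing pattern (plain
calloc/free instead of kmem(9); all names here are illustrative, not
from if_vioif.c):

    #include <stdlib.h>
    #include <string.h>

    struct vq   { int dummy[8];  };
    struct netq { int dummy[32]; };

    struct sc {
        struct vq   *vqs;
        struct netq *netqs;
        size_t       nvqs, nnetqs;
    };

    /* two rx/tx queues per pair, plus an optional control vq;
     * error handling omitted for brevity */
    static void
    alloc_queues(struct sc *sc, size_t pairs, int has_ctrl)
    {
        sc->nnetqs = pairs * 2;
        sc->nvqs   = sc->nnetqs + (has_ctrl ? 1 : 0);
        sc->vqs    = calloc(sc->nvqs,   sizeof(sc->vqs[0]));
        sc->netqs  = calloc(sc->nnetqs, sizeof(sc->netqs[0]));
    }

    /* release with the same element sizes the allocation used */
    static void
    free_queues(struct sc *sc)
    {
        free(sc->netqs);
        free(sc->vqs);
        sc->netqs = NULL;
        sc->vqs   = NULL;
    }

    int
    main(void)
    {
        struct sc sc;

        memset(&sc, 0, sizeof(sc));
        alloc_queues(&sc, 4, 1);
        free_queues(&sc);
        return 0;
    }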
 
+/*
+ * Network queues
+ */
 static int
-vioif_transmit(struct ifnet *ifp, struct mbuf *m)
+vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc,
+    size_t qid, u_int softint_flags)
 {
-	struct vioif_softc *sc = ifp->if_softc;
+	static const struct {
+		const char	*dirname;
+		int		 segsize;
+		int		 nsegs;
+		int 		(*intrhand)(void *);
+		void		(*sihand)(void *);
+	} params[VIOIF_NETQ_IDX] = {
+		[VIOIF_NETQ_RX] = {
+			.dirname	= "rx",
+			.segsize	= MCLBYTES,
+			.nsegs		= 2,
+			.intrhand	= vioif_rx_intr,
+			.sihand		= vioif_rx_handle,
+		},
+		[VIOIF_NETQ_TX] = {
+			.dirname	= "tx",
+			.segsize	= ETHER_MAX_LEN - ETHER_HDR_LEN,
+			.nsegs		= 2,
+			.intrhand	= vioif_tx_intr,
+			.sihand		= vioif_tx_handle,
+		}
+	};
+
+	struct virtqueue *vq;
 	struct vioif_netqueue *netq;
 	struct vioif_tx_context *txc;
-	int qid;
+	struct vioif_rx_context *rxc;
+	char qname[32];
+	int r, dir;
 
-	qid = vioif_select_txqueue(ifp, m);
+	txc = NULL;
+	rxc = NULL;
 	netq = &sc->sc_netqs[qid];
-	txc = netq->netq_ctx;
+	vq = &sc->sc_vqs[qid];
+	dir = VIOIF_NETQ_DIR(qid);
 
-	if (__predict_false(!pcq_put(txc->txc_intrq, m))) {
-		m_freem(m);
-		return ENOBUFS;
-	}
+	netq->netq_vq = &sc->sc_vqs[qid];
+	netq->netq_stopping = false;
+	netq->netq_running_handle = false;
 
-	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
-	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
-	if (m->m_flags & M_MCAST)
-		if_statinc_ref(nsr, if_omcasts);
-	IF_STAT_PUTREF(ifp);
+	snprintf(qname, sizeof(qname), "%s%zu",
+	    params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid));
+	snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
+	    "%s-%s", device_xname(sc->sc_dev), qname);
 
-	if (mutex_tryenter(&netq->netq_lock)) {
-		vioif_transmit_locked(ifp, netq);
-		mutex_exit(&netq->netq_lock);
+	mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
+	r = virtio_alloc_vq(vsc, vq, qid,
+	    params[dir].segsize + sc->sc_hdr_size,
+	    params[dir].nsegs, qname);
+	if (r != 0)
+		goto err;
+	netq->netq_vq = vq;
+
+	netq->netq_vq->vq_intrhand = params[dir].intrhand;
+	netq->netq_vq->vq_intrhand_arg = netq;
+	netq->netq_softint = softint_establish(softint_flags,
+	    params[dir].sihand, netq);
+	if (netq->netq_softint == NULL) {
+		aprint_error_dev(sc->sc_dev,
+		    "couldn't establish %s softint\n",
+		    params[dir].dirname);
+		goto err;
 	}
+	vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
 
-	return 0;
-}
+	switch (dir) {
+	case VIOIF_NETQ_RX:
+		rxc = kmem_zalloc(sizeof(*rxc), KM_SLEEP);
+		netq->netq_ctx = rxc;
+		/* nothing to do */
+		break;
+	case VIOIF_NETQ_TX:
+		txc = kmem_zalloc(sizeof(*txc), KM_SLEEP);
+		netq->netq_ctx = (void *)txc;
+		txc->txc_deferred_transmit = softint_establish(softint_flags,
+		    vioif_deferred_transmit, netq);
+		if (txc->txc_deferred_transmit == NULL) {
+			aprint_error_dev(sc->sc_dev,
+			    "couldn't establish softint for "
+			    "tx deferred transmit\n");
+			goto err;
+		}
+		txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc);
+		txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP);
+		break;
+	}
 
-static void
-vioif_deferred_transmit(void *arg)
-{
-	struct vioif_netqueue *netq = arg;
-	struct virtio_softc *vsc = netq->netq_vq->vq_owner;
-	struct vioif_softc *sc = device_private(virtio_child(vsc));
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	return 0;
 
-	mutex_enter(&netq->netq_lock);
-	vioif_send_common_locked(ifp, netq, true);
-	mutex_exit(&netq->netq_lock);
-}
+err:
+	netq->netq_ctx = NULL;
 
-static int
-vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data)
-{
-	int s, r;
+	if (rxc != NULL) {
+		kmem_free(rxc, sizeof(*rxc));
+	}
 
-	s = splnet();
+	if (txc != NULL) {
+		if (txc->txc_deferred_transmit != NULL)
+			softint_disestablish(txc->txc_deferred_transmit);
+		if (txc->txc_intrq != NULL)
+			pcq_destroy(txc->txc_intrq);
+		kmem_free(txc, sizeof(*txc));
+	}
 
-	r = ether_ioctl(ifp, cmd, data);
-	if (r == ENETRESET && (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI)) {
-		if (ifp->if_flags & IFF_RUNNING) {
-			r = vioif_rx_filter(ifp->if_softc);
-		} else {
-			r = 0;
-		}
+	vioif_work_set(&netq->netq_work, NULL, NULL);
+	if (netq->netq_softint != NULL) {
+		softint_disestablish(netq->netq_softint);
+		netq->netq_softint = NULL;
 	}
+	netq->netq_vq->vq_intrhand = NULL;
+	netq->netq_vq->vq_intrhand_arg = NULL;
 
-	splx(s);
+	virtio_free_vq(vsc, vq);
+	mutex_destroy(&netq->netq_lock);
+	netq->netq_vq = NULL;
 
-	return r;
+	return -1;
 }
 
-void
-vioif_watchdog(struct ifnet *ifp)
+static void
+vioif_netqueue_teardown(struct vioif_softc *sc, struct virtio_softc *vsc,
+    size_t qid)
 {
-	struct vioif_softc *sc = ifp->if_softc;
 	struct vioif_netqueue *netq;
-	int i;
+	struct vioif_rx_context *rxc;
+	struct vioif_tx_context *txc;
+	int dir;
 
-	if (ifp->if_flags & IFF_RUNNING) {
-		for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-			netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
+	netq = &sc->sc_netqs[qid];
 
-			mutex_enter(&netq->netq_lock);
-			if (!netq->netq_running_handle) {
-				netq->netq_running_handle = true;
-				vioif_net_sched_handle(sc, netq);
-			}
-			mutex_exit(&netq->netq_lock);
-		}
+	if (netq->netq_vq == NULL)
+		return;
+
+	netq = &sc->sc_netqs[qid];
+	dir = VIOIF_NETQ_DIR(qid);
+	switch (dir) {
+	case VIOIF_NETQ_RX:
+		rxc = netq->netq_ctx;
+		netq->netq_ctx = NULL;
+		kmem_free(rxc, sizeof(*rxc));
+		break;
+	case VIOIF_NETQ_TX:
+		txc = netq->netq_ctx;
+		netq->netq_ctx = NULL;
+		softint_disestablish(txc->txc_deferred_transmit);
+		pcq_destroy(txc->txc_intrq);
+		kmem_free(txc, sizeof(*txc));
+		break;
 	}
+
+	softint_disestablish(netq->netq_softint);
+	virtio_free_vq(vsc, netq->netq_vq);
+	mutex_destroy(&netq->netq_lock);
+	netq->netq_vq = NULL;
 }
 
 static void
@@ -1613,13 +1655,6 @@ vioif_net_enqueue_rx(struct virtio_softc
 	    BUS_DMASYNC_PREREAD, false);
 }
 
-static void
-vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq)
-{
-
-	virtio_enqueue_commit(vsc, vq, -1, true);
-}
-
 static struct mbuf *
 vioif_net_dequeue_commit(struct virtio_softc *vsc, struct virtqueue *vq,
    int slot, struct vioif_net_map *map, int dma_flags)
@@ -1679,13 +1714,12 @@ vioif_net_intr_disable(struct vioif_soft
 
 		virtio_stop_vq_intr(vsc, netq->netq_vq);
 	}
-
 }
 
 /*
  * Receive implementation
  */
-/* add mbufs for all the empty receive slots */
+/* enqueue mbufs to receive slots */
 static void
 vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq)
 {
@@ -1750,37 +1784,6 @@ vioif_populate_rx_mbufs_locked(struct vi
 		vioif_notify(vsc, vq);
 }
 
-static void
-vioif_rx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_netqueue *netq)
-{
-	struct vioif_net_map *map;
-	struct mbuf *m;
-	unsigned int i, vq_num;
-	bool more;
-
-	mutex_enter(&netq->netq_lock);
-	vq_num = netq->netq_vq->vq_num;
-
-	for (;;) {
-		more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
-		if (more == false)
-			break;
-	}
-
-	for (i = 0; i < vq_num; i++) {
-		map = &netq->netq_maps[i];
-
-		m = map->vnm_mbuf;
-		if (m == NULL)
-			continue;
-
-		vioif_net_unload_mbuf(vsc, map);
-		m_freem(m);
-	}
-	mutex_exit(&netq->netq_lock);
-}
-
 /* dequeue received packets */
 static bool
 vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
@@ -1829,7 +1832,36 @@ done:
 	return more;
 }
 
-/* rx interrupt; call _dequeue above and schedule a softint */
+static void
+vioif_rx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
+    struct vioif_netqueue *netq)
+{
+	struct vioif_net_map *map;
+	struct mbuf *m;
+	unsigned int i, vq_num;
+	bool more;
+
+	mutex_enter(&netq->netq_lock);
+
+	vq_num = netq->netq_vq->vq_num;
+	for (;;) {
+		more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
+		if (more == false)
+			break;
+	}
+
+	for (i = 0; i < vq_num; i++) {
+		map = &netq->netq_maps[i];
+
+		m = map->vnm_mbuf;
+		if (m == NULL)
+			continue;
+
+		vioif_net_unload_mbuf(vsc, map);
+		m_freem(m);
+	}
+	mutex_exit(&netq->netq_lock);
+}
 
 static void
 vioif_rx_handle_locked(void *xnetq, u_int limit)
@@ -1854,77 +1886,267 @@ vioif_rx_handle_locked(void *xnetq, u_in
 		return;
 	}
 
-	enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
-	if (enqueued != 0) {
-		virtio_stop_vq_intr(vsc, netq->netq_vq);
-		vioif_net_sched_handle(sc, netq);
-		return;
+	enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
+	if (enqueued != 0) {
+		virtio_stop_vq_intr(vsc, netq->netq_vq);
+		vioif_net_sched_handle(sc, netq);
+		return;
+	}
+
+	netq->netq_running_handle = false;
+}
+
+static int
+vioif_rx_intr(void *arg)
+{
+	struct vioif_netqueue *netq = arg;
+	struct virtqueue *vq = netq->netq_vq;
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+	u_int limit;
+
+	mutex_enter(&netq->netq_lock);
+
+	/* handler is already running in softint/workqueue */
+	if (netq->netq_running_handle)
+		goto done;
+
+	netq->netq_running_handle = true;
+
+	limit = sc->sc_rx_intr_process_limit;
+	virtio_stop_vq_intr(vsc, vq);
+	vioif_rx_handle_locked(netq, limit);
+
+done:
+	mutex_exit(&netq->netq_lock);
+	return 1;
+}
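
vioif_rx_intr() claims the queue via netq_running_handle under
netq_lock, masks the vq interrupt, and processes at most
sc_rx_intr_process_limit packets before handing the rest to a
softint. A compilable userland sketch of that claim-and-defer idiom
(a pthread mutex stands in for mutex(9); all names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct queue {
        pthread_mutex_t lock;      /* stands in for netq_lock */
        bool running_handle;       /* stands in for netq_running_handle */
        int pending;               /* packets waiting in the ring */
    };

    /* stand-in for vioif_rx_handle_locked(): process up to limit
     * packets, report whether work remains */
    static bool
    process_batch(struct queue *q, int limit)
    {
        while (limit-- > 0 && q->pending > 0)
            q->pending--;
        return q->pending > 0;
    }

    /* stand-in for vioif_net_sched_handle() */
    static void
    schedule_softint(struct queue *q)
    {
        printf("deferring %d packets to softint\n", q->pending);
    }

    static int
    intr(struct queue *q)
    {
        pthread_mutex_lock(&q->lock);
        if (!q->running_handle) {  /* handler not already running */
            q->running_handle = true;
            /* the real driver masks the vq interrupt here */
            if (process_batch(q, 32))
                schedule_softint(q);   /* flag stays set for softint */
            else
                q->running_handle = false;
        }
        pthread_mutex_unlock(&q->lock);
        return 1;
    }

    int
    main(void)
    {
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, false, 100 };
        intr(&q);
        return 0;
    }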
+
+static void
+vioif_rx_handle(void *xnetq)
+{
+	struct vioif_netqueue *netq = xnetq;
+	struct virtqueue *vq = netq->netq_vq;
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+	u_int limit;
+
+	mutex_enter(&netq->netq_lock);
+
+	KASSERT(netq->netq_running_handle);
+
+	if (netq->netq_stopping) {
+		netq->netq_running_handle = false;
+		goto done;
+	}
+
+	limit = sc->sc_rx_process_limit;
+	vioif_rx_handle_locked(netq, limit);
+
+done:
+	mutex_exit(&netq->netq_lock);
+}
+
+/*
+ * Transmission implementation
+ */
+/* enqueue mbufs to send */
+static void
+vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
+    bool is_transmit)
+{
+	struct vioif_softc *sc = ifp->if_softc;
+	struct virtio_softc *vsc = sc->sc_virtio;
+	struct virtqueue *vq = netq->netq_vq;
+	struct vioif_tx_context *txc;
+	struct vioif_net_map *map;
+	struct mbuf *m;
+	int queued = 0;
+
+	KASSERT(mutex_owned(&netq->netq_lock));
+
+	if (netq->netq_stopping ||
+	    !ISSET(ifp->if_flags, IFF_RUNNING))
+		return;
+
+	txc = netq->netq_ctx;
+
+	if (!txc->txc_link_active)
+		return;
+
+	if (!is_transmit &&
+	    ISSET(ifp->if_flags, IFF_OACTIVE))
+		return;
+
+	for (;;) {
+		int slot, r;
+		r = virtio_enqueue_prep(vsc, vq, &slot);
+		if (r == EAGAIN)
+			break;
+		if (__predict_false(r != 0))
+			panic("enqueue_prep for tx buffers");
+
+		if (is_transmit)
+			m = pcq_get(txc->txc_intrq);
+		else
+			IFQ_DEQUEUE(&ifp->if_snd, m);
+
+		if (m == NULL) {
+			virtio_enqueue_abort(vsc, vq, slot);
+			break;
+		}
+
+		map = &netq->netq_maps[slot];
+		KASSERT(map->vnm_mbuf == NULL);
+
+		r = vioif_net_load_mbuf(vsc, map, m, BUS_DMA_WRITE);
+		if (r != 0) {
+			/* maybe just too fragmented */
+			struct mbuf *newm;
+
+			newm = m_defrag(m, M_NOWAIT);
+			if (newm != NULL) {
+				m = newm;
+				r = vioif_net_load_mbuf(vsc, map, m,
+				    BUS_DMA_WRITE);
+			} else {
+				txc->txc_defrag_failed.ev_count++;
+				r = -1;
+			}
+
+			if (r != 0) {
+				netq->netq_mbuf_load_failed.ev_count++;
+				m_freem(m);
+				if_statinc(ifp, if_oerrors);
+				virtio_enqueue_abort(vsc, vq, slot);
+				continue;
+			}
+		}
+
+		memset(map->vnm_hdr, 0, sc->sc_hdr_size);
+
+		r = vioif_net_enqueue_tx(vsc, vq, slot, map);
+		if (r != 0) {
+			netq->netq_enqueue_failed.ev_count++;
+			vioif_net_unload_mbuf(vsc, map);
+			m_freem(m);
+			/* slot already freed by vioif_net_enqueue_tx */
+
+			if_statinc(ifp, if_oerrors);
+			continue;
+		}
+
+		queued++;
+		bpf_mtap(ifp, m, BPF_D_OUT);
+	}
+
+	if (queued > 0) {
+		vioif_notify(vsc, vq);
+		ifp->if_timer = 5;
+	}
+}
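
When vioif_net_load_mbuf() fails in the loop above, the sender assumes
the chain is merely too fragmented, m_defrag()s it, and retries exactly
once before dropping the packet. The same retry shape in a
self-contained sketch; load_buf() and do_defrag() are hypothetical
stand-ins, not driver functions:

    #include <stddef.h>
    #include <stdio.h>

    /* pretend the DMA map only has room for two segments */
    static int
    load_buf(size_t nsegs)
    {
        return nsegs > 2 ? -1 : 0;
    }

    /* coalesce the chain into one segment, like m_defrag() */
    static size_t
    do_defrag(size_t nsegs)
    {
        (void)nsegs;
        return 1;
    }

    static int
    send_one(size_t nsegs)
    {
        if (load_buf(nsegs) != 0) {
            /* maybe just too fragmented: defrag and retry once */
            nsegs = do_defrag(nsegs);
            if (load_buf(nsegs) != 0)
                return -1;     /* drop and count an output error */
        }
        printf("queued in %zu segment(s)\n", nsegs);
        return 0;
    }

    int
    main(void)
    {
        return send_one(5) == 0 ? 0 : 1;
    }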
+
+/* dequeue sent mbufs */
+static bool
+vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
+    struct vioif_netqueue *netq, u_int limit)
+{
+	struct virtqueue *vq = netq->netq_vq;
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	struct vioif_net_map *map;
+	struct mbuf *m;
+	int slot, len;
+	bool more = false;
+
+	KASSERT(mutex_owned(&netq->netq_lock));
+
+	if (virtio_vq_is_enqueued(vsc, vq) == false)
+		return false;
+
+	for (;;) {
+		if (limit-- == 0) {
+			more = true;
+			break;
+		}
+
+		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
+			break;
+
+		map = &netq->netq_maps[slot];
+		KASSERT(map->vnm_mbuf != NULL);
+		m = vioif_net_dequeue_commit(vsc, vq, slot,
+		    map, BUS_DMASYNC_POSTWRITE);
+		KASSERT(m != NULL);
+
+		if_statinc(ifp, if_opackets);
+		m_freem(m);
 	}
 
-	netq->netq_running_handle = false;
+	return more;
 }
 
-static int
-vioif_rx_intr(void *arg)
+static void
+vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
+    struct vioif_netqueue *netq)
 {
-	struct vioif_netqueue *netq = arg;
-	struct virtqueue *vq = netq->netq_vq;
-	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(virtio_child(vsc));
-	u_int limit;
+	struct vioif_net_map *map;
+	struct mbuf *m;
+	unsigned int i, vq_num;
+	bool more;
 
 	mutex_enter(&netq->netq_lock);
 
-	/* handler is already running in softint/workqueue */
-	if (netq->netq_running_handle)
-		goto done;
+	vq_num = netq->netq_vq->vq_num;
+	for (;;) {
+		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num);
+		if (more == false)
+			break;
+	}
 
-	netq->netq_running_handle = true;
+	for (i = 0; i < vq_num; i++) {
+		map = &netq->netq_maps[i];
 
-	limit = sc->sc_rx_intr_process_limit;
-	virtio_stop_vq_intr(vsc, vq);
-	vioif_rx_handle_locked(netq, limit);
+		m = map->vnm_mbuf;
+		if (m == NULL)
+			continue;
 
-done:
+		vioif_net_unload_mbuf(vsc, map);
+		m_freem(m);
+	}
 	mutex_exit(&netq->netq_lock);
-	return 1;
 }
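
Both queue-clear routines follow the same drain shape: loop on the
dequeue routine until it reports no more completions, then walk every
slot and release whatever is still mapped. Reduced to a runnable
sketch with invented names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RING_SIZE 8

    static char *slots[RING_SIZE];
    static int   completed;    /* entries the "device" has finished */

    /* stand-in for vioif_tx_deq_locked(): reap up to limit entries,
     * return true if completions remain */
    static bool
    deq(int limit)
    {
        while (limit-- > 0 && completed > 0) {
            completed--;
            free(slots[completed]);
            slots[completed] = NULL;
        }
        return completed > 0;
    }

    static void
    queue_clear(void)
    {
        while (deq(RING_SIZE))
            ;
        /* anything the device never completed is freed here */
        for (int i = 0; i < RING_SIZE; i++) {
            free(slots[i]);
            slots[i] = NULL;
        }
    }

    int
    main(void)
    {
        for (int i = 0; i < RING_SIZE; i++)
            slots[i] = malloc(64);
        completed = 3;
        queue_clear();
        printf("cleared\n");
        return 0;
    }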
 
 static void
-vioif_rx_handle(void *xnetq)
+vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
 {
-	struct vioif_netqueue *netq = xnetq;
-	struct virtqueue *vq = netq->netq_vq;
-	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(virtio_child(vsc));
-	u_int limit;
 
-	mutex_enter(&netq->netq_lock);
+	/*
+	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
+	 */
+	vioif_send_common_locked(ifp, netq, false);
 
-	KASSERT(netq->netq_running_handle);
+}
 
-	if (netq->netq_stopping) {
-		netq->netq_running_handle = false;
-		goto done;
-	}
+static void
+vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
+{
 
-	limit = sc->sc_rx_process_limit;
-	vioif_rx_handle_locked(netq, limit);
+	vioif_send_common_locked(ifp, netq, true);
+}
 
-done:
+static void
+vioif_deferred_transmit(void *arg)
+{
+	struct vioif_netqueue *netq = arg;
+	struct virtio_softc *vsc = netq->netq_vq->vq_owner;
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+
+	mutex_enter(&netq->netq_lock);
+	vioif_send_common_locked(ifp, netq, true);
 	mutex_exit(&netq->netq_lock);
 }
 
-/*
- * Transmition implementation
- */
-/* actual transmission is done in if_start */
-/* tx interrupt; dequeue and free mbufs */
-/*
- * tx interrupt is actually disabled; this should be called upon
- * tx vq full and watchdog
- */
-
 static void
 vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
 {
@@ -2019,75 +2241,6 @@ done:
 	mutex_exit(&netq->netq_lock);
 }
 
-static void
-vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_netqueue *netq)
-{
-	struct vioif_net_map *map;
-	struct mbuf *m;
-	unsigned int i, vq_num;
-	bool more;
-
-	mutex_enter(&netq->netq_lock);
-
-	vq_num = netq->netq_vq->vq_num;
-	for (;;) {
-		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num);
-		if (more == false)
-			break;
-	}
-
-	for (i = 0; i < vq_num; i++) {
-		map = &netq->netq_maps[i];
-
-		m = map->vnm_mbuf;
-		if (m == NULL)
-			continue;
-
-		vioif_net_unload_mbuf(vsc, map);
-		m_freem(m);
-	}
-	mutex_exit(&netq->netq_lock);
-}
-
-static bool
-vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_netqueue *netq, u_int limit)
-{
-	struct virtqueue *vq = netq->netq_vq;
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
-	struct vioif_net_map *map;
-	struct mbuf *m;
-	int slot, len;
-	bool more = false;
-
-	KASSERT(mutex_owned(&netq->netq_lock));
-
-	if (virtio_vq_is_enqueued(vsc, vq) == false)
-		return false;
-
-	for (;;) {
-		if (limit-- == 0) {
-			more = true;
-			break;
-		}
-
-		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
-			break;
-
-		map = &netq->netq_maps[slot];
-		KASSERT(map->vnm_mbuf != NULL);
-		m = vioif_net_dequeue_commit(vsc, vq, slot,
-		    map, BUS_DMASYNC_POSTWRITE);
-		KASSERT(m != NULL);
-
-		if_statinc(ifp, if_opackets);
-		m_freem(m);
-	}
-
-	return more;
-}
-
 /*
  * Control vq
  */
@@ -2227,6 +2380,31 @@ vioif_ctrl_send_command(struct vioif_sof
 	return r;
 }
 
+/* ctrl vq interrupt; wake up the command issuer */
+static int
+vioif_ctrl_intr(void *arg)
+{
+	struct vioif_ctrlqueue *ctrlq = arg;
+	struct virtqueue *vq = ctrlq->ctrlq_vq;
+	struct virtio_softc *vsc = vq->vq_owner;
+	int r, slot;
+
+	if (virtio_vq_is_enqueued(vsc, vq) == false)
+		return 0;
+
+	r = virtio_dequeue(vsc, vq, &slot, NULL);
+	if (r == ENOENT)
+		return 0;
+	virtio_dequeue_commit(vsc, vq, slot);
+
+	mutex_enter(&ctrlq->ctrlq_wait_lock);
+	ctrlq->ctrlq_inuse = DONE;
+	cv_signal(&ctrlq->ctrlq_wait);
+	mutex_exit(&ctrlq->ctrlq_wait_lock);
+
+	return 1;
+}
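
The control queue uses the classic condition-variable handshake: the
command issuer sleeps on ctrlq_wait while ctrlq_inuse != DONE, and the
interrupt handler above flips the state and signals. A pthread version
of the same handshake (names are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    enum state { FREE, INUSE, DONE };

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_cv   = PTHREAD_COND_INITIALIZER;
    static enum state      inuse     = FREE;

    /* plays the role of vioif_ctrl_intr(): mark DONE, wake issuer */
    static void *
    completion_intr(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&wait_lock);
        inuse = DONE;
        pthread_cond_signal(&wait_cv);
        pthread_mutex_unlock(&wait_lock);
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;

        inuse = INUSE;             /* command handed to the device */
        pthread_create(&t, NULL, completion_intr, NULL);

        pthread_mutex_lock(&wait_lock);
        while (inuse != DONE)      /* issuer sleeps until signaled */
            pthread_cond_wait(&wait_cv, &wait_lock);
        pthread_mutex_unlock(&wait_lock);
        pthread_join(t, NULL);

        printf("control command completed\n");
        return 0;
    }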
+
 static int
 vioif_ctrl_rx(struct vioif_softc *sc, int cmd, bool onoff)
 {
@@ -2263,46 +2441,30 @@ vioif_set_allmulti(struct vioif_softc *s
 	return vioif_ctrl_rx(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, onoff);
 }
 
-/* issue VIRTIO_NET_CTRL_MAC_TABLE_SET command and wait for completion */
 static int
-vioif_set_rx_filter(struct vioif_softc *sc)
+vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
 {
-	/* filter already set in ctrlq->ctrlq_mac_tbl */
-	struct virtio_softc *vsc = sc->sc_virtio;
-	struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
-	struct vioif_ctrl_cmdspec specs[2];
-	int nspecs = __arraycount(specs);
+	struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
+	struct vioif_ctrl_cmdspec specs[1];
 	int r;
 
-	mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
-	mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;
-
 	if (!sc->sc_has_ctrl)
 		return ENOTSUP;
 
-	vioif_ctrl_acquire(sc);
-
-	specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
-	specs[0].buf = mac_tbl_uc;
-	specs[0].bufsize = sizeof(*mac_tbl_uc)
-	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_uc->nentries));
+	if (nvq_pairs <= 1)
+		return EINVAL;
 
-	specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
-	specs[1].buf = mac_tbl_mc;
-	specs[1].bufsize = sizeof(*mac_tbl_mc)
-	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_mc->nentries));
+	vioif_ctrl_acquire(sc);
 
-	r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
-	if (r != 0)
-		goto out;
+	mq->virtqueue_pairs = virtio_rw16(sc->sc_virtio, nvq_pairs);
+	specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
+	specs[0].buf = mq;
+	specs[0].bufsize = sizeof(*mq);
 
 	r = vioif_ctrl_send_command(sc,
-	    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
-	    specs, nspecs);
-
-	vioif_ctrl_unload_cmdspec(sc, specs, nspecs);
+	    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
+	    specs, __arraycount(specs));
 
-out:
 	vioif_ctrl_release(sc);
 
 	return r;
@@ -2350,110 +2512,53 @@ vioif_set_mac_addr(struct vioif_softc *s
 			    VIRTIO_NET_CONFIG_MAC + i, sc->sc_mac[i]);
 		}
 		r = 0;
-	}
-
-	return r;
-}
-
-static int
-vioif_ctrl_mq_vq_pairs_set(struct vioif_softc *sc, int nvq_pairs)
-{
-	struct virtio_net_ctrl_mq *mq = sc->sc_ctrlq.ctrlq_mq;
-	struct vioif_ctrl_cmdspec specs[1];
-	int r;
-
-	if (!sc->sc_has_ctrl)
-		return ENOTSUP;
-
-	if (nvq_pairs <= 1)
-		return EINVAL;
-
-	vioif_ctrl_acquire(sc);
-
-	mq->virtqueue_pairs = virtio_rw16(sc->sc_virtio, nvq_pairs);
-	specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
-	specs[0].buf = mq;
-	specs[0].bufsize = sizeof(*mq);
-
-	r = vioif_ctrl_send_command(sc,
-	    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
-	    specs, __arraycount(specs));
-
-	vioif_ctrl_release(sc);
+	}
 
 	return r;
 }
 
-/* ctrl vq interrupt; wake up the command issuer */
 static int
-vioif_ctrl_intr(void *arg)
+vioif_set_rx_filter(struct vioif_softc *sc)
 {
-	struct vioif_ctrlqueue *ctrlq = arg;
-	struct virtqueue *vq = ctrlq->ctrlq_vq;
-	struct virtio_softc *vsc = vq->vq_owner;
-	int r, slot;
-
-	if (virtio_vq_is_enqueued(vsc, vq) == false)
-		return 0;
+	/* filter already set in ctrlq->ctrlq_mac_tbl */
+	struct virtio_softc *vsc = sc->sc_virtio;
+	struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
+	struct vioif_ctrl_cmdspec specs[2];
+	int nspecs = __arraycount(specs);
+	int r;
 
-	r = virtio_dequeue(vsc, vq, &slot, NULL);
-	if (r == ENOENT)
-		return 0;
-	virtio_dequeue_commit(vsc, vq, slot);
+	mac_tbl_uc = sc->sc_ctrlq.ctrlq_mac_tbl_uc;
+	mac_tbl_mc = sc->sc_ctrlq.ctrlq_mac_tbl_mc;
 
-	mutex_enter(&ctrlq->ctrlq_wait_lock);
-	ctrlq->ctrlq_inuse = DONE;
-	cv_signal(&ctrlq->ctrlq_wait);
-	mutex_exit(&ctrlq->ctrlq_wait_lock);
+	if (!sc->sc_has_ctrl)
+		return ENOTSUP;
 
-	return 1;
-}
+	vioif_ctrl_acquire(sc);
 
-static int
-vioif_ifflags(struct vioif_softc *sc)
-{
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
-	bool onoff;
-	int r;
+	specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
+	specs[0].buf = mac_tbl_uc;
+	specs[0].bufsize = sizeof(*mac_tbl_uc)
+	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_uc->nentries));
 
-	if (!sc->sc_has_ctrl) {
-		/* no ctrl vq; always promisc and allmulti */
-		ifp->if_flags |= (IFF_PROMISC | IFF_ALLMULTI);
-		return 0;
-	}
+	specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
+	specs[1].buf = mac_tbl_mc;
+	specs[1].bufsize = sizeof(*mac_tbl_mc)
+	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_mc->nentries));
 
-	onoff = ifp->if_flags & IFF_ALLMULTI ? true : false;
-	r = vioif_set_allmulti(sc, onoff);
-	if (r != 0) {
-		log(LOG_WARNING,
-		    "%s: couldn't %sable ALLMULTI\n",
-		    ifp->if_xname, onoff ? "en" : "dis");
-		if (onoff == false) {
-			ifp->if_flags |= IFF_ALLMULTI;
-		}
-	}
+	r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
+	if (r != 0)
+		goto out;
 
-	onoff = ifp->if_flags & IFF_PROMISC ? true : false;
-	r = vioif_set_promisc(sc, onoff);
-	if (r != 0) {
-		log(LOG_WARNING,
-		    "%s: couldn't %sable PROMISC\n",
-		    ifp->if_xname, onoff ? "en" : "dis");
-		if (onoff == false) {
-			ifp->if_flags |= IFF_PROMISC;
-		}
-	}
+	r = vioif_ctrl_send_command(sc,
+	    VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET,
+	    specs, nspecs);
 
-	return 0;
-}
+	vioif_ctrl_unload_cmdspec(sc, specs, nspecs);
 
-static int
-vioif_ifflags_cb(struct ethercom *ec)
-{
-	struct ifnet *ifp = &ec->ec_if;
-	struct vioif_softc *sc = ifp->if_softc;
+out:
+	vioif_ctrl_release(sc);
 
-	return vioif_ifflags(sc);
+	return r;
 }
 
 /*
@@ -2536,6 +2641,28 @@ set_ifflags:
 	return r;
 }
 
+/*
+ * VM configuration changes
+ */
+static int
+vioif_config_change(struct virtio_softc *vsc)
+{
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+
+	softint_schedule(sc->sc_cfg_softint);
+	return 0;
+}
+
+static void
+vioif_cfg_softint(void *arg)
+{
+	struct vioif_softc *sc = arg;
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+
+	vioif_update_link_status(sc);
+	vioif_start(ifp);
+}
+
 static int
 vioif_get_link_status(struct vioif_softc *sc)
 {
@@ -2554,7 +2681,6 @@ vioif_get_link_status(struct vioif_softc
 	return LINK_STATE_DOWN;
 }
 
-/* change link status */
 static void
 vioif_update_link_status(struct vioif_softc *sc)
 {
@@ -2589,23 +2715,15 @@ done:
 	mutex_exit(&sc->sc_lock);
 }
 
-static int
-vioif_config_change(struct virtio_softc *vsc)
-{
-	struct vioif_softc *sc = device_private(virtio_child(vsc));
-
-	softint_schedule(sc->sc_ctl_softint);
-	return 0;
-}
-
 static void
-vioif_ctl_softint(void *arg)
+vioif_workq_work(struct work *wk, void *context)
 {
-	struct vioif_softc *sc = arg;
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	struct vioif_work *work;
 
-	vioif_update_link_status(sc);
-	vioif_start(ifp);
+	work = container_of(wk, struct vioif_work, cookie);
+
+	atomic_store_relaxed(&work->added, 0);
+	work->func(work->arg);
 }
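
vioif_workq_work() receives only the embedded struct work and uses
container_of() to recover the enclosing vioif_work. A standalone
illustration of that recovery step; container_of is defined locally
and the struct names are invented:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cookie { int id; };     /* what the framework hands back */

    struct my_work {
        void (*func)(void *);
        void *arg;
        struct cookie cookie;      /* embedded, like work->cookie */
    };

    static void
    hello(void *arg)
    {
        printf("work ran: %s\n", (const char *)arg);
    }

    /* the framework only knows about the cookie; recover the whole
     * work item from it, as vioif_workq_work() does */
    static void
    dispatch(struct cookie *ck)
    {
        struct my_work *w = container_of(ck, struct my_work, cookie);
        w->func(w->arg);
    }

    int
    main(void)
    {
        struct my_work w = { hello, "ping", { 1 } };
        dispatch(&w.cookie);
        return 0;
    }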
 
 static struct workqueue *
@@ -2631,17 +2749,6 @@ vioif_workq_destroy(struct workqueue *wq
 }
 
 static void
-vioif_workq_work(struct work *wk, void *context)
-{
-	struct vioif_work *work;
-
-	work = container_of(wk, struct vioif_work, cookie);
-
-	atomic_store_relaxed(&work->added, 0);
-	work->func(work->arg);
-}
-
-static void
 vioif_work_set(struct vioif_work *work, void (*func)(void *), void *arg)
 {
 
@@ -2670,125 +2777,6 @@ vioif_work_wait(struct workqueue *wq, st
 	workqueue_wait(wq, &work->cookie);
 }
 
-static int
-vioif_setup_sysctl(struct vioif_softc *sc)
-{
-	const char *devname;
-	struct sysctllog **log;
-	const struct sysctlnode *rnode, *rxnode, *txnode;
-	int error;
-
-	log = &sc->sc_sysctllog;
-	devname = device_xname(sc->sc_dev);
-
-	error = sysctl_createv(log, 0, NULL, &rnode,
-	    0, CTLTYPE_NODE, devname,
-	    SYSCTL_DESCR("virtio-net information and settings"),
-	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &rnode, NULL,
-	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
-	    SYSCTL_DESCR("Use workqueue for packet processing"),
-	    NULL, 0, &sc->sc_txrx_workqueue_sysctl, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &rnode, &rxnode,
-	    0, CTLTYPE_NODE, "rx",
-	    SYSCTL_DESCR("virtio-net information and settings for Rx"),
-	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &rxnode, NULL,
-	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
-	    SYSCTL_DESCR("max number of Rx packets to process for interrupt processing"),
-	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &rxnode, NULL,
-	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
-	    SYSCTL_DESCR("max number of Rx packets to process for deferred processing"),
-	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &rnode, &txnode,
-	    0, CTLTYPE_NODE, "tx",
-	    SYSCTL_DESCR("virtio-net information and settings for Tx"),
-	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &txnode, NULL,
-	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
-	    SYSCTL_DESCR("max number of Tx packets to process for interrupt processing"),
-	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
-	if (error)
-		goto out;
-
-	error = sysctl_createv(log, 0, &txnode, NULL,
-	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
-	    SYSCTL_DESCR("max number of Tx packets to process for deferred processing"),
-	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
-
-out:
-	if (error)
-		sysctl_teardown(log);
-
-	return error;
-}
-
-static void
-vioif_setup_stats(struct vioif_softc *sc)
-{
-	struct vioif_netqueue *netq;
-	struct vioif_tx_context *txc;
-	struct vioif_rx_context *rxc;
-	size_t i, netq_num;
-
-	netq_num = sc->sc_max_nvq_pairs * 2;
-	for (i = 0; i < netq_num; i++) {
-		netq = &sc->sc_netqs[i];
-		evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
-		    NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
-		evcnt_attach_dynamic(&netq->netq_enqueue_failed,
-		    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
-		    "virtqueue enqueue failed failed");
-
-		switch (VIOIF_NETQ_DIR(i)) {
-		case VIOIF_NETQ_RX:
-			rxc = netq->netq_ctx;
-			evcnt_attach_dynamic(&rxc->rxc_mbuf_enobufs,
-			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
-			    "no receive buffer");
-			break;
-		case VIOIF_NETQ_TX:
-			txc = netq->netq_ctx;
-			evcnt_attach_dynamic(&txc->txc_defrag_failed,
-			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
-			    "m_defrag() failed");
-			break;
-		}
-	}
-
-	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC,
-	    NULL, device_xname(sc->sc_dev), "control command dmamap load failed");
-	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_failed, EVCNT_TYPE_MISC,
-	    NULL, device_xname(sc->sc_dev), "control command failed");
-}
-
-static void
-vioif_intr_barrier(void)
-{
-
-	/* wait for finish all interrupt handler */
-	xc_barrier(0);
-}
-
 MODULE(MODULE_CLASS_DRIVER, if_vioif, "virtio");
 
 #ifdef _MODULE
