Module Name:    src
Committed By:   jdolecek
Date:           Sat Mar 25 18:02:06 UTC 2017

Modified Files:
        src/sys/dev/pci: if_vioif.c ld_virtio.c viomb.c viornd.c vioscsi.c
            virtio.c virtiovar.h

Log Message:
reorganize the attachment process for virtio child devices, so that
more common code is shared among the drivers and the drivers can be
dynamically loaded correctly; forbid direct access to struct
virtio_softc from the child driver code
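
Illustrative sketch (not part of this commit): a minimal child driver
showing the reorganized match/attach sequence, modeled on the viornd and
vioif changes below.  The "foo" names, softc layout and queue parameters
are placeholders; the virtio_* helpers and accessors are the ones this
commit introduces or makes mandatory.

/*
 * Sketch only: outline of a virtio child driver under the new
 * attachment interface.  Placeholder driver, not code from the commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/virtioreg.h>
#include <dev/pci/virtiovar.h>

struct foo_softc {
	device_t		sc_dev;
	struct virtio_softc	*sc_virtio;
	struct virtqueue	sc_vq[1];	/* placeholder: one queue */
};

static int
foo_match(device_t parent, cfdata_t match, void *aux)
{
	/* The bus now passes struct virtio_attach_args, not its softc. */
	struct virtio_attach_args *va = aux;

	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
		return 1;
	return 0;
}

static void
foo_attach(device_t parent, device_t self, void *aux)
{
	struct foo_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);

	if (virtio_child(vsc) != NULL)	/* accessor, not vsc->sc_child */
		return;

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	/*
	 * Register with the bus and negotiate features in one call:
	 * IPL, vq array, config-change and vq interrupt handlers,
	 * requested interrupt flags, wanted features, snprintb bits.
	 * Negotiated features are read back via virtio_features(vsc),
	 * and the DMA tag is reached via virtio_dmat(vsc).
	 */
	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
	    NULL, virtio_vq_intr, 0,
	    0, VIRTIO_COMMON_FLAG_BITS);

	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, NBPG, 1, "request") != 0)
		goto err;

	/* Interrupts are established here, after the vqs exist. */
	if (virtio_child_attach_finish(vsc) != 0) {
		virtio_free_vq(vsc, &sc->sc_vq[0]);
		goto err;
	}
	return;

err:
	virtio_child_attach_failed(vsc);
}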


To generate a diff of this commit:
cvs rdiff -u -r1.31 -r1.32 src/sys/dev/pci/if_vioif.c
cvs rdiff -u -r1.14 -r1.15 src/sys/dev/pci/ld_virtio.c
cvs rdiff -u -r1.7 -r1.8 src/sys/dev/pci/viomb.c
cvs rdiff -u -r1.10 -r1.11 src/sys/dev/pci/viornd.c
cvs rdiff -u -r1.11 -r1.12 src/sys/dev/pci/vioscsi.c
cvs rdiff -u -r1.21 -r1.22 src/sys/dev/pci/virtio.c
cvs rdiff -u -r1.6 -r1.7 src/sys/dev/pci/virtiovar.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_vioif.c
diff -u src/sys/dev/pci/if_vioif.c:1.31 src/sys/dev/pci/if_vioif.c:1.32
--- src/sys/dev/pci/if_vioif.c:1.31	Tue Jan 17 01:25:21 2017
+++ src/sys/dev/pci/if_vioif.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_vioif.c,v 1.31 2017/01/17 01:25:21 ozaki-r Exp $	*/
+/*	$NetBSD: if_vioif.c,v 1.32 2017/03/25 18:02:06 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.31 2017/01/17 01:25:21 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.32 2017/03/25 18:02:06 jdolecek Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -227,6 +227,8 @@ struct vioif_softc {
 	kmutex_t		*sc_tx_lock;
 	kmutex_t		*sc_rx_lock;
 	bool			sc_stopping;
+
+	bool			sc_has_ctrl;
 };
 #define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
 #define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */
@@ -281,7 +283,7 @@ CFATTACH_DECL_NEW(vioif, sizeof(struct v
 static int
 vioif_match(device_t parent, cfdata_t match, void *aux)
 {
-	struct virtio_softc *va = aux;
+	struct virtio_attach_args *va = aux;
 
 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
 		return 1;
@@ -323,12 +325,12 @@ vioif_alloc_mems(struct vioif_softc *sc)
 	intptr_t p;
 	int rxqsize, txqsize;
 
-	rxqsize = vsc->sc_vqs[VQ_RX].vq_num;
-	txqsize = vsc->sc_vqs[VQ_TX].vq_num;
+	rxqsize = sc->sc_vq[VQ_RX].vq_num;
+	txqsize = sc->sc_vq[VQ_TX].vq_num;
 
 	allocsize = sizeof(struct virtio_net_hdr) * rxqsize;
 	allocsize += sizeof(struct virtio_net_hdr) * txqsize;
-	if (vsc->sc_nvqs == 3) {
+	if (sc->sc_has_ctrl) {
 		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
 		allocsize += sizeof(struct virtio_net_ctrl_status) * 1;
 		allocsize += sizeof(struct virtio_net_ctrl_rx) * 1;
@@ -336,7 +338,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 			+ sizeof(struct virtio_net_ctrl_mac_tbl)
 			+ ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
 	}
-	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
+	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
 			     &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
@@ -344,7 +346,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 				 "error code %d\n", allocsize, r);
 		goto err_none;
 	}
-	r = bus_dmamem_map(vsc->sc_dmat,
+	r = bus_dmamem_map(virtio_dmat(vsc),
 			   &sc->sc_hdr_segs[0], 1, allocsize,
 			   &vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
@@ -360,7 +362,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 #define P(name,size)	do { sc->sc_ ##name = (void*) p;	\
 			     p += size; } while (0)
 	P(tx_hdrs, sizeof(struct virtio_net_hdr) * txqsize);
-	if (vsc->sc_nvqs == 3) {
+	if (sc->sc_has_ctrl) {
 		P(ctrl_cmd, sizeof(struct virtio_net_ctrl_cmd));
 		P(ctrl_status, sizeof(struct virtio_net_ctrl_status));
 		P(ctrl_rx, sizeof(struct virtio_net_ctrl_rx));
@@ -385,7 +387,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 
 #define C(map, buf, size, nsegs, rw, usage)				\
 	do {								\
-		r = bus_dmamap_create(vsc->sc_dmat, size, nsegs, size, 0, \
+		r = bus_dmamap_create(virtio_dmat(vsc), size, nsegs, size, 0, \
 				      BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,	\
 				      &sc->sc_ ##map);			\
 		if (r != 0) {						\
@@ -398,7 +400,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 #define C_L1(map, buf, size, nsegs, rw, usage)				\
 	C(map, buf, size, nsegs, rw, usage);				\
 	do {								\
-		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
+		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
 				    &sc->sc_ ##buf, size, NULL,		\
 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
 		if (r != 0) {						\
@@ -411,7 +413,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 #define C_L2(map, buf, size, nsegs, rw, usage)				\
 	C(map, buf, size, nsegs, rw, usage);				\
 	do {								\
-		r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ ##map,	\
+		r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ ##map,	\
 				    sc->sc_ ##buf, size, NULL,		\
 				    BUS_DMA_ ##rw | BUS_DMA_NOWAIT);	\
 		if (r != 0) {						\
@@ -436,7 +438,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 		  "tx payload");
 	}
 
-	if (vsc->sc_nvqs == 3) {
+	if (sc->sc_has_ctrl) {
 		/* control vq class & command */
 		C_L2(ctrl_cmd_dmamap, ctrl_cmd,
 		    sizeof(struct virtio_net_ctrl_cmd), 1, WRITE,
@@ -474,7 +476,7 @@ err_reqs:
 #define D(map)								\
 	do {								\
 		if (sc->sc_ ##map) {					\
-			bus_dmamap_destroy(vsc->sc_dmat, sc->sc_ ##map); \
+			bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_ ##map); \
 			sc->sc_ ##map = NULL;				\
 		}							\
 	} while (0)
@@ -497,9 +499,9 @@ err_reqs:
 		sc->sc_arrays = 0;
 	}
 err_dmamem_map:
-	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_hdrs, allocsize);
+	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_hdrs, allocsize);
 err_dmamem_alloc:
-	bus_dmamem_free(vsc->sc_dmat, &sc->sc_hdr_segs[0], 1);
+	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
 err_none:
 	return -1;
 }
@@ -510,12 +512,11 @@ vioif_attach(device_t parent, device_t s
 	struct vioif_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
 	uint32_t features;
-	char buf[256];
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	u_int flags;
-	int r;
+	int r, nvqs=0, req_flags;
 
-	if (vsc->sc_child != NULL) {
+	if (virtio_child(vsc) != NULL) {
 		aprint_normal(": child already attached for %s; "
 			      "something wrong...\n",
 			      device_xname(parent));
@@ -525,27 +526,24 @@ vioif_attach(device_t parent, device_t s
 	sc->sc_dev = self;
 	sc->sc_virtio = vsc;
 
-	vsc->sc_child = self;
-	vsc->sc_ipl = IPL_NET;
-	vsc->sc_vqs = &sc->sc_vq[0];
-	vsc->sc_config_change = NULL;
-	vsc->sc_intrhand = virtio_vq_intr;
-	vsc->sc_flags = 0;
+	req_flags = 0;
 
 #ifdef VIOIF_MPSAFE
-	vsc->sc_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
+	req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
 #endif
 #ifdef VIOIF_SOFTINT_INTR
-	vsc->sc_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
+	req_flags |= VIRTIO_F_PCI_INTR_SOFTINT;
 #endif
-	vsc->sc_flags |= VIRTIO_F_PCI_INTR_MSIX;
+	req_flags |= VIRTIO_F_PCI_INTR_MSIX;
+
+	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
+	    NULL, virtio_vq_intr, req_flags,
+	    (VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
+	     VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY),
+	    VIRTIO_NET_FLAG_BITS);
+
+	features = virtio_features(vsc);
 
-	features = virtio_negotiate_features(vsc,
-					     (VIRTIO_NET_F_MAC |
-					      VIRTIO_NET_F_STATUS |
-					      VIRTIO_NET_F_CTRL_VQ |
-					      VIRTIO_NET_F_CTRL_RX |
-					      VIRTIO_F_NOTIFY_ON_EMPTY));
 	if (features & VIRTIO_NET_F_MAC) {
 		sc->sc_mac[0] = virtio_read_device_config_1(vsc,
 						    VIRTIO_NET_CONFIG_MAC+0);
@@ -585,10 +583,8 @@ vioif_attach(device_t parent, device_t s
 					     VIRTIO_NET_CONFIG_MAC+5,
 					     sc->sc_mac[5]);
 	}
-	aprint_normal(": Ethernet address %s\n", ether_sprintf(sc->sc_mac));
-	snprintb(buf, sizeof(buf), VIRTIO_NET_FLAG_BITS, features);
-	aprint_normal_dev(self, "Features: %s\n", buf);
-	aprint_naive("\n");
+
+	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(sc->sc_mac));
 
 #ifdef VIOIF_MPSAFE
 	sc->sc_tx_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
@@ -602,22 +598,22 @@ vioif_attach(device_t parent, device_t s
 	/*
 	 * Allocating a virtqueue for Rx
 	 */
-	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_RX], 0,
+	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_RX], VQ_RX,
 	    MCLBYTES+sizeof(struct virtio_net_hdr), 2, "rx");
 	if (r != 0)
 		goto err;
-	vsc->sc_nvqs = 1;
+	nvqs = 1;
 	sc->sc_vq[VQ_RX].vq_done = vioif_rx_vq_done;
 
 	/*
 	 * Allocating a virtqueue for Tx
 	 */
-	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_TX], 1,
+	r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_TX], VQ_TX,
 	    (sizeof(struct virtio_net_hdr) + (ETHER_MAX_LEN - ETHER_HDR_LEN)),
 	    VIRTIO_NET_TX_MAXNSEGS + 1, "tx");
 	if (r != 0)
 		goto err;
-	vsc->sc_nvqs = 2;
+	nvqs = 2;
 	sc->sc_vq[VQ_TX].vq_done = vioif_tx_vq_done;
 
 	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
@@ -628,7 +624,7 @@ vioif_attach(device_t parent, device_t s
 		/*
 		 * Allocating a virtqueue for control channel
 		 */
-		r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_CTRL], 2,
+		r = virtio_alloc_vq(vsc, &sc->sc_vq[VQ_CTRL], VQ_CTRL,
 		    NBPG, 1, "control");
 		if (r != 0) {
 			aprint_error_dev(self, "failed to allocate "
@@ -641,7 +637,8 @@ vioif_attach(device_t parent, device_t s
 		mutex_init(&sc->sc_ctrl_wait_lock, MUTEX_DEFAULT, IPL_NET);
 		sc->sc_ctrl_inuse = FREE;
 		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
-		vsc->sc_nvqs = 3;
+		sc->sc_has_ctrl = true;
+		nvqs = 3;
 	}
 skip:
 
@@ -659,6 +656,9 @@ skip:
 	if (vioif_alloc_mems(sc) < 0)
 		goto err;
 
+	if (virtio_child_attach_finish(vsc) != 0)
+		goto err;
+
 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
 	ifp->if_softc = sc;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -683,15 +683,15 @@ err:
 	if (sc->sc_rx_lock)
 		mutex_obj_free(sc->sc_rx_lock);
 
-	if (vsc->sc_nvqs == 3) {
+	if (sc->sc_has_ctrl) {
 		cv_destroy(&sc->sc_ctrl_wait);
 		mutex_destroy(&sc->sc_ctrl_wait_lock);
 	}
 
-	while (vsc->sc_nvqs > 0)
-		virtio_free_vq(vsc, &sc->sc_vq[--vsc->sc_nvqs]);
+	while (nvqs > 0)
+		virtio_free_vq(vsc, &sc->sc_vq[--nvqs]);
 
-	vsc->sc_child = (void*)1;
+	virtio_child_attach_failed(vsc);
 	return;
 }
 
@@ -723,10 +723,8 @@ vioif_init(struct ifnet *ifp)
 	vioif_stop(ifp, 0);
 
 	if (!sc->sc_deferred_init_done) {
-		struct virtio_softc *vsc = sc->sc_virtio;
-
 		sc->sc_deferred_init_done = 1;
-		if (vsc->sc_nvqs == 3)
+		if (sc->sc_has_ctrl)
 			vioif_deferred_init(sc->sc_dev);
 	}
 
@@ -766,10 +764,10 @@ vioif_stop(struct ifnet *ifp, int disabl
 		vioif_rx_drain(sc);
 
 	virtio_reinit_start(vsc);
-	virtio_negotiate_features(vsc, vsc->sc_features);
+	virtio_negotiate_features(vsc, virtio_features(vsc));
 	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_RX]);
 	virtio_stop_vq_intr(vsc, &sc->sc_vq[VQ_TX]);
-	if (vsc->sc_nvqs >= 3)
+	if (sc->sc_has_ctrl)
 		virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_CTRL]);
 	virtio_reinit_end(vsc);
 	vioif_updown(sc, false);
@@ -812,7 +810,7 @@ retry:
 		}
 		if (r != 0)
 			panic("enqueue_prep for a tx buffer");
-		r = bus_dmamap_load_mbuf(vsc->sc_dmat,
+		r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
 					 sc->sc_tx_dmamaps[slot],
 					 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
 		if (r != 0) {
@@ -824,7 +822,7 @@ retry:
 		r = virtio_enqueue_reserve(vsc, vq, slot,
 					sc->sc_tx_dmamaps[slot]->dm_nsegs + 1);
 		if (r != 0) {
-			bus_dmamap_unload(vsc->sc_dmat,
+			bus_dmamap_unload(virtio_dmat(vsc),
 					  sc->sc_tx_dmamaps[slot]);
 			ifp->if_flags |= IFF_OACTIVE;
 			vioif_tx_vq_done_locked(vq);
@@ -837,10 +835,10 @@ retry:
 		sc->sc_tx_mbufs[slot] = m;
 
 		memset(&sc->sc_tx_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
 				BUS_DMASYNC_PREWRITE);
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
 				0, sc->sc_txhdr_dmamaps[slot]->dm_mapsize,
 				BUS_DMASYNC_PREWRITE);
 		virtio_enqueue(vsc, vq, slot, sc->sc_txhdr_dmamaps[slot], true);
@@ -915,7 +913,7 @@ vioif_add_rx_mbuf(struct vioif_softc *sc
 	}
 	sc->sc_rx_mbufs[i] = m;
 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
-	r = bus_dmamap_load_mbuf(sc->sc_virtio->sc_dmat,
+	r = bus_dmamap_load_mbuf(virtio_dmat(sc->sc_virtio),
 				 sc->sc_rx_dmamaps[i],
 				 m, BUS_DMA_READ|BUS_DMA_NOWAIT);
 	if (r) {
@@ -931,7 +929,7 @@ vioif_add_rx_mbuf(struct vioif_softc *sc
 static void
 vioif_free_rx_mbuf(struct vioif_softc *sc, int i)
 {
-	bus_dmamap_unload(sc->sc_virtio->sc_dmat, sc->sc_rx_dmamaps[i]);
+	bus_dmamap_unload(virtio_dmat(sc->sc_virtio), sc->sc_rx_dmamaps[i]);
 	m_freem(sc->sc_rx_mbufs[i]);
 	sc->sc_rx_mbufs[i] = NULL;
 }
@@ -979,9 +977,9 @@ vioif_populate_rx_mbufs_locked(struct vi
 			vioif_free_rx_mbuf(sc, slot);
 			break;
 		}
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
 			0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
 			0, MCLBYTES, BUS_DMASYNC_PREREAD);
 		virtio_enqueue(vsc, vq, slot, sc->sc_rxhdr_dmamaps[slot], false);
 		virtio_enqueue(vsc, vq, slot, sc->sc_rx_dmamaps[slot], false);
@@ -1023,15 +1021,15 @@ vioif_rx_deq_locked(struct vioif_softc *
 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 		len -= sizeof(struct virtio_net_hdr);
 		r = 1;
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rxhdr_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rxhdr_dmamaps[slot],
 				0, sizeof(struct virtio_net_hdr),
 				BUS_DMASYNC_POSTREAD);
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_rx_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot],
 				0, MCLBYTES,
 				BUS_DMASYNC_POSTREAD);
 		m = sc->sc_rx_mbufs[slot];
 		KASSERT(m != NULL);
-		bus_dmamap_unload(vsc->sc_dmat, sc->sc_rx_dmamaps[slot]);
+		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_rx_dmamaps[slot]);
 		sc->sc_rx_mbufs[slot] = 0;
 		virtio_dequeue_commit(vsc, vq, slot);
 		m_set_rcvif(m, ifp);
@@ -1053,7 +1051,7 @@ static int
 vioif_rx_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(vsc->sc_child);
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	int r = 0;
 
 #ifdef VIOIF_SOFTINT_INTR
@@ -1115,7 +1113,7 @@ static int
 vioif_tx_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(vsc->sc_child);
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	int r = 0;
 
@@ -1137,7 +1135,7 @@ static int
 vioif_tx_vq_done_locked(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(vsc->sc_child);
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct mbuf *m;
 	int r = 0;
@@ -1147,14 +1145,14 @@ vioif_tx_vq_done_locked(struct virtqueue
 
 	while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
 		r++;
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_txhdr_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_txhdr_dmamaps[slot],
 				0, sizeof(struct virtio_net_hdr),
 				BUS_DMASYNC_POSTWRITE);
-		bus_dmamap_sync(vsc->sc_dmat, sc->sc_tx_dmamaps[slot],
+		bus_dmamap_sync(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot],
 				0, sc->sc_tx_dmamaps[slot]->dm_mapsize,
 				BUS_DMASYNC_POSTWRITE);
 		m = sc->sc_tx_mbufs[slot];
-		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[slot]);
+		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[slot]);
 		sc->sc_tx_mbufs[slot] = 0;
 		virtio_dequeue_commit(vsc, vq, slot);
 		ifp->if_opackets++;
@@ -1179,7 +1177,7 @@ vioif_tx_drain(struct vioif_softc *sc)
 	for (i = 0; i < vq->vq_num; i++) {
 		if (sc->sc_tx_mbufs[i] == NULL)
 			continue;
-		bus_dmamap_unload(vsc->sc_dmat, sc->sc_tx_dmamaps[i]);
+		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_tx_dmamaps[i]);
 		m_freem(sc->sc_tx_mbufs[i]);
 		sc->sc_tx_mbufs[i] = NULL;
 	}
@@ -1196,7 +1194,7 @@ vioif_ctrl_rx(struct vioif_softc *sc, in
 	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
 	int r, slot;
 
-	if (vsc->sc_nvqs < 3)
+	if (!sc->sc_has_ctrl)
 		return ENOTSUP;
 
 	mutex_enter(&sc->sc_ctrl_wait_lock);
@@ -1209,13 +1207,13 @@ vioif_ctrl_rx(struct vioif_softc *sc, in
 	sc->sc_ctrl_cmd->command = cmd;
 	sc->sc_ctrl_rx->onoff = onoff;
 
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
 			0, sizeof(struct virtio_net_ctrl_cmd),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap,
 			0, sizeof(struct virtio_net_ctrl_rx),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
 			0, sizeof(struct virtio_net_ctrl_status),
 			BUS_DMASYNC_PREREAD);
 
@@ -1237,13 +1235,13 @@ vioif_ctrl_rx(struct vioif_softc *sc, in
 	mutex_exit(&sc->sc_ctrl_wait_lock);
 	/* already dequeueued */
 
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
 			sizeof(struct virtio_net_ctrl_cmd),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_rx_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_rx_dmamap, 0,
 			sizeof(struct virtio_net_ctrl_rx),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
 			sizeof(struct virtio_net_ctrl_status),
 			BUS_DMASYNC_POSTREAD);
 
@@ -1292,7 +1290,7 @@ vioif_set_rx_filter(struct vioif_softc *
 	struct virtqueue *vq = &sc->sc_vq[VQ_CTRL];
 	int r, slot;
 
-	if (vsc->sc_nvqs < 3)
+	if (!sc->sc_has_ctrl)
 		return ENOTSUP;
 
 	mutex_enter(&sc->sc_ctrl_wait_lock);
@@ -1304,7 +1302,7 @@ vioif_set_rx_filter(struct vioif_softc *
 	sc->sc_ctrl_cmd->class = VIRTIO_NET_CTRL_MAC;
 	sc->sc_ctrl_cmd->command = VIRTIO_NET_CTRL_MAC_TABLE_SET;
 
-	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap,
+	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap,
 			    sc->sc_ctrl_mac_tbl_uc,
 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
@@ -1314,7 +1312,7 @@ vioif_set_rx_filter(struct vioif_softc *
 		       "error code %d\n", device_xname(sc->sc_dev), r);
 		goto out;
 	}
-	r = bus_dmamap_load(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap,
+	r = bus_dmamap_load(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap,
 			    sc->sc_ctrl_mac_tbl_mc,
 			    (sizeof(struct virtio_net_ctrl_mac_tbl)
 			  + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
@@ -1322,22 +1320,22 @@ vioif_set_rx_filter(struct vioif_softc *
 	if (r) {
 		printf("%s: control command dmamap load failed, "
 		       "error code %d\n", device_xname(sc->sc_dev), r);
-		bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
+		bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
 		goto out;
 	}
 
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap,
 			0, sizeof(struct virtio_net_ctrl_cmd),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
 			(sizeof(struct virtio_net_ctrl_mac_tbl)
 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
 			(sizeof(struct virtio_net_ctrl_mac_tbl)
 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap,
 			0, sizeof(struct virtio_net_ctrl_status),
 			BUS_DMASYNC_PREREAD);
 
@@ -1360,22 +1358,22 @@ vioif_set_rx_filter(struct vioif_softc *
 	mutex_exit(&sc->sc_ctrl_wait_lock);
 	/* already dequeueued */
 
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_cmd_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_cmd_dmamap, 0,
 			sizeof(struct virtio_net_ctrl_cmd),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap, 0,
 			(sizeof(struct virtio_net_ctrl_mac_tbl)
 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_uc->nentries),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap, 0,
 			(sizeof(struct virtio_net_ctrl_mac_tbl)
 			 + ETHER_ADDR_LEN * sc->sc_ctrl_mac_tbl_mc->nentries),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_ctrl_status_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_ctrl_status_dmamap, 0,
 			sizeof(struct virtio_net_ctrl_status),
 			BUS_DMASYNC_POSTREAD);
-	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_uc_dmamap);
-	bus_dmamap_unload(vsc->sc_dmat, sc->sc_ctrl_tbl_mc_dmamap);
+	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_uc_dmamap);
+	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_ctrl_tbl_mc_dmamap);
 
 	if (sc->sc_ctrl_status->ack == VIRTIO_NET_OK)
 		r = 0;
@@ -1399,7 +1397,7 @@ static int
 vioif_ctrl_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioif_softc *sc = device_private(vsc->sc_child);
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	int r, slot;
 
 	r = virtio_dequeue(vsc, vq, &slot, NULL);
@@ -1427,7 +1425,6 @@ vioif_ctrl_vq_done(struct virtqueue *vq)
 static int
 vioif_rx_filter(struct vioif_softc *sc)
 {
-	struct virtio_softc *vsc = sc->sc_virtio;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct ether_multi *enm;
 	struct ether_multistep step;
@@ -1435,7 +1432,7 @@ vioif_rx_filter(struct vioif_softc *sc)
 	int promisc = 0, allmulti = 0, rxfilter = 0;
 	int r;
 
-	if (vsc->sc_nvqs < 3) {	/* no ctrl vq; always promisc */
+	if (!sc->sc_has_ctrl) {	/* no ctrl vq; always promisc */
 		ifp->if_flags |= IFF_PROMISC;
 		return 0;
 	}
@@ -1508,7 +1505,7 @@ vioif_updown(struct vioif_softc *sc, boo
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
 
-	if (!(vsc->sc_features & VIRTIO_NET_F_STATUS))
+	if (!(virtio_features(vsc) & VIRTIO_NET_F_STATUS))
 		return ENODEV;
 	virtio_write_device_config_1(vsc,
 				     VIRTIO_NET_CONFIG_STATUS,

Index: src/sys/dev/pci/ld_virtio.c
diff -u src/sys/dev/pci/ld_virtio.c:1.14 src/sys/dev/pci/ld_virtio.c:1.15
--- src/sys/dev/pci/ld_virtio.c:1.14	Mon Feb 27 21:32:33 2017
+++ src/sys/dev/pci/ld_virtio.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: ld_virtio.c,v 1.14 2017/02/27 21:32:33 jdolecek Exp $	*/
+/*	$NetBSD: ld_virtio.c,v 1.15 2017/03/25 18:02:06 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.14 2017/02/27 21:32:33 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.15 2017/03/25 18:02:06 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -140,7 +140,7 @@ CFATTACH_DECL_NEW(ld_virtio, sizeof(stru
 static int
 ld_virtio_match(device_t parent, cfdata_t match, void *aux)
 {
-	struct virtio_softc *va = aux;
+	struct virtio_attach_args *va = aux;
 
 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
 		return 1;
@@ -160,7 +160,7 @@ ld_virtio_alloc_reqs(struct ld_virtio_so
 	void *vaddr;
 
 	allocsize = sizeof(struct virtio_blk_req) * qsize;
-	r = bus_dmamem_alloc(sc->sc_virtio->sc_dmat, allocsize, 0, 0,
+	r = bus_dmamem_alloc(virtio_dmat(sc->sc_virtio), allocsize, 0, 0,
 			     &sc->sc_reqs_seg, 1, &rsegs, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
@@ -168,7 +168,7 @@ ld_virtio_alloc_reqs(struct ld_virtio_so
 				 "error code %d\n", allocsize, r);
 		goto err_none;
 	}
-	r = bus_dmamem_map(sc->sc_virtio->sc_dmat,
+	r = bus_dmamem_map(virtio_dmat(sc->sc_virtio),
 			   &sc->sc_reqs_seg, 1, allocsize,
 			   &vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
@@ -181,7 +181,7 @@ ld_virtio_alloc_reqs(struct ld_virtio_so
 	memset(vaddr, 0, allocsize);
 	for (i = 0; i < qsize; i++) {
 		struct virtio_blk_req *vr = &sc->sc_reqs[i];
-		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
+		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
 				      offsetof(struct virtio_blk_req, vr_bp),
 				      1,
 				      offsetof(struct virtio_blk_req, vr_bp),
@@ -194,7 +194,7 @@ ld_virtio_alloc_reqs(struct ld_virtio_so
 					 "error code %d\n", r);
 			goto err_reqs;
 		}
-		r = bus_dmamap_load(sc->sc_virtio->sc_dmat, vr->vr_cmdsts,
+		r = bus_dmamap_load(virtio_dmat(sc->sc_virtio), vr->vr_cmdsts,
 				    &vr->vr_hdr,
 				    offsetof(struct virtio_blk_req, vr_bp),
 				    NULL, BUS_DMA_NOWAIT);
@@ -204,7 +204,7 @@ ld_virtio_alloc_reqs(struct ld_virtio_so
 					 "error code %d\n", r);
 			goto err_reqs;
 		}
-		r = bus_dmamap_create(sc->sc_virtio->sc_dmat,
+		r = bus_dmamap_create(virtio_dmat(sc->sc_virtio),
 				      ld->sc_maxxfer,
 				      (ld->sc_maxxfer / NBPG) +
 				      VIRTIO_BLK_MIN_SEGMENTS,
@@ -225,19 +225,19 @@ err_reqs:
 	for (i = 0; i < qsize; i++) {
 		struct virtio_blk_req *vr = &sc->sc_reqs[i];
 		if (vr->vr_cmdsts) {
-			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
+			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
 					   vr->vr_cmdsts);
 			vr->vr_cmdsts = 0;
 		}
 		if (vr->vr_payload) {
-			bus_dmamap_destroy(sc->sc_virtio->sc_dmat,
+			bus_dmamap_destroy(virtio_dmat(sc->sc_virtio),
 					   vr->vr_payload);
 			vr->vr_payload = 0;
 		}
 	}
-	bus_dmamem_unmap(sc->sc_virtio->sc_dmat, sc->sc_reqs, allocsize);
+	bus_dmamem_unmap(virtio_dmat(sc->sc_virtio), sc->sc_reqs, allocsize);
 err_dmamem_alloc:
-	bus_dmamem_free(sc->sc_virtio->sc_dmat, &sc->sc_reqs_seg, 1);
+	bus_dmamem_free(virtio_dmat(sc->sc_virtio), &sc->sc_reqs_seg, 1);
 err_none:
 	return -1;
 }
@@ -249,10 +249,9 @@ ld_virtio_attach(device_t parent, device
 	struct ld_softc *ld = &sc->sc_ld;
 	struct virtio_softc *vsc = device_private(parent);
 	uint32_t features;
-	char buf[256];
 	int qsize, maxxfersize, maxnsegs;
 
-	if (vsc->sc_child != NULL) {
+	if (virtio_child(vsc) != NULL) {
 		aprint_normal(": child already attached for %s; "
 			      "something wrong...\n", device_xname(parent));
 		return;
@@ -261,28 +260,19 @@ ld_virtio_attach(device_t parent, device
 	sc->sc_dev = self;
 	sc->sc_virtio = vsc;
 
-	vsc->sc_child = self;
-	vsc->sc_ipl = IPL_BIO;
-	vsc->sc_vqs = &sc->sc_vq;
-	vsc->sc_nvqs = 1;
-	vsc->sc_config_change = NULL;
-	vsc->sc_intrhand = virtio_vq_intr;
-	vsc->sc_flags = 0;
-
-	features = virtio_negotiate_features(vsc,
-					     (VIRTIO_BLK_F_SIZE_MAX |
-					      VIRTIO_BLK_F_SEG_MAX |
-					      VIRTIO_BLK_F_GEOMETRY |
-					      VIRTIO_BLK_F_RO |
-					      VIRTIO_BLK_F_BLK_SIZE));
+	virtio_child_attach_start(vsc, self, IPL_BIO, &sc->sc_vq,
+	    NULL, virtio_vq_intr, 0,
+	    (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
+	     VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE),
+	    VIRTIO_BLK_FLAG_BITS);
+
+	features = virtio_features(vsc);
+
 	if (features & VIRTIO_BLK_F_RO)
 		sc->sc_readonly = 1;
 	else
 		sc->sc_readonly = 0;
 
-	snprintb(buf, sizeof(buf), VIRTIO_BLK_FLAG_BITS, features);
-	aprint_normal(": Features: %s\n", buf);
-	aprint_naive("\n");
 	if (features & VIRTIO_BLK_F_BLK_SIZE) {
 		ld->sc_secsize = virtio_read_device_config_4(vsc,
 					VIRTIO_BLK_CONFIG_BLK_SIZE);
@@ -332,6 +322,9 @@ ld_virtio_attach(device_t parent, device
 	qsize = sc->sc_vq.vq_num;
 	sc->sc_vq.vq_done = ld_virtio_vq_done;
 
+	if (virtio_child_attach_finish(vsc) != 0)
+		goto err;
+
 	ld->sc_dv = self;
 	ld->sc_secperunit = virtio_read_device_config_8(vsc,
 				VIRTIO_BLK_CONFIG_CAPACITY);
@@ -358,7 +351,7 @@ ld_virtio_attach(device_t parent, device
 	return;
 
 err:
-	vsc->sc_child = (void*)1;
+	virtio_child_attach_failed(vsc);
 	return;
 }
 
@@ -384,7 +377,7 @@ ld_virtio_start(struct ld_softc *ld, str
 	vr = &sc->sc_reqs[slot];
 	KASSERT(vr->vr_bp == NULL);
 
-	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
+	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
 			    bp->b_data, bp->b_bcount, NULL,
 			    ((isread?BUS_DMA_READ:BUS_DMA_WRITE)
 			     |BUS_DMA_NOWAIT));
@@ -398,7 +391,7 @@ ld_virtio_start(struct ld_softc *ld, str
 	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
 	    VIRTIO_BLK_MIN_SEGMENTS);
 	if (r != 0) {
-		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
+		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
 		return r;
 	}
 
@@ -407,13 +400,13 @@ ld_virtio_start(struct ld_softc *ld, str
 	vr->vr_hdr.ioprio = 0;
 	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize / 512;
 
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
 			0, bp->b_bcount,
 			isread?BUS_DMASYNC_PREREAD:BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			offsetof(struct virtio_blk_req, vr_status),
 			sizeof(uint8_t),
 			BUS_DMASYNC_PREREAD);
@@ -440,14 +433,14 @@ ld_virtio_vq_done1(struct ld_virtio_soft
 
 	vr->vr_bp = NULL;
 
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
 			0, bp->b_bcount,
 			(bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
 					      :BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
 			BUS_DMASYNC_POSTREAD);
 
@@ -468,7 +461,7 @@ static int
 ld_virtio_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct ld_virtio_softc *sc = device_private(vsc->sc_child);
+	struct ld_virtio_softc *sc = device_private(virtio_child(vsc));
 	int r = 0;
 	int slot;
 
@@ -505,7 +498,7 @@ ld_virtio_dump(struct ld_softc *ld, void
 		return r;
 	}
 	vr = &sc->sc_reqs[slot];
-	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_payload,
+	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
 			    data, blkcnt*ld->sc_secsize, NULL,
 			    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
 	if (r != 0)
@@ -514,7 +507,7 @@ ld_virtio_dump(struct ld_softc *ld, void
 	r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs + 
 	    VIRTIO_BLK_MIN_SEGMENTS);
 	if (r != 0) {
-		bus_dmamap_unload(vsc->sc_dmat, vr->vr_payload);
+		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
 		return r;
 	}
 
@@ -523,13 +516,13 @@ ld_virtio_dump(struct ld_softc *ld, void
 	vr->vr_hdr.ioprio = 0;
 	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize / 512;
 
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
 			0, blkcnt*ld->sc_secsize,
 			BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			offsetof(struct virtio_blk_req, vr_status),
 			sizeof(uint8_t),
 			BUS_DMASYNC_PREREAD);
@@ -557,13 +550,13 @@ ld_virtio_dump(struct ld_softc *ld, void
 			break;
 	}
 		
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
 			0, blkcnt*ld->sc_secsize,
 			BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			offsetof(struct virtio_blk_req, vr_status),
 			sizeof(uint8_t),
 			BUS_DMASYNC_POSTREAD);
@@ -581,7 +574,7 @@ ld_virtio_detach(device_t self, int flag
 {
 	struct ld_virtio_softc *sc = device_private(self);
 	struct ld_softc *ld = &sc->sc_ld;
-	bus_dma_tag_t dmat = sc->sc_virtio->sc_dmat;
+	bus_dma_tag_t dmat = virtio_dmat(sc->sc_virtio);
 	int r, i, qsize;
 
 	qsize = sc->sc_vq.vq_num;
@@ -603,6 +596,8 @@ ld_virtio_detach(device_t self, int flag
 
 	ldenddetach(ld);
 
+	virtio_child_detach(sc->sc_virtio);
+
 	return 0;
 }
 

Index: src/sys/dev/pci/viomb.c
diff -u src/sys/dev/pci/viomb.c:1.7 src/sys/dev/pci/viomb.c:1.8
--- src/sys/dev/pci/viomb.c:1.7	Tue Sep 27 03:33:32 2016
+++ src/sys/dev/pci/viomb.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: viomb.c,v 1.7 2016/09/27 03:33:32 pgoyette Exp $	*/
+/*	$NetBSD: viomb.c,v 1.8 2017/03/25 18:02:06 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.7 2016/09/27 03:33:32 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.8 2017/03/25 18:02:06 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -112,9 +112,9 @@ CFATTACH_DECL_NEW(viomb, sizeof(struct v
 static int
 viomb_match(device_t parent, cfdata_t match, void *aux)
 {
-	struct virtio_softc *vsc = aux;
+	struct virtio_attach_args *va = aux;
 
-	if (vsc->sc_childdevid == PCI_PRODUCT_VIRTIO_BALLOON)
+	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BALLOON)
 		return 1;
 
 	return 0;
@@ -126,14 +126,13 @@ viomb_attach(device_t parent, device_t s
 	struct viomb_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
 	const struct sysctlnode *node;
-	uint32_t features;
-	char buf[256];
 
-	if (vsc->sc_child != NULL) {
+	if (virtio_child(vsc) != NULL) {
 		aprint_normal(": child already attached for %s; "
 			      "something wrong...\n", device_xname(parent));
 		return;
 	}
+
 	if (balloon_initialized++) {
 		aprint_normal(": balloon already exists; something wrong...\n");
 		goto err_none;
@@ -142,19 +141,6 @@ viomb_attach(device_t parent, device_t s
 	sc->sc_dev = self;
 	sc->sc_virtio = vsc;
 
-	vsc->sc_child = self;
-	vsc->sc_ipl = IPL_VM;
-	vsc->sc_vqs = &sc->sc_vq[0];
-	vsc->sc_nvqs = 2;
-	vsc->sc_config_change = viomb_config_change;
-	vsc->sc_intrhand = virtio_vq_intr;
-	vsc->sc_flags = 0;
-
-	features = virtio_negotiate_features(vsc,
-	    VIRTIO_CONFIG_DEVICE_FEATURES);
-
-	snprintb(buf, sizeof(buf), VIRTIO_BALLOON_FLAG_BITS, features);
-	aprint_normal(": Features: %s\n", buf);
 	if ((virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
 			     sizeof(uint32_t)*PGS_PER_REQ, 1,
 			     "inflate") != 0) ||
@@ -170,13 +156,13 @@ viomb_attach(device_t parent, device_t s
 	sc->sc_inflight = 0;
 	TAILQ_INIT(&sc->sc_balloon_pages);
 
-	if (bus_dmamap_create(vsc->sc_dmat, sizeof(uint32_t)*PGS_PER_REQ,
+	if (bus_dmamap_create(virtio_dmat(vsc), sizeof(uint32_t)*PGS_PER_REQ,
 			      1, sizeof(uint32_t)*PGS_PER_REQ, 0,
 			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
 		aprint_error_dev(sc->sc_dev, "dmamap creation failed.\n");
 		goto err_vq;
 	}
-	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
+	if (bus_dmamap_load(virtio_dmat(vsc), sc->sc_req.bl_dmamap,
 			    &sc->sc_req.bl_pages[0],
 			    sizeof(uint32_t) * PGS_PER_REQ,
 			    NULL, BUS_DMA_NOWAIT)) {
@@ -188,6 +174,13 @@ viomb_attach(device_t parent, device_t s
 	mutex_init(&sc->sc_waitlock, MUTEX_DEFAULT, IPL_VM); /* spin */
 	cv_init(&sc->sc_wait, "balloon");
 
+	virtio_child_attach_start(vsc, self, IPL_VM, sc->sc_vq,
+	    viomb_config_change, virtio_vq_intr, 0,
+	    0, VIRTIO_BALLOON_FLAG_BITS);
+
+	if (virtio_child_attach_finish(vsc) != 0)
+		goto err_mutex;
+
 	if (kthread_create(PRI_IDLE, KTHREAD_MPSAFE, NULL,
 			   viomb_thread, sc, NULL, "viomb")) {
 		aprint_error_dev(sc->sc_dev, "cannot create kthread.\n");
@@ -212,12 +205,12 @@ err_mutex:
 	cv_destroy(&sc->sc_wait);
 	mutex_destroy(&sc->sc_waitlock);
 err_dmamap:
-	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
+	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_req.bl_dmamap);
 err_vq:
 	virtio_free_vq(vsc, &sc->sc_vq[1]);
 	virtio_free_vq(vsc, &sc->sc_vq[0]);
 err_none:
-	vsc->sc_child = (void*)1;
+	virtio_child_attach_failed(vsc);
 	return;
 }
 
@@ -242,7 +235,7 @@ viomb_read_config(struct viomb_softc *sc
 static int
 viomb_config_change(struct virtio_softc *vsc)
 {
-	struct viomb_softc *sc = device_private(vsc->sc_child);
+	struct viomb_softc *sc = device_private(virtio_child(vsc));
 	unsigned int old;
 
 	old = sc->sc_npages;
@@ -308,7 +301,7 @@ inflate(struct viomb_softc *sc)
 		uvm_pglistfree(&b->bl_pglist);
 		return 0;
 	}
-	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), b->bl_dmamap, 0,
 	    sizeof(uint32_t)*nvpages, BUS_DMASYNC_PREWRITE);
 	virtio_enqueue(vsc, vq, slot, b->bl_dmamap, true);
 	virtio_enqueue_commit(vsc, vq, slot, true);
@@ -321,7 +314,7 @@ static int
 inflateq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct viomb_softc *sc = device_private(vsc->sc_child);
+	struct viomb_softc *sc = device_private(virtio_child(vsc));
 
 	mutex_enter(&sc->sc_waitlock);
 	sc->sc_inflate_done = 1;
@@ -351,7 +344,7 @@ inflate_done(struct viomb_softc *sc)
 
 	b = &sc->sc_req;
 	nvpages = b->bl_nentries;
-	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), b->bl_dmamap,
 			offsetof(struct balloon_req, bl_pages),
 			sizeof(uint32_t)*nvpages,
 			BUS_DMASYNC_POSTWRITE);
@@ -421,13 +414,13 @@ deflate(struct viomb_softc *sc)
 		}
 		return 0;
 	}
-	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
+	bus_dmamap_sync(virtio_dmat(vsc), b->bl_dmamap, 0,
 	    sizeof(uint32_t)*nvpages, BUS_DMASYNC_PREWRITE);
 	virtio_enqueue(vsc, vq, slot, b->bl_dmamap, true);
 	virtio_enqueue_commit(vsc, vq, slot, true);
 	sc->sc_inflight -= nvpages;
 
-	if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST))
+	if (!(virtio_features(vsc) & VIRTIO_BALLOON_F_MUST_TELL_HOST))
 		uvm_pglistfree(&b->bl_pglist);
 
 	return 0;
@@ -437,7 +430,7 @@ static int
 deflateq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct viomb_softc *sc = device_private(vsc->sc_child);
+	struct viomb_softc *sc = device_private(virtio_child(vsc));
 
 	mutex_enter(&sc->sc_waitlock);
 	sc->sc_deflate_done = 1;
@@ -466,12 +459,12 @@ deflate_done(struct viomb_softc *sc)
 
 	b = &sc->sc_req;
 	nvpages = b->bl_nentries;
-	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap,
+	bus_dmamap_sync(virtio_dmat(vsc), b->bl_dmamap,
 			offsetof(struct balloon_req, bl_pages),
 			sizeof(uint32_t)*nvpages,
 			BUS_DMASYNC_POSTWRITE);
 
-	if (vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST)
+	if (virtio_features(vsc) & VIRTIO_BALLOON_F_MUST_TELL_HOST)
 		uvm_pglistfree(&b->bl_pglist);
 
 	sc->sc_inflight += nvpages;

Index: src/sys/dev/pci/viornd.c
diff -u src/sys/dev/pci/viornd.c:1.10 src/sys/dev/pci/viornd.c:1.11
--- src/sys/dev/pci/viornd.c:1.10	Wed Nov 30 01:36:38 2016
+++ src/sys/dev/pci/viornd.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/* 	$NetBSD: viornd.c,v 1.10 2016/11/30 01:36:38 christos Exp $ */
+/* 	$NetBSD: viornd.c,v 1.11 2017/03/25 18:02:06 jdolecek Exp $ */
 /*	$OpenBSD: viornd.c,v 1.1 2014/01/21 21:14:58 sf Exp $	*/
 
 /*
@@ -101,7 +101,7 @@ viornd_get(size_t bytes, void *priv)
 		goto out;
 	}
 
-        bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
+        bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap, 0, VIORND_BUFSIZE,
             BUS_DMASYNC_PREREAD);
 	if (virtio_enqueue_prep(vsc, vq, &slot)) {
 		goto out;
@@ -119,43 +119,32 @@ out:
 int
 viornd_match(device_t parent, cfdata_t match, void *aux)
 {
-	struct virtio_softc *va = aux;
+	struct virtio_attach_args *va = aux;
+
 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_ENTROPY)
 		return 1;
+
 	return 0;
 }
 
 void
-viornd_attach( device_t parent, device_t self, void *aux)
+viornd_attach(device_t parent, device_t self, void *aux)
 {
 	struct viornd_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
 	bus_dma_segment_t segs[1];
 	int nsegs;
 	int error;
-	uint32_t features;
-	char buf[256];
 
-	vsc->sc_vqs = &sc->sc_vq;
-	vsc->sc_nvqs = 1;
-	vsc->sc_config_change = NULL;
-	if (vsc->sc_child != NULL)
+	if (virtio_child(vsc) != NULL)
 		panic("already attached to something else");
-	vsc->sc_child = self;
-	vsc->sc_ipl = IPL_NET;
-	vsc->sc_intrhand = virtio_vq_intr;
-	sc->sc_virtio = vsc;
-	sc->sc_dev = self;
-
-	features = virtio_negotiate_features(vsc, 0);
-	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
-	aprint_normal(": Features: %s\n", buf);
-	aprint_naive("\n");
 
+	sc->sc_dev = self;
+	sc->sc_virtio = vsc;
 
 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
 
-	error = bus_dmamem_alloc(vsc->sc_dmat, 
+	error = bus_dmamem_alloc(virtio_dmat(vsc), 
 				 VIRTIO_PAGE_SIZE, 0, 0, segs, 1, &nsegs,
 				 BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW);
 	if (error) {
@@ -164,14 +153,14 @@ viornd_attach( device_t parent, device_t
 		goto alloc_failed;
 	}
 
-	error = bus_dmamem_map(vsc->sc_dmat, segs, nsegs, VIORND_BUFSIZE,
+	error = bus_dmamem_map(virtio_dmat(vsc), segs, nsegs, VIORND_BUFSIZE,
 			       &sc->sc_buf, BUS_DMA_NOWAIT);
 	if (error) {
 		aprint_error_dev(sc->sc_dev, "can't map dmamem: %d\n", error);
 		goto map_failed;
 	}
 
-	error = bus_dmamap_create(vsc->sc_dmat, VIORND_BUFSIZE, 1,
+	error = bus_dmamap_create(virtio_dmat(vsc), VIORND_BUFSIZE, 1,
 				  VIORND_BUFSIZE, 0,
 				  BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
 				  &sc->sc_dmamap);
@@ -181,7 +170,7 @@ viornd_attach( device_t parent, device_t
 		goto create_failed;
 	}
 
-	error = bus_dmamap_load(vsc->sc_dmat, sc->sc_dmamap,
+	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dmamap,
 	    			sc->sc_buf, VIORND_BUFSIZE, NULL,
 				BUS_DMA_NOWAIT|BUS_DMA_READ);
 	if (error) {
@@ -190,6 +179,10 @@ viornd_attach( device_t parent, device_t
 		goto load_failed;
 	}
 
+	virtio_child_attach_start(vsc, self, IPL_NET, &sc->sc_vq,
+	    NULL, virtio_vq_intr, 0,
+	    0, VIRTIO_COMMON_FLAG_BITS);
+
 	error = virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1,
 	    "Entropy request");
 	if (error) {
@@ -197,25 +190,31 @@ viornd_attach( device_t parent, device_t
 				 error);
 		goto vio_failed;
 	}
-
 	sc->sc_vq.vq_done = viornd_vq_done;
-	virtio_start_vq_intr(vsc, &sc->sc_vq);
+
+	if (virtio_child_attach_finish(vsc) != 0) {
+		virtio_free_vq(vsc, &sc->sc_vq);
+		goto vio_failed;
+	}
+
 	rndsource_setcb(&sc->sc_rndsource, viornd_get, sc);
 	rnd_attach_source(&sc->sc_rndsource, device_xname(sc->sc_dev),
 			  RND_TYPE_RNG,
 			  RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB);
 	viornd_get(VIORND_BUFSIZE, sc);
+
 	return;
+
 vio_failed:
-	bus_dmamap_unload(vsc->sc_dmat, sc->sc_dmamap);
+	bus_dmamap_unload(virtio_dmat(vsc), sc->sc_dmamap);
 load_failed:
-	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_dmamap);
+	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dmamap);
 create_failed:
-	bus_dmamem_unmap(vsc->sc_dmat, sc->sc_buf, VIORND_BUFSIZE);
+	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_buf, VIORND_BUFSIZE);
 map_failed:
-	bus_dmamem_free(vsc->sc_dmat, segs, nsegs);
+	bus_dmamem_free(virtio_dmat(vsc), segs, nsegs);
 alloc_failed:
-	vsc->sc_child = (void *)1;	/* XXX bare constant 1 */
+	virtio_child_attach_failed(vsc);
 	return;
 }
 
@@ -223,7 +222,7 @@ int
 viornd_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct viornd_softc *sc = device_private(vsc->sc_child);
+	struct viornd_softc *sc = device_private(virtio_child(vsc));
 	int slot, len;
 
 	mutex_enter(&sc->sc_mutex);
@@ -235,7 +234,7 @@ viornd_vq_done(struct virtqueue *vq)
 
 	sc->sc_active = false;
 
-	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
+	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dmamap, 0, VIORND_BUFSIZE,
 	    BUS_DMASYNC_POSTREAD);
 	if (len > VIORND_BUFSIZE) {
 		aprint_error_dev(sc->sc_dev,

Index: src/sys/dev/pci/vioscsi.c
diff -u src/sys/dev/pci/vioscsi.c:1.11 src/sys/dev/pci/vioscsi.c:1.12
--- src/sys/dev/pci/vioscsi.c:1.11	Mon Mar 13 21:06:50 2017
+++ src/sys/dev/pci/vioscsi.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: vioscsi.c,v 1.11 2017/03/13 21:06:50 jdolecek Exp $	*/
+/*	$NetBSD: vioscsi.c,v 1.12 2017/03/25 18:02:06 jdolecek Exp $	*/
 /*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/
 
 /*
@@ -18,7 +18,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.11 2017/03/13 21:06:50 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.12 2017/03/25 18:02:06 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -79,6 +79,8 @@ static void	 vioscsi_attach(device_t, de
 
 static int	 vioscsi_alloc_reqs(struct vioscsi_softc *,
     struct virtio_softc *, int, uint32_t);
+static void	 vioscsi_free_reqs(struct vioscsi_softc *,
+    struct virtio_softc *);
 static void	 vioscsi_scsipi_request(struct scsipi_channel *,
     scsipi_adapter_req_t, void *);
 static int	 vioscsi_vq_done(struct virtqueue *);
@@ -98,10 +100,11 @@ CFATTACH_DECL_NEW(vioscsi, sizeof(struct
 static int
 vioscsi_match(device_t parent, cfdata_t match, void *aux)
 {
-	struct virtio_softc *va = aux;
+	struct virtio_attach_args *va = aux;
 
 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
 		return 1;
+
 	return 0;
 }
 
@@ -112,11 +115,10 @@ vioscsi_attach(device_t parent, device_t
 	struct virtio_softc *vsc = device_private(parent);
 	struct scsipi_adapter *adapt = &sc->sc_adapter;
 	struct scsipi_channel *chan = &sc->sc_channel;
-	uint32_t features;
-	char buf[256];
-	int rv;
+	int rv, qsize = 0, i = 0;
+	int ipl = IPL_BIO;
 
-	if (vsc->sc_child != NULL) {
+	if (virtio_child(vsc) != NULL) {
 		aprint_error(": parent %s already has a child\n",
 		    device_xname(parent));
 		return;
@@ -124,20 +126,9 @@ vioscsi_attach(device_t parent, device_t
 
 	sc->sc_dev = self;
 
-	vsc->sc_child = self;
-	vsc->sc_ipl = IPL_BIO;
-	vsc->sc_vqs = sc->sc_vqs;
-	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
-	vsc->sc_config_change = NULL;
-	vsc->sc_intrhand = virtio_vq_intr;
-	vsc->sc_flags = 0;
-
-	vsc->sc_flags |= VIRTIO_F_PCI_INTR_MSIX;
-
-	features = virtio_negotiate_features(vsc, 0);
-	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
-	aprint_normal(": Features: %s\n", buf);
-	aprint_naive("\n");
+	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
+    	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
+	    0, VIRTIO_COMMON_FLAG_BITS);
 
 	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
 	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
@@ -156,7 +147,7 @@ vioscsi_attach(device_t parent, device_t
 
 	sc->sc_seg_max = seg_max;
 
-	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
+	for (i = 0; i < __arraycount(sc->sc_vqs); i++) {
 		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
 		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
 		if (rv) {
@@ -169,12 +160,17 @@ vioscsi_attach(device_t parent, device_t
 			sc->sc_vqs[i].vq_done = vioscsi_vq_done;
 	}
 
-	int qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
-	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
+	qsize = sc->sc_vqs[VIOSCSI_VQ_REQUEST].vq_num;
 	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
 		return;
 
-	virtio_start_vq_intr(vsc, &sc->sc_vqs[VIOSCSI_VQ_REQUEST]);
+	aprint_normal_dev(sc->sc_dev,
+	    "cmd_per_lun %zu qsize %zu seg_max %zu max_target %zu"
+	    " max_lun %zu\n",
+	    cmd_per_lun, qsize, seg_max, max_target, max_lun);
+
+	if (virtio_child_attach_finish(vsc) != 0)
+		goto err;
 
 	/*
 	 * Fill in the scsipi_adapter.
@@ -194,12 +190,25 @@ vioscsi_attach(device_t parent, device_t
 	chan->chan_adapter = adapt;
 	chan->chan_bustype = &scsi_bustype;
 	chan->chan_channel = 0;
-	chan->chan_ntargets = max_target + 1;
-	chan->chan_nluns = max_lun + 1;
+	chan->chan_ntargets = MIN(max_target + 1, 8);
+	chan->chan_nluns = MIN(max_lun + 1, 8);
 	chan->chan_id = 0;
 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
 
-	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
+	config_found(self, &sc->sc_channel, scsiprint);
+	return;
+
+err:
+	if (qsize > 0)
+		vioscsi_free_reqs(sc, vsc);
+
+	for (i=0; i < __arraycount(sc->sc_vqs); i++) {
+		if (sc->sc_vqs[i].vq_num > 0)
+			virtio_free_vq(vsc, &sc->sc_vqs[i]);
+	}
+
+	virtio_child_attach_failed(vsc);
+
 }
 
 #define XS2DMA(xs) \
@@ -314,7 +323,7 @@ vioscsi_scsipi_request(struct scsipi_cha
 	memset(req->cdb, 0, sizeof(req->cdb));
 	memcpy(req->cdb, xs->cmd, xs->cmdlen);
 
-	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
+	error = bus_dmamap_load(virtio_dmat(vsc), vr->vr_data,
 	    xs->data, xs->datalen, NULL, XS2DMA(xs));
 	switch (error) {
 	case 0:
@@ -341,21 +350,21 @@ nomore:
 	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
 	if (error) {
 		DPRINTF(("%s: error reserving %d\n", __func__, error));
-		bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
+		bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
 		xs->error = XS_RESOURCE_SHORTAGE;
 		goto nomore;
 	}
 
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_req),
 	    sizeof(struct virtio_scsi_req_hdr),
 	    BUS_DMASYNC_PREWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_res),
             sizeof(struct virtio_scsi_res_hdr),
 	    BUS_DMASYNC_PREREAD);
 	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
-		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
+		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
 		    XS2DMAPRE(xs));
 
 	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
@@ -377,7 +386,7 @@ nomore:
 	// XXX: do this better.
 	int timeout = 1000;
 	do {
-		(*vsc->sc_intrhand)(vsc);
+		virtio_intrhand(vsc);
 		if (vr->vr_xs != xs)
 			break;
 		delay(1000);
@@ -403,15 +412,15 @@ vioscsi_req_done(struct vioscsi_softc *s
 
 	DPRINTF(("%s: enter\n", __func__));
 
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_req),
 	    sizeof(struct virtio_scsi_req_hdr),
 	    BUS_DMASYNC_POSTWRITE);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_res),
 	    sizeof(struct virtio_scsi_res_hdr),
 	    BUS_DMASYNC_POSTREAD);
-	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
+	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
 	    XS2DMAPOST(xs));
 
 	xs->status = vr->vr_res.status;
@@ -442,7 +451,7 @@ vioscsi_req_done(struct vioscsi_softc *s
 	DPRINTF(("%s: done %d, %d, %d\n", __func__,
 	    xs->error, xs->status, xs->resid));
 
-	bus_dmamap_unload(vsc->sc_dmat, vr->vr_data);
+	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_data);
 	vr->vr_xs = NULL;
 
 	scsipi_done(xs);
@@ -452,7 +461,7 @@ static int
 vioscsi_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
-	struct vioscsi_softc *sc = device_private(vsc->sc_child);
+	struct vioscsi_softc *sc = device_private(virtio_child(vsc));
 	int ret = 0;
 
 	DPRINTF(("%s: enter\n", __func__));
@@ -507,7 +516,7 @@ vioscsi_alloc_reqs(struct vioscsi_softc 
 	struct vioscsi_req *vr;
 
 	allocsize = qsize * sizeof(struct vioscsi_req);
-	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
+	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
 	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
@@ -515,12 +524,12 @@ vioscsi_alloc_reqs(struct vioscsi_softc 
 		    allocsize, r);
 		return r;
 	}
-	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
+	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1,
 	    allocsize, &vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
 		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
-		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
+		bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
 		return r;
 	}
 	memset(vaddr, 0, allocsize);
@@ -532,7 +541,7 @@ vioscsi_alloc_reqs(struct vioscsi_softc 
 	for (slot=0; slot < qsize; slot++) {
 		vr = &sc->sc_reqs[slot];
 
-		r = bus_dmamap_create(vsc->sc_dmat,
+		r = bus_dmamap_create(virtio_dmat(vsc),
 		    offsetof(struct vioscsi_req, vr_xs), 1,
 		    offsetof(struct vioscsi_req, vr_xs), 0,
 		    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
@@ -543,7 +552,7 @@ vioscsi_alloc_reqs(struct vioscsi_softc 
 			goto cleanup;
 		}
 
-		r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
+		r = bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, sc->sc_seg_max,
 		    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
 		if (r != 0) {
 			aprint_error_dev(sc->sc_dev,
@@ -552,7 +561,7 @@ vioscsi_alloc_reqs(struct vioscsi_softc 
 			goto cleanup;
 		}
 
-		r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
+		r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_control,
 		    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
 		    BUS_DMA_NOWAIT);
 		if (r != 0) {
@@ -571,18 +580,43 @@ cleanup:
 
 		if (vr->vr_control) {
 			/* this will also unload the mapping if loaded */
-			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
+			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
 			vr->vr_control = NULL;
 		}
 
 		if (vr->vr_data) {
-			bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
+			bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
 			vr->vr_data = NULL;
 		}
 	}
 
-	bus_dmamem_unmap(vsc->sc_dmat, vaddr, allocsize);
-	bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
+	bus_dmamem_unmap(virtio_dmat(vsc), vaddr, allocsize);
+	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
 
 	return r;
 }
+
+static void
+vioscsi_free_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc)
+{
+	int slot;
+	struct vioscsi_req *vr;
+
+	if (sc->sc_nreqs == 0) {
+		/* Not allocated */
+		return;
+	}
+
+	/* Free request maps */ 
+	for (slot=0; slot < sc->sc_nreqs; slot++) {
+		vr = &sc->sc_reqs[slot];
+
+		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_control);
+		bus_dmamap_destroy(virtio_dmat(vsc), vr->vr_data);
+	}
+
+	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_reqs,
+			 sc->sc_nreqs * sizeof(struct vioscsi_req));
+	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_reqs_segs[0], 1);
+}
+

Index: src/sys/dev/pci/virtio.c
diff -u src/sys/dev/pci/virtio.c:1.21 src/sys/dev/pci/virtio.c:1.22
--- src/sys/dev/pci/virtio.c:1.21	Sat Mar 25 17:50:51 2017
+++ src/sys/dev/pci/virtio.c	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: virtio.c,v 1.21 2017/03/25 17:50:51 jdolecek Exp $	*/
+/*	$NetBSD: virtio.c,v 1.22 2017/03/25 18:02:06 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.21 2017/03/25 17:50:51 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.22 2017/03/25 18:02:06 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -41,6 +41,8 @@ __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
+#define VIRTIO_PRIVATE
+
 #include <dev/pci/virtioreg.h>
 #include <dev/pci/virtiovar.h>
 
@@ -59,6 +61,7 @@ static int	virtio_setup_msix_interrupts(
 static int	virtio_setup_intx_interrupt(struct virtio_softc *,
 		    struct pci_attach_args *);
 static int	virtio_setup_interrupts(struct virtio_softc *);
+static void	virtio_free_interrupts(struct virtio_softc *);
 static void	virtio_soft_intr(void *arg);
 static void	virtio_init_vq(struct virtio_softc *,
 		    struct virtqueue *, const bool);
@@ -349,10 +352,50 @@ virtio_setup_interrupts(struct virtio_so
 		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
 	}
 
+	KASSERT(sc->sc_soft_ih == NULL);
+	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
+		u_int flags = SOFTINT_NET;
+		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+			flags |= SOFTINT_MPSAFE;
+
+		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
+		if (sc->sc_soft_ih == NULL) {
+			virtio_free_interrupts(sc);
+			aprint_error_dev(sc->sc_dev,
+			    "failed to establish soft interrupt\n");
+			return -1;
+		}
+	}
+
 	return 0;
 }
 
 static void
+virtio_free_interrupts(struct virtio_softc *sc)
+{
+	for (int i = 0; i < sc->sc_ihs_num; i++) {
+		if (sc->sc_ihs[i] == NULL)
+			continue;
+		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
+		sc->sc_ihs[i] = NULL;
+	}
+
+	if (sc->sc_ihs_num > 0)
+		pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);
+
+	if (sc->sc_soft_ih) {
+		softint_disestablish(sc->sc_soft_ih);
+		sc->sc_soft_ih = NULL;
+	}
+
+	if (sc->sc_ihs != NULL) {
+		kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
+		sc->sc_ihs = NULL;
+	}
+	sc->sc_ihs_num = 0;
+}
+
+static void
 virtio_attach(device_t parent, device_t self, void *aux)
 {
 	struct virtio_softc *sc = device_private(self);
@@ -398,7 +441,6 @@ virtio_attach(device_t parent, device_t 
 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
 
-	/* XXX: use softc as aux... */
 	sc->sc_childdevid = PCI_SUBSYS_ID(id);
 	sc->sc_child = NULL;
 	sc->sc_pa = *pa;
@@ -411,43 +453,34 @@ static int
 virtio_rescan(device_t self, const char *attr, const int *scan_flags)
 {
 	struct virtio_softc *sc;
-	int r;
+	struct virtio_attach_args va;
 
 	sc = device_private(self);
 	if (sc->sc_child)	/* Child already attached? */
 		return 0;
-	config_found_ia(self, attr, sc, NULL);
+
+	memset(&va, 0, sizeof(va));
+	va.sc_childdevid = sc->sc_childdevid;
+
+	config_found_ia(self, attr, &va, NULL);
+
 	if (sc->sc_child == NULL) {
 		aprint_error_dev(self,
 				 "no matching child driver; not configured\n");
 		return 0;
 	}
-	if (sc->sc_child == (void*)1) { /* this shows error */
+
+	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
 		aprint_error_dev(self,
 				 "virtio configuration failed\n");
-		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
 		return 0;
 	}
 
-	r = virtio_setup_interrupts(sc);
-	if (r != 0) {
-		aprint_error_dev(self, "failed to setup interrupts\n");
-		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
-		return 0;
-	}
-
-	sc->sc_soft_ih = NULL;
-	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
-		u_int flags = SOFTINT_NET;
-		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
-			flags |= SOFTINT_MPSAFE;
-
-		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
-		if (sc->sc_soft_ih == NULL)
-			aprint_error(": failed to establish soft interrupt\n");
-	}
-
-	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
+	/*
+	 * Make sure child drivers initialize interrupts via a call
+	 * to virtio_child_attach_finish().
+	 */
+	KASSERT(sc->sc_ihs_num != 0);
 
 	return 0;
 }
@@ -457,24 +490,18 @@ virtio_detach(device_t self, int flags)
 {
 	struct virtio_softc *sc = device_private(self);
 	int r;
-	int i;
 
-	if (sc->sc_child != 0 && sc->sc_child != (void*)1) {
+	if (sc->sc_child != NULL) {
 		r = config_detach(sc->sc_child, flags);
 		if (r)
 			return r;
 	}
-	KASSERT(sc->sc_child == 0 || sc->sc_child == (void*)1);
-	KASSERT(sc->sc_vqs == 0);
-	for (i = 0; i < sc->sc_ihs_num; i++) {
-		if (sc->sc_ihs[i] == NULL)
-			continue;
-		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
-	}
-	pci_intr_release(sc->sc_pc, sc->sc_ihp, sc->sc_ihs_num);
-	if (sc->sc_ihs != NULL)
-		kmem_free(sc->sc_ihs, sizeof(*sc->sc_ihs) * sc->sc_ihs_num);
-	sc->sc_ihs_num = 0;
+
+	/* Check that child detached properly */
+	KASSERT(sc->sc_child == NULL);
+	KASSERT(sc->sc_vqs == NULL);
+	KASSERT(sc->sc_ihs_num == 0);
+
 	if (sc->sc_iosize)
 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_iosize);
 	sc->sc_iosize = 0;
@@ -860,6 +887,9 @@ virtio_alloc_vq(struct virtio_softc *sc,
 #define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
 				 ~(VIRTIO_PAGE_SIZE-1))
 
+	/* Make sure callers allocate vqs in order */
+	KASSERT(sc->sc_nvqs == index);
+
 	memset(vq, 0, sizeof(*vq));
 
 	nbo_bus_space_write_2(sc->sc_iot, sc->sc_ioh,
@@ -961,6 +991,9 @@ virtio_alloc_vq(struct virtio_softc *sc,
 				   "using %d byte (%d entries) "
 				   "indirect descriptors\n",
 				   allocsize3, maxnsegs * vq_size);
+
+	sc->sc_nvqs++;
+
 	return 0;
 
 err:
@@ -1010,6 +1043,8 @@ virtio_free_vq(struct virtio_softc *sc, 
 	mutex_destroy(&vq->vq_aring_lock);
 	memset(vq, 0, sizeof(*vq));
 
+	sc->sc_nvqs--;
+
 	return 0;
 }
 
@@ -1337,6 +1372,94 @@ virtio_dequeue_commit(struct virtio_soft
 	return 0;
 }
 
+/*
+ * Attach a child, fill all the members.
+ */
+void
+virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl, 
+		    struct virtqueue *vqs,
+		    virtio_callback config_change,
+		    virtio_callback intr_hand,
+		    int req_flags, int req_features, const char *feat_bits)
+{
+	char buf[256];
+	int features;
+
+	sc->sc_child = child;
+	sc->sc_ipl = ipl;
+	sc->sc_vqs = vqs;
+	sc->sc_config_change = config_change;
+	sc->sc_intrhand = intr_hand;
+	sc->sc_flags = req_flags;
+
+	features = virtio_negotiate_features(sc, req_features);
+	snprintb(buf, sizeof(buf), feat_bits, features);
+	aprint_normal(": Features: %s\n", buf);
+	aprint_naive("\n");
+}
+
+int
+virtio_child_attach_finish(struct virtio_softc *sc)
+{
+	int r;
+
+	r = virtio_setup_interrupts(sc);
+	if (r != 0) {
+		aprint_error_dev(sc->sc_dev, "failed to setup interrupts\n");
+		virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
+		return 1;
+	}
+
+	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
+
+	return 0;
+}
+
+void
+virtio_child_detach(struct virtio_softc *sc)
+{
+	sc->sc_child = NULL;
+	sc->sc_vqs = NULL;
+
+	virtio_device_reset(sc);
+
+	virtio_free_interrupts(sc);
+}
+
+void
+virtio_child_attach_failed(struct virtio_softc *sc)
+{
+	virtio_child_detach(sc);
+
+	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
+
+	sc->sc_child = VIRTIO_CHILD_FAILED;
+}
+
+bus_dma_tag_t
+virtio_dmat(struct virtio_softc *sc)
+{
+	return sc->sc_dmat;
+}
+
+device_t
+virtio_child(struct virtio_softc *sc)
+{
+	return sc->sc_child;
+}
+
+int
+virtio_intrhand(struct virtio_softc *sc)
+{
+	return (sc->sc_intrhand)(sc);
+}
+
+uint32_t
+virtio_features(struct virtio_softc *sc)
+{
+	return sc->sc_features;
+}
+
 MODULE(MODULE_CLASS_DRIVER, virtio, "pci");
  
 #ifdef _MODULE
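
Taken together, the new entry points give a child driver a uniform
attach sequence: virtio_child_attach_start(), one or more
virtio_alloc_vq() calls, then virtio_child_attach_finish(), with
virtio_child_attach_failed() on any error.  A minimal sketch follows;
the mydev_* names, the MYDEV_* macros and the virtio_alloc_vq()
argument values are assumptions for illustration, not part of this
commit.

static void
mydev_attach(device_t parent, device_t self, void *aux)
{
	struct mydev_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);

	sc->sc_dev = self;

	/* Register callbacks with the parent, negotiate and print features. */
	virtio_child_attach_start(vsc, self, IPL_NET, sc->sc_vq,
	    NULL /* config_change */, virtio_vq_intr, 0 /* req_flags */,
	    MYDEV_FEATURES, MYDEV_FLAG_BITS);

	/* Queues must now be allocated in order (index 0, 1, ...). */
	if (virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, MAXPHYS,
	    MYDEV_MAXNSEGS, "request") != 0) {
		virtio_child_attach_failed(vsc);
		return;
	}
	sc->sc_vq[0].vq_done = mydev_vq_done;

	/* Establishes interrupts and sets DRIVER_OK. */
	if (virtio_child_attach_finish(vsc) != 0) {
		virtio_free_vq(vsc, &sc->sc_vq[0]);
		virtio_child_attach_failed(vsc);
		return;
	}
}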

Index: src/sys/dev/pci/virtiovar.h
diff -u src/sys/dev/pci/virtiovar.h:1.6 src/sys/dev/pci/virtiovar.h:1.7
--- src/sys/dev/pci/virtiovar.h:1.6	Tue Sep 27 03:33:32 2016
+++ src/sys/dev/pci/virtiovar.h	Sat Mar 25 18:02:06 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: virtiovar.h,v 1.6 2016/09/27 03:33:32 pgoyette Exp $	*/
+/*	$NetBSD: virtiovar.h,v 1.7 2017/03/25 18:02:06 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -118,6 +118,13 @@ struct virtqueue {
 	int			(*vq_done)(struct virtqueue*);
 };
 
+struct virtio_attach_args {
+	int			sc_childdevid;
+};
+
+typedef int (*virtio_callback)(struct virtio_softc*);
+
+#ifdef VIRTIO_PRIVATE
 struct virtio_softc {
 	device_t		sc_dev;
 	pci_chipset_tag_t	sc_pc;
@@ -144,18 +151,21 @@ struct virtio_softc {
 	struct virtqueue	*sc_vqs; /* set by child */
 
 	int			sc_childdevid;
-	device_t		sc_child; /* set by child */
-	int			(*sc_config_change)(struct virtio_softc*);
-					 /* set by child */
-	int			(*sc_intrhand)(struct virtio_softc*);
-					 /* set by child */
+	device_t		sc_child; 		/* set by child */
+	virtio_callback		sc_config_change; 	/* set by child */
+	virtio_callback		sc_intrhand;		/* set by child */
 	struct pci_attach_args	sc_pa;	/* need for rescan to set interrupts */
 };
+#else
+struct virtio_softc;
+#endif
 
 #define VIRTIO_F_PCI_INTR_MPSAFE	(1 << 0)
 #define VIRTIO_F_PCI_INTR_SOFTINT	(1 << 1)
 #define VIRTIO_F_PCI_INTR_MSIX		(1 << 2)
 
+#define	VIRTIO_CHILD_FAILED		((void *)1)
+
 /* public interface */
 uint32_t virtio_negotiate_features(struct virtio_softc*, uint32_t);
 
@@ -174,6 +184,13 @@ int virtio_free_vq(struct virtio_softc*,
 void virtio_reset(struct virtio_softc *);
 void virtio_reinit_start(struct virtio_softc *);
 void virtio_reinit_end(struct virtio_softc *);
+void virtio_child_attach_start(struct virtio_softc *, device_t, int,
+                    struct virtqueue *,
+                    virtio_callback, virtio_callback, int,
+		    int, const char *);
+int virtio_child_attach_finish(struct virtio_softc *);
+void virtio_child_attach_failed(struct virtio_softc *);
+void virtio_child_detach(struct virtio_softc *);
 
 int virtio_enqueue_prep(struct virtio_softc*, struct virtqueue*, int*);
 int virtio_enqueue_reserve(struct virtio_softc*, struct virtqueue*, int, int);
@@ -191,4 +208,10 @@ int virtio_vq_intr(struct virtio_softc *
 void virtio_stop_vq_intr(struct virtio_softc *, struct virtqueue *);
 void virtio_start_vq_intr(struct virtio_softc *, struct virtqueue *);
 
+/* encapsulation */
+bus_dma_tag_t	virtio_dmat(struct virtio_softc *);
+device_t	virtio_child(struct virtio_softc *);
+int		virtio_intrhand(struct virtio_softc *);
+uint32_t	virtio_features(struct virtio_softc *);
+
 #endif /* _DEV_PCI_VIRTIOVAR_H_ */
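
With struct virtio_softc now opaque outside virtio.c, child code
reaches the DMA tag, the negotiated features and the child device_t
only through the accessors above.  A brief sketch; the mydev_* names,
the sc_dmamap field and the MYDEV_F_FANCY feature bit are assumptions
for illustration.

static int
mydev_setup_dma(struct mydev_softc *sc, struct virtio_softc *vsc)
{
	/* virtio_features() and virtio_dmat() replace the former
	   direct vsc->sc_features and vsc->sc_dmat accesses. */
	if (virtio_features(vsc) & MYDEV_F_FANCY)
		aprint_normal_dev(virtio_child(vsc), "fancy mode\n");

	return bus_dmamap_create(virtio_dmat(vsc), MAXPHYS, 1, MAXPHYS,
	    0, BUS_DMA_NOWAIT, &sc->sc_dmamap);
}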
