Author: gibbs
Date: Fri May 31 21:05:07 2013
New Revision: 251195
URL: http://svnweb.freebsd.org/changeset/base/251195

Log:
  Style cleanups.  No intended functional changes.
  
   o This driver is the "xbd" driver, not the "blkfront", "blkif", "xbf", or
     "xb" driver.  Use the "xbd_" naming conventions for all functions,
     structures, and constants.
   o The prevailing convention for structure fields in this driver is to
     prefix them with an abbreviation of the structure type.  Update
     "recently added" fields to match this style.
   o Remove unused data structures.
   o Remove superfluous casts.
   o Make a pass over the whole driver and bring it closer to
     style(9) conformance.
  
  Sponsored by: Spectra Logic Corporation
  MFC after:    1 week
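  
  As a concrete illustration of the field-prefix convention described
  above, here is a heavily abbreviated sketch assembled from names that
  appear in the diff below.  Most members are omitted, and field types
  not spelled out in the diff (e.g. cm_status) are shown as plain C
  guesses; consult block.h for the real definitions:
  
    /* Softc fields carry an "xbd_" prefix derived from the type name. */
    struct xbd_softc {
            device_t          xbd_dev;      /* NewBus device handle */
            struct disk      *xbd_disk;     /* GEOM disk instance */
            struct mtx        xbd_io_lock;  /* protects the I/O queues */
    };
  
    /* Command fields carry "cm_" for struct xbd_command. */
    struct xbd_command {
            struct xbd_softc *cm_sc;        /* owning softc */
            struct bio       *cm_bp;        /* bio serviced by this command */
            int               cm_status;    /* blkif response status */
    };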

Modified:
  head/sys/dev/xen/blkfront/blkfront.c
  head/sys/dev/xen/blkfront/block.h

Modified: head/sys/dev/xen/blkfront/blkfront.c
==============================================================================
--- head/sys/dev/xen/blkfront/blkfront.c        Fri May 31 20:46:08 2013        (r251194)
+++ head/sys/dev/xen/blkfront/blkfront.c        Fri May 31 21:05:07 2013        (r251195)
@@ -1,6 +1,7 @@
 /*
  * XenBSD block device driver
  *
+ * Copyright (c) 2010-2013 Spectra Logic Corporation
  * Copyright (c) 2009 Scott Long, Yahoo!
  * Copyright (c) 2009 Frank Suchomel, Citrix
  * Copyright (c) 2009 Doug F. Rabson, Citrix
@@ -70,17 +71,17 @@ __FBSDID("$FreeBSD$");
 #include "xenbus_if.h"
 
 /* prototypes */
-static void xb_free_command(struct xb_command *cm);
-static void xb_startio(struct xb_softc *sc);
-static void blkfront_connect(struct xb_softc *);
-static void blkfront_closing(device_t);
-static int blkfront_detach(device_t);
-static int setup_blkring(struct xb_softc *);
-static void blkif_int(void *);
-static void blkfront_initialize(struct xb_softc *);
-static int blkif_completion(struct xb_command *);
-static void blkif_free(struct xb_softc *);
-static void blkif_queue_cb(void *, bus_dma_segment_t *, int, int);
+static void xbd_free_command(struct xbd_command *cm);
+static void xbd_startio(struct xbd_softc *sc);
+static void xbd_connect(struct xbd_softc *);
+static void xbd_closing(device_t);
+static int xbd_detach(device_t);
+static int xbd_setup_ring(struct xbd_softc *);
+static void xbd_int(void *);
+static void xbd_initialize(struct xbd_softc *);
+static int xbd_completion(struct xbd_command *);
+static void xbd_free(struct xbd_softc *);
+static void xbd_queue_cb(void *, bus_dma_segment_t *, int, int);
 
 static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data");
 
@@ -93,18 +94,18 @@ static MALLOC_DEFINE(M_XENBLOCKFRONT, "x
 static void vbd_update(void);
 #endif
 
-#define BLKIF_STATE_DISCONNECTED 0
-#define BLKIF_STATE_CONNECTED    1
-#define BLKIF_STATE_SUSPENDED    2
+#define XBD_STATE_DISCONNECTED 0
+#define XBD_STATE_CONNECTED    1
+#define XBD_STATE_SUSPENDED    2
 
 #ifdef notyet
-static char *blkif_state_name[] = {
-       [BLKIF_STATE_DISCONNECTED] = "disconnected",
-       [BLKIF_STATE_CONNECTED]    = "connected",
-       [BLKIF_STATE_SUSPENDED]    = "closed",
+static char *xbd_state_name[] = {
+       [XBD_STATE_DISCONNECTED] = "disconnected",
+       [XBD_STATE_CONNECTED]    = "connected",
+       [XBD_STATE_SUSPENDED]    = "closed",
 };
 
-static char * blkif_status_name[] = {
+static char * xbd_status_name[] = {
        [BLKIF_INTERFACE_STATUS_CLOSED]       = "closed",
        [BLKIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
        [BLKIF_INTERFACE_STATUS_CONNECTED]    = "connected",
@@ -118,17 +119,17 @@ static char * blkif_status_name[] = {
 #define DPRINTK(fmt, args...) 
 #endif
 
-static int blkif_open(struct disk *dp);
-static int blkif_close(struct disk *dp);
-static int blkif_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
-static int blkif_queue_request(struct xb_softc *sc, struct xb_command *cm);
-static void xb_strategy(struct bio *bp);
+static int xbd_open(struct disk *dp);
+static int xbd_close(struct disk *dp);
+static int xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td);
+static int xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm);
+static void xbd_strategy(struct bio *bp);
 
 // In order to quiesce the device during kernel dumps, outstanding requests to
 // DOM0 for disk reads/writes need to be accounted for.
-static int     xb_dump(void *, void *, vm_offset_t, off_t, size_t);
+static int     xbd_dump(void *, void *, vm_offset_t, off_t, size_t);
 
-/* XXX move to xb_vbd.c when VBD update support is added */
+/* XXX move to xbd_vbd.c when VBD update support is added */
 #define MAX_VBDS 64
 
 #define XBD_SECTOR_SIZE                512     /* XXX: assume for now */
@@ -140,7 +141,7 @@ static      int     xb_dump(void *, void *, vm_of
  * with blkfront as the emulated drives, easing transition slightly.
  */
 static void
-blkfront_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
+xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
 {
        static struct vdev_info {
                int major;
@@ -203,34 +204,34 @@ blkfront_vdevice_to_unit(uint32_t vdevic
 }
 
 int
-xlvbd_add(struct xb_softc *sc, blkif_sector_t sectors,
+xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
     int vdevice, uint16_t vdisk_info, unsigned long sector_size)
 {
-       int     unit, error = 0;
+       int unit, error = 0;
        const char *name;
 
-       blkfront_vdevice_to_unit(vdevice, &unit, &name);
+       xbd_vdevice_to_unit(vdevice, &unit, &name);
 
-       sc->xb_unit = unit;
+       sc->xbd_unit = unit;
 
        if (strcmp(name, "xbd"))
-               device_printf(sc->xb_dev, "attaching as %s%d\n", name, unit);
+               device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit);
 
-       sc->xb_disk = disk_alloc();
-       sc->xb_disk->d_unit = sc->xb_unit;
-       sc->xb_disk->d_open = blkif_open;
-       sc->xb_disk->d_close = blkif_close;
-       sc->xb_disk->d_ioctl = blkif_ioctl;
-       sc->xb_disk->d_strategy = xb_strategy;
-       sc->xb_disk->d_dump = xb_dump;
-       sc->xb_disk->d_name = name;
-       sc->xb_disk->d_drv1 = sc;
-       sc->xb_disk->d_sectorsize = sector_size;
-
-       sc->xb_disk->d_mediasize = sectors * sector_size;
-       sc->xb_disk->d_maxsize = sc->max_request_size;
-       sc->xb_disk->d_flags = 0;
-       disk_create(sc->xb_disk, DISK_VERSION);
+       sc->xbd_disk = disk_alloc();
+       sc->xbd_disk->d_unit = sc->xbd_unit;
+       sc->xbd_disk->d_open = xbd_open;
+       sc->xbd_disk->d_close = xbd_close;
+       sc->xbd_disk->d_ioctl = xbd_ioctl;
+       sc->xbd_disk->d_strategy = xbd_strategy;
+       sc->xbd_disk->d_dump = xbd_dump;
+       sc->xbd_disk->d_name = name;
+       sc->xbd_disk->d_drv1 = sc;
+       sc->xbd_disk->d_sectorsize = sector_size;
+
+       sc->xbd_disk->d_mediasize = sectors * sector_size;
+       sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
+       sc->xbd_disk->d_flags = 0;
+       disk_create(sc->xbd_disk, DISK_VERSION);
 
        return error;
 }
@@ -242,9 +243,9 @@ xlvbd_add(struct xb_softc *sc, blkif_sec
  * the sortq and kick the controller.
  */
 static void
-xb_strategy(struct bio *bp)
+xbd_strategy(struct bio *bp)
 {
-       struct xb_softc *sc = (struct xb_softc *)bp->bio_disk->d_drv1;
+       struct xbd_softc *sc = bp->bio_disk->d_drv1;
 
        /* bogus disk? */
        if (sc == NULL) {
@@ -258,25 +259,25 @@ xb_strategy(struct bio *bp)
        /*
         * Place it in the queue of disk activities for this disk
         */
-       mtx_lock(&sc->xb_io_lock);
+       mtx_lock(&sc->xbd_io_lock);
 
-       xb_enqueue_bio(sc, bp);
-       xb_startio(sc);
+       xbd_enqueue_bio(sc, bp);
+       xbd_startio(sc);
 
-       mtx_unlock(&sc->xb_io_lock);
+       mtx_unlock(&sc->xbd_io_lock);
        return;
 }
 
 static void
-xb_bio_complete(struct xb_softc *sc, struct xb_command *cm)
+xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
 {
        struct bio *bp;
 
-       bp = cm->bp;
+       bp = cm->cm_bp;
 
-       if ( unlikely(cm->status != BLKIF_RSP_OKAY) ) {
+       if (unlikely(cm->cm_status != BLKIF_RSP_OKAY)) {
                disk_err(bp, "disk error" , -1, 0);
-               printf(" status: %x\n", cm->status);
+               printf(" status: %x\n", cm->cm_status);
                bp->bio_flags |= BIO_ERROR;
        }
 
@@ -285,24 +286,24 @@ xb_bio_complete(struct xb_softc *sc, str
        else
                bp->bio_resid = 0;
 
-       xb_free_command(cm);
+       xbd_free_command(cm);
        biodone(bp);
 }
 
 // Quiesce the disk writes for a dump file before allowing the next buffer.
 static void
-xb_quiesce(struct xb_softc *sc)
+xbd_quiesce(struct xbd_softc *sc)
 {
-       int             mtd;
+       int mtd;
 
        // While there are outstanding requests
-       while (!TAILQ_EMPTY(&sc->cm_busy)) {
-               RING_FINAL_CHECK_FOR_RESPONSES(&sc->ring, mtd);
+       while (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
+               RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd);
                if (mtd) {
                        /* Recieved request completions, update queue. */
-                       blkif_int(sc);
+                       xbd_int(sc);
                }
-               if (!TAILQ_EMPTY(&sc->cm_busy)) {
+               if (!TAILQ_EMPTY(&sc->xbd_cm_busy)) {
                        /*
                         * Still pending requests, wait for the disk i/o
                         * to complete.
@@ -314,60 +315,60 @@ xb_quiesce(struct xb_softc *sc)
 
 /* Kernel dump function for a paravirtualized disk device */
 static void
-xb_dump_complete(struct xb_command *cm)
+xbd_dump_complete(struct xbd_command *cm)
 {
 
-       xb_enqueue_complete(cm);
+       xbd_enqueue_complete(cm);
 }
 
 static int
-xb_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
-        size_t length)
+xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
+    size_t length)
 {
-       struct  disk    *dp = arg;
-       struct xb_softc *sc = (struct xb_softc *) dp->d_drv1;
-       struct xb_command *cm;
-       size_t          chunk;
-       int             sbp;
-       int             rc = 0;
+       struct disk *dp = arg;
+       struct xbd_softc *sc = dp->d_drv1;
+       struct xbd_command *cm;
+       size_t chunk;
+       int sbp;
+       int rc = 0;
 
        if (length <= 0)
                return (rc);
 
-       xb_quiesce(sc); /* All quiet on the western front. */
+       xbd_quiesce(sc);        /* All quiet on the western front. */
 
        /*
         * If this lock is held, then this module is failing, and a
         * successful kernel dump is highly unlikely anyway.
         */
-       mtx_lock(&sc->xb_io_lock);
+       mtx_lock(&sc->xbd_io_lock);
 
        /* Split the 64KB block as needed */
        for (sbp=0; length > 0; sbp++) {
-               cm = xb_dequeue_free(sc);
+               cm = xbd_dequeue_free(sc);
                if (cm == NULL) {
-                       mtx_unlock(&sc->xb_io_lock);
-                       device_printf(sc->xb_dev, "dump: no more commands?\n");
+                       mtx_unlock(&sc->xbd_io_lock);
+                       device_printf(sc->xbd_dev, "dump: no more commands?\n");
                        return (EBUSY);
                }
 
-               if (gnttab_alloc_grant_references(sc->max_request_segments,
-                                                 &cm->gref_head) != 0) {
-                       xb_free_command(cm);
-                       mtx_unlock(&sc->xb_io_lock);
-                       device_printf(sc->xb_dev, "no more grant allocs?\n");
+               if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
+                   &cm->cm_gref_head) != 0) {
+                       xbd_free_command(cm);
+                       mtx_unlock(&sc->xbd_io_lock);
+                       device_printf(sc->xbd_dev, "no more grant allocs?\n");
                        return (EBUSY);
                }
 
-               chunk = length > sc->max_request_size
-                     ? sc->max_request_size : length;
-               cm->data = virtual;
-               cm->datalen = chunk;
-               cm->operation = BLKIF_OP_WRITE;
-               cm->sector_number = offset / dp->d_sectorsize;
-               cm->cm_complete = xb_dump_complete;
+               chunk = length > sc->xbd_max_request_size ?
+                   sc->xbd_max_request_size : length;
+               cm->cm_data = virtual;
+               cm->cm_datalen = chunk;
+               cm->cm_operation = BLKIF_OP_WRITE;
+               cm->cm_sector_number = offset / dp->d_sectorsize;
+               cm->cm_complete = xbd_dump_complete;
 
-               xb_enqueue_ready(cm);
+               xbd_enqueue_ready(cm);
 
                length -= chunk;
                offset += chunk;
@@ -375,21 +376,21 @@ xb_dump(void *arg, void *virtual, vm_off
        }
 
        /* Tell DOM0 to do the I/O */
-       xb_startio(sc);
-       mtx_unlock(&sc->xb_io_lock);
+       xbd_startio(sc);
+       mtx_unlock(&sc->xbd_io_lock);
 
        /* Poll for the completion. */
-       xb_quiesce(sc); /* All quite on the eastern front */
+       xbd_quiesce(sc);        /* All quite on the eastern front */
 
        /* If there were any errors, bail out... */
-       while ((cm = xb_dequeue_complete(sc)) != NULL) {
-               if (cm->status != BLKIF_RSP_OKAY) {
-                       device_printf(sc->xb_dev,
+       while ((cm = xbd_dequeue_complete(sc)) != NULL) {
+               if (cm->cm_status != BLKIF_RSP_OKAY) {
+                       device_printf(sc->xbd_dev,
                            "Dump I/O failed at sector %jd\n",
-                           cm->sector_number);
+                           cm->cm_sector_number);
                        rc = EIO;
                }
-               xb_free_command(cm);
+               xbd_free_command(cm);
        }
 
        return (rc);
@@ -397,7 +398,7 @@ xb_dump(void *arg, void *virtual, vm_off
 
 
 static int
-blkfront_probe(device_t dev)
+xbd_probe(device_t dev)
 {
 
        if (!strcmp(xenbus_get_type(dev), "vbd")) {
@@ -410,37 +411,35 @@ blkfront_probe(device_t dev)
 }
 
 static void
-xb_setup_sysctl(struct xb_softc *xb)
+xbd_setup_sysctl(struct xbd_softc *xbd)
 {
        struct sysctl_ctx_list *sysctl_ctx = NULL;
-       struct sysctl_oid      *sysctl_tree = NULL;
+       struct sysctl_oid *sysctl_tree = NULL;
        
-       sysctl_ctx = device_get_sysctl_ctx(xb->xb_dev);
+       sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev);
        if (sysctl_ctx == NULL)
                return;
 
-       sysctl_tree = device_get_sysctl_tree(xb->xb_dev);
+       sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev);
        if (sysctl_tree == NULL)
                return;
 
        SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
-                       "max_requests", CTLFLAG_RD, &xb->max_requests, -1,
-                       "maximum outstanding requests (negotiated)");
+           "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1,
+           "maximum outstanding requests (negotiated)");
 
        SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
-                       "max_request_segments", CTLFLAG_RD,
-                       &xb->max_request_segments, 0,
-                       "maximum number of pages per requests (negotiated)");
+           "max_request_segments", CTLFLAG_RD,
+           &xbd->xbd_max_request_segments, 0,
+           "maximum number of pages per requests (negotiated)");
 
        SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
-                       "max_request_size", CTLFLAG_RD,
-                       &xb->max_request_size, 0,
-                       "maximum size in bytes of a request (negotiated)");
+           "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0,
+           "maximum size in bytes of a request (negotiated)");
 
        SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
-                       "ring_pages", CTLFLAG_RD,
-                       &xb->ring_pages, 0,
-                       "communication channel pages (negotiated)");
+           "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0,
+           "communication channel pages (negotiated)");
 }
 
 /*
@@ -449,9 +448,9 @@ xb_setup_sysctl(struct xb_softc *xb)
  * ok.
  */
 static int
-blkfront_attach(device_t dev)
+xbd_attach(device_t dev)
 {
-       struct xb_softc *sc;
+       struct xbd_softc *sc;
        const char *name;
        uint32_t vdevice;
        int error;
@@ -467,25 +466,25 @@ blkfront_attach(device_t dev)
                return (error);
        }
 
-       blkfront_vdevice_to_unit(vdevice, &unit, &name);
+       xbd_vdevice_to_unit(vdevice, &unit, &name);
        if (!strcmp(name, "xbd"))
                device_set_unit(dev, unit);
 
        sc = device_get_softc(dev);
-       mtx_init(&sc->xb_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
-       xb_initq_free(sc);
-       xb_initq_busy(sc);
-       xb_initq_ready(sc);
-       xb_initq_complete(sc);
-       xb_initq_bio(sc);
-       for (i = 0; i < XBF_MAX_RING_PAGES; i++)
-               sc->ring_ref[i] = GRANT_INVALID_REF;
-
-       sc->xb_dev = dev;
-       sc->vdevice = vdevice;
-       sc->connected = BLKIF_STATE_DISCONNECTED;
+       mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF);
+       xbd_initq_free(sc);
+       xbd_initq_busy(sc);
+       xbd_initq_ready(sc);
+       xbd_initq_complete(sc);
+       xbd_initq_bio(sc);
+       for (i = 0; i < XBD_MAX_RING_PAGES; i++)
+               sc->xbd_ring_ref[i] = GRANT_INVALID_REF;
+
+       sc->xbd_dev = dev;
+       sc->xbd_vdevice = vdevice;
+       sc->xbd_connected = XBD_STATE_DISCONNECTED;
 
-       xb_setup_sysctl(sc);
+       xbd_setup_sysctl(sc);
 
        /* Wait for backend device to publish its protocol capabilities. */
        xenbus_set_state(dev, XenbusStateInitialising);
@@ -494,48 +493,48 @@ blkfront_attach(device_t dev)
 }
 
 static int
-blkfront_suspend(device_t dev)
+xbd_suspend(device_t dev)
 {
-       struct xb_softc *sc = device_get_softc(dev);
+       struct xbd_softc *sc = device_get_softc(dev);
        int retval;
        int saved_state;
 
        /* Prevent new requests being issued until we fix things up. */
-       mtx_lock(&sc->xb_io_lock);
-       saved_state = sc->connected;
-       sc->connected = BLKIF_STATE_SUSPENDED;
+       mtx_lock(&sc->xbd_io_lock);
+       saved_state = sc->xbd_connected;
+       sc->xbd_connected = XBD_STATE_SUSPENDED;
 
        /* Wait for outstanding I/O to drain. */
        retval = 0;
-       while (TAILQ_EMPTY(&sc->cm_busy) == 0) {
-               if (msleep(&sc->cm_busy, &sc->xb_io_lock,
-                          PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
+       while (TAILQ_EMPTY(&sc->xbd_cm_busy) == 0) {
+               if (msleep(&sc->xbd_cm_busy, &sc->xbd_io_lock,
+                   PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) {
                        retval = EBUSY;
                        break;
                }
        }
-       mtx_unlock(&sc->xb_io_lock);
+       mtx_unlock(&sc->xbd_io_lock);
 
        if (retval != 0)
-               sc->connected = saved_state;
+               sc->xbd_connected = saved_state;
 
        return (retval);
 }
 
 static int
-blkfront_resume(device_t dev)
+xbd_resume(device_t dev)
 {
-       struct xb_softc *sc = device_get_softc(dev);
+       struct xbd_softc *sc = device_get_softc(dev);
 
-       DPRINTK("blkfront_resume: %s\n", xenbus_get_node(dev));
+       DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev));
 
-       blkif_free(sc);
-       blkfront_initialize(sc);
+       xbd_free(sc);
+       xbd_initialize(sc);
        return (0);
 }
 
 static void
-blkfront_initialize(struct xb_softc *sc)
+xbd_initialize(struct xbd_softc *sc)
 {
        const char *otherend_path;
        const char *node_path;
@@ -543,7 +542,7 @@ blkfront_initialize(struct xb_softc *sc)
        int error;
        int i;
 
-       if (xenbus_get_state(sc->xb_dev) != XenbusStateInitialising) {
+       if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) {
                /* Initialization has already been performed. */
                return;
        }
@@ -553,10 +552,12 @@ blkfront_initialize(struct xb_softc *sc)
         * setting fails.
         */
        max_ring_page_order = 0;
-       sc->ring_pages = 1;
-       sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
-       sc->max_request_size = XBF_SEGS_TO_SIZE(sc->max_request_segments);
-       sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
+       sc->xbd_ring_pages = 1;
+       sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
+       sc->xbd_max_request_size =
+           XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
+       sc->xbd_max_request_blocks =
+           BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
 
        /*
         * Protocol negotiation.
@@ -569,266 +570,279 @@ blkfront_initialize(struct xb_softc *sc)
         * \note xs_scanf() does not update variables for unmatched
         *       fields.
         */
-       otherend_path = xenbus_get_otherend_path(sc->xb_dev);
-       node_path = xenbus_get_node(sc->xb_dev);
+       otherend_path = xenbus_get_otherend_path(sc->xbd_dev);
+       node_path = xenbus_get_node(sc->xbd_dev);
 
        /* Support both backend schemes for relaying ring page limits. */
        (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-ring-page-order", NULL, "%" PRIu32,
-                      &max_ring_page_order);
-       sc->ring_pages = 1 << max_ring_page_order;
+           "max-ring-page-order", NULL, "%" PRIu32,
+           &max_ring_page_order);
+       sc->xbd_ring_pages = 1 << max_ring_page_order;
        (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-ring-pages", NULL, "%" PRIu32,
-                      &sc->ring_pages);
-       if (sc->ring_pages < 1)
-               sc->ring_pages = 1;
+           "max-ring-pages", NULL, "%" PRIu32,
+           &sc->xbd_ring_pages);
+       if (sc->xbd_ring_pages < 1)
+               sc->xbd_ring_pages = 1;
 
-       sc->max_requests = BLKIF_MAX_RING_REQUESTS(sc->ring_pages * PAGE_SIZE);
+       sc->xbd_max_requests =
+           BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE);
        (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-requests", NULL, "%" PRIu32,
-                      &sc->max_requests);
+           "max-requests", NULL, "%" PRIu32,
+           &sc->xbd_max_requests);
 
        (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-segments", NULL, "%" PRIu32,
-                      &sc->max_request_segments);
+           "max-request-segments", NULL, "%" PRIu32,
+           &sc->xbd_max_request_segments);
 
        (void)xs_scanf(XST_NIL, otherend_path,
-                      "max-request-size", NULL, "%" PRIu32,
-                      &sc->max_request_size);
+           "max-request-size", NULL, "%" PRIu32,
+           &sc->xbd_max_request_size);
 
-       if (sc->ring_pages > XBF_MAX_RING_PAGES) {
-               device_printf(sc->xb_dev, "Back-end specified ring-pages of "
-                             "%u limited to front-end limit of %zu.\n",
-                             sc->ring_pages, XBF_MAX_RING_PAGES);
-               sc->ring_pages = XBF_MAX_RING_PAGES;
+       if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) {
+               device_printf(sc->xbd_dev,
+                   "Back-end specified ring-pages of %u "
+                   "limited to front-end limit of %zu.\n",
+                   sc->xbd_ring_pages, XBD_MAX_RING_PAGES);
+               sc->xbd_ring_pages = XBD_MAX_RING_PAGES;
        }
 
-       if (powerof2(sc->ring_pages) == 0) {
+       if (powerof2(sc->xbd_ring_pages) == 0) {
                uint32_t new_page_limit;
 
-               new_page_limit = 0x01 << (fls(sc->ring_pages) - 1);
-               device_printf(sc->xb_dev, "Back-end specified ring-pages of "
-                             "%u is not a power of 2. Limited to %u.\n",
-                             sc->ring_pages, new_page_limit);
-               sc->ring_pages = new_page_limit;
-       }
-
-       if (sc->max_requests > XBF_MAX_REQUESTS) {
-               device_printf(sc->xb_dev, "Back-end specified max_requests of "
-                             "%u limited to front-end limit of %u.\n",
-                             sc->max_requests, XBF_MAX_REQUESTS);
-               sc->max_requests = XBF_MAX_REQUESTS;
-       }
-
-       if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {
-               device_printf(sc->xb_dev, "Back-end specified "
-                             "max_request_segments of %u limited to "
-                             "front-end limit of %u.\n",
-                             sc->max_request_segments,
-                             XBF_MAX_SEGMENTS_PER_REQUEST);
-               sc->max_request_segments = XBF_MAX_SEGMENTS_PER_REQUEST;
-       }
-
-       if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) {
-               device_printf(sc->xb_dev, "Back-end specified "
-                             "max_request_size of %u limited to front-end "
-                             "limit of %u.\n", sc->max_request_size,
-                             XBF_MAX_REQUEST_SIZE);
-               sc->max_request_size = XBF_MAX_REQUEST_SIZE;
+               new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1);
+               device_printf(sc->xbd_dev,
+                   "Back-end specified ring-pages of %u "
+                   "is not a power of 2. Limited to %u.\n",
+                   sc->xbd_ring_pages, new_page_limit);
+               sc->xbd_ring_pages = new_page_limit;
+       }
+
+       if (sc->xbd_max_requests > XBD_MAX_REQUESTS) {
+               device_printf(sc->xbd_dev,
+                   "Back-end specified max_requests of %u "
+                   "limited to front-end limit of %u.\n",
+                   sc->xbd_max_requests, XBD_MAX_REQUESTS);
+               sc->xbd_max_requests = XBD_MAX_REQUESTS;
+       }
+
+       if (sc->xbd_max_request_segments > XBD_MAX_SEGMENTS_PER_REQUEST) {
+               device_printf(sc->xbd_dev,
+                   "Back-end specified max_request_segments of %u "
+                   "limited to front-end limit of %u.\n",
+                   sc->xbd_max_request_segments,
+                   XBD_MAX_SEGMENTS_PER_REQUEST);
+               sc->xbd_max_request_segments = XBD_MAX_SEGMENTS_PER_REQUEST;
+       }
+
+       if (sc->xbd_max_request_size > XBD_MAX_REQUEST_SIZE) {
+               device_printf(sc->xbd_dev,
+                   "Back-end specified max_request_size of %u "
+                   "limited to front-end limit of %u.\n",
+                   sc->xbd_max_request_size,
+                   XBD_MAX_REQUEST_SIZE);
+               sc->xbd_max_request_size = XBD_MAX_REQUEST_SIZE;
        }
  
-       if (sc->max_request_size > XBF_SEGS_TO_SIZE(sc->max_request_segments)) {
-               device_printf(sc->xb_dev, "Back-end specified "
-                             "max_request_size of %u limited to front-end "
-                             "limit of %u.  (Too few segments.)\n",
-                             sc->max_request_size,
-                             XBF_SEGS_TO_SIZE(sc->max_request_segments));
-               sc->max_request_size =
-                   XBF_SEGS_TO_SIZE(sc->max_request_segments);
+       if (sc->xbd_max_request_size >
+           XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments)) {
+               device_printf(sc->xbd_dev,
+                   "Back-end specified max_request_size of %u "
+                   "limited to front-end limit of %u.  (Too few segments.)\n",
+                   sc->xbd_max_request_size,
+                   XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments));
+               sc->xbd_max_request_size =
+                   XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
        }
 
-       sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
+       sc->xbd_max_request_blocks =
+           BLKIF_SEGS_TO_BLOCKS(sc->xbd_max_request_segments);
 
        /* Allocate datastructures based on negotiated values. */
-       error = bus_dma_tag_create(bus_get_dma_tag(sc->xb_dev), /* parent */
-                                  512, PAGE_SIZE,      /* algnmnt, boundary */
-                                  BUS_SPACE_MAXADDR,   /* lowaddr */
-                                  BUS_SPACE_MAXADDR,   /* highaddr */
-                                  NULL, NULL,          /* filter, filterarg */
-                                  sc->max_request_size,
-                                  sc->max_request_segments,
-                                  PAGE_SIZE,           /* maxsegsize */
-                                  BUS_DMA_ALLOCNOW,    /* flags */
-                                  busdma_lock_mutex,   /* lockfunc */
-                                  &sc->xb_io_lock,     /* lockarg */
-                                  &sc->xb_io_dmat);
+       error = bus_dma_tag_create(
+           bus_get_dma_tag(sc->xbd_dev),       /* parent */
+           512, PAGE_SIZE,                     /* algnmnt, boundary */
+           BUS_SPACE_MAXADDR,                  /* lowaddr */
+           BUS_SPACE_MAXADDR,                  /* highaddr */
+           NULL, NULL,                         /* filter, filterarg */
+           sc->xbd_max_request_size,
+           sc->xbd_max_request_segments,
+           PAGE_SIZE,                          /* maxsegsize */
+           BUS_DMA_ALLOCNOW,                   /* flags */
+           busdma_lock_mutex,                  /* lockfunc */
+           &sc->xbd_io_lock,                   /* lockarg */
+           &sc->xbd_io_dmat);
        if (error != 0) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "Cannot allocate parent DMA tag\n");
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "Cannot allocate parent DMA tag\n");
                return;
        }
 
        /* Per-transaction data allocation. */
-       sc->shadow = malloc(sizeof(*sc->shadow) * sc->max_requests,
-                           M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
-       if (sc->shadow == NULL) {
-               bus_dma_tag_destroy(sc->xb_io_dmat);
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "Cannot allocate request structures\n");
+       sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
+           M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
+       if (sc->xbd_shadow == NULL) {
+               bus_dma_tag_destroy(sc->xbd_io_dmat);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "Cannot allocate request structures\n");
                return;
        }
 
-       for (i = 0; i < sc->max_requests; i++) {
-               struct xb_command *cm;
+       for (i = 0; i < sc->xbd_max_requests; i++) {
+               struct xbd_command *cm;
 
-               cm = &sc->shadow[i];
-               cm->sg_refs = malloc(sizeof(grant_ref_t)
-                                  * sc->max_request_segments,
-                                    M_XENBLOCKFRONT, M_NOWAIT);
-               if (cm->sg_refs == NULL)
+               cm = &sc->xbd_shadow[i];
+               cm->cm_sg_refs = malloc(
+                   sizeof(grant_ref_t) * sc->xbd_max_request_segments,
+                   M_XENBLOCKFRONT, M_NOWAIT);
+               if (cm->cm_sg_refs == NULL)
                        break;
-               cm->id = i;
+               cm->cm_id = i;
                cm->cm_sc = sc;
-               if (bus_dmamap_create(sc->xb_io_dmat, 0, &cm->map) != 0)
+               if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
                        break;
-               xb_free_command(cm);
+               xbd_free_command(cm);
        }
 
-       if (setup_blkring(sc) != 0)
+       if (xbd_setup_ring(sc) != 0)
                return;
 
        /* Support both backend schemes for relaying ring page limits. */
-       if (sc->ring_pages > 1) {
+       if (sc->xbd_ring_pages > 1) {
                error = xs_printf(XST_NIL, node_path,
-                                "num-ring-pages","%u", sc->ring_pages);
+                   "num-ring-pages","%u",
+                   sc->xbd_ring_pages);
                if (error) {
-                       xenbus_dev_fatal(sc->xb_dev, error,
-                                        "writing %s/num-ring-pages",
-                                        node_path);
+                       xenbus_dev_fatal(sc->xbd_dev, error,
+                           "writing %s/num-ring-pages",
+                           node_path);
                        return;
                }
 
                error = xs_printf(XST_NIL, node_path,
-                                "ring-page-order", "%u",
-                                fls(sc->ring_pages) - 1);
+                   "ring-page-order", "%u",
+                   fls(sc->xbd_ring_pages) - 1);
                if (error) {
-                       xenbus_dev_fatal(sc->xb_dev, error,
-                                        "writing %s/ring-page-order",
-                                        node_path);
+                       xenbus_dev_fatal(sc->xbd_dev, error,
+                           "writing %s/ring-page-order",
+                           node_path);
                        return;
                }
        }
 
        error = xs_printf(XST_NIL, node_path,
-                        "max-requests","%u", sc->max_requests);
+           "max-requests","%u",
+           sc->xbd_max_requests);
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "writing %s/max-requests",
-                                node_path);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "writing %s/max-requests",
+                   node_path);
                return;
        }
 
        error = xs_printf(XST_NIL, node_path,
-                        "max-request-segments","%u", sc->max_request_segments);
+           "max-request-segments","%u",
+           sc->xbd_max_request_segments);
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "writing %s/max-request-segments",
-                                node_path);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "writing %s/max-request-segments",
+                   node_path);
                return;
        }
 
        error = xs_printf(XST_NIL, node_path,
-                        "max-request-size","%u", sc->max_request_size);
+           "max-request-size","%u",
+           sc->xbd_max_request_size);
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "writing %s/max-request-size",
-                                node_path);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "writing %s/max-request-size",
+                   node_path);
                return;
        }
 
        error = xs_printf(XST_NIL, node_path, "event-channel",
-                         "%u", irq_to_evtchn_port(sc->irq));
+           "%u", irq_to_evtchn_port(sc->xbd_irq));
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "writing %s/event-channel",
-                                node_path);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "writing %s/event-channel",
+                   node_path);
                return;
        }
 
-       error = xs_printf(XST_NIL, node_path,
-                         "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE);
+       error = xs_printf(XST_NIL, node_path, "protocol",
+           "%s", XEN_IO_PROTO_ABI_NATIVE);
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
-                                "writing %s/protocol",
-                                node_path);
+               xenbus_dev_fatal(sc->xbd_dev, error,
+                   "writing %s/protocol",
+                   node_path);
                return;
        }
 
-       xenbus_set_state(sc->xb_dev, XenbusStateInitialised);
+       xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
 }
 
 static int 
-setup_blkring(struct xb_softc *sc)
+xbd_setup_ring(struct xbd_softc *sc)
 {
        blkif_sring_t *sring;
        uintptr_t sring_page_addr;
        int error;
        int i;
 
-       sring = malloc(sc->ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
-                      M_NOWAIT|M_ZERO);
+       sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT,
+           M_NOWAIT|M_ZERO);
        if (sring == NULL) {
-               xenbus_dev_fatal(sc->xb_dev, ENOMEM, "allocating shared ring");
+               xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring");
                return (ENOMEM);
        }
        SHARED_RING_INIT(sring);
-       FRONT_RING_INIT(&sc->ring, sring, sc->ring_pages * PAGE_SIZE);
+       FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE);
 
        for (i = 0, sring_page_addr = (uintptr_t)sring;
-            i < sc->ring_pages;
+            i < sc->xbd_ring_pages;
             i++, sring_page_addr += PAGE_SIZE) {
 
-               error = xenbus_grant_ring(sc->xb_dev,
-                   (vtomach(sring_page_addr) >> PAGE_SHIFT), &sc->ring_ref[i]);
+               error = xenbus_grant_ring(sc->xbd_dev,
+                   (vtomach(sring_page_addr) >> PAGE_SHIFT),
+                   &sc->xbd_ring_ref[i]);
                if (error) {
-                       xenbus_dev_fatal(sc->xb_dev, error,
-                                        "granting ring_ref(%d)", i);
+                       xenbus_dev_fatal(sc->xbd_dev, error,
+                           "granting ring_ref(%d)", i);
                        return (error);
                }
        }
-       if (sc->ring_pages == 1) {
-               error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
-                                 "ring-ref", "%u", sc->ring_ref[0]);
+       if (sc->xbd_ring_pages == 1) {
+               error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
+                   "ring-ref", "%u", sc->xbd_ring_ref[0]);
                if (error) {
-                       xenbus_dev_fatal(sc->xb_dev, error,
-                                        "writing %s/ring-ref",
-                                        xenbus_get_node(sc->xb_dev));
+                       xenbus_dev_fatal(sc->xbd_dev, error,
+                           "writing %s/ring-ref",
+                           xenbus_get_node(sc->xbd_dev));
                        return (error);
                }
        } else {
-               for (i = 0; i < sc->ring_pages; i++) {
+               for (i = 0; i < sc->xbd_ring_pages; i++) {
                        char ring_ref_name[]= "ring_refXX";
 
                        snprintf(ring_ref_name, sizeof(ring_ref_name),
-                                "ring-ref%u", i);
-                       error = xs_printf(XST_NIL, xenbus_get_node(sc->xb_dev),
-                                        ring_ref_name, "%u", sc->ring_ref[i]);
+                           "ring-ref%u", i);
+                       error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev),
+                            ring_ref_name, "%u", sc->xbd_ring_ref[i]);
                        if (error) {
-                               xenbus_dev_fatal(sc->xb_dev, error,
-                                                "writing %s/%s",
-                                                xenbus_get_node(sc->xb_dev),
-                                                ring_ref_name);
+                               xenbus_dev_fatal(sc->xbd_dev, error,
+                                   "writing %s/%s",
+                                   xenbus_get_node(sc->xbd_dev),
+                                   ring_ref_name);
                                return (error);
                        }
                }
        }
 
        error = bind_listening_port_to_irqhandler(
-           xenbus_get_otherend_id(sc->xb_dev),
-           "xbd", (driver_intr_t *)blkif_int, sc,
-           INTR_TYPE_BIO | INTR_MPSAFE, &sc->irq);
+           xenbus_get_otherend_id(sc->xbd_dev),
+           "xbd", (driver_intr_t *)xbd_int, sc,
+           INTR_TYPE_BIO | INTR_MPSAFE, &sc->xbd_irq);
        if (error) {
-               xenbus_dev_fatal(sc->xb_dev, error,
+               xenbus_dev_fatal(sc->xbd_dev, error,
                    "bind_evtchn_to_irqhandler failed");
                return (error);
        }
@@ -840,9 +854,9 @@ setup_blkring(struct xb_softc *sc)
  * Callback received when the backend's state changes.
  */
 static void
-blkfront_backend_changed(device_t dev, XenbusState backend_state)
+xbd_backend_changed(device_t dev, XenbusState backend_state)
 {
-       struct xb_softc *sc = device_get_softc(dev);
+       struct xbd_softc *sc = device_get_softc(dev);
 
        DPRINTK("backend_state=%d\n", backend_state);
 
@@ -856,47 +870,47 @@ blkfront_backend_changed(device_t dev, X
 

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
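
A note on the formatting churn: much of the diff is style(9)
continuation-indent conformance, under which the second and later lines
of a statement are indented four spaces from the start of the first
line rather than aligned under an opening parenthesis.  Condensed from
a hunk above (old form first, new form second):

    /* Before: continuation aligned under the opening paren. */
    xenbus_dev_fatal(sc->xb_dev, error,
                     "Cannot allocate parent DMA tag\n");

    /* After: style(9) four-space continuation indent. */
    xenbus_dev_fatal(sc->xbd_dev, error,
        "Cannot allocate parent DMA tag\n");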