The field names in the IO emulation are really long and repeatedly use
the term "handler", which makes some lines cumbersome to read:

mmio_handler->mmio_handler_ops->write_handler
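
After the renames, the same access becomes:

handler->ops->write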

Also take the opportunity to do some clean up:
    - Avoid "handler" vs "handle" in register_mmio_handler
    - Use a local variable to initialize handler in
    register_mmio_handler
    - Add a comment explaining the dsb(ish) in register_mmio_handler
    (a standalone sketch of the ordering is in the notes below)
    - Rename the structure io_handler to vmmio because io_handler is
    ultimately handling multiple handlers, and the field using it was
    named io_handlers. Also rename the field io_handlers to vmmio
    - Rename the field mmio_handler_ops to ops because we are in the
    structure mmio_handler, so there is no need to repeat it
    - Rename the field mmio_handlers to handlers because we are in the
    vmmio structure
    - Make it clear that register_mmio_handler takes an ops and not a
    handle
    - Clean up local variables to help understand the code

Signed-off-by: Julien Grall <julien.gr...@citrix.com>

---
    This is the last patch of the series, to make it easy to backport
    the previous patches to Xen 4.6 if necessary.
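
    For completeness, here is a minimal standalone sketch of the
    ordering that the dsb(ish) provides. C11 atomics stand in for the
    barrier and the spinlock, and all names here (publish, lookup,
    struct entry) are purely illustrative, not part of this patch:

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_ENTRIES 16

    struct entry {
        unsigned long addr;
        unsigned long size;
    };

    static struct entry table[MAX_ENTRIES];
    static atomic_int num_entries;

    /*
     * Writer: fill the slot completely, then publish it by bumping
     * the count with release semantics (the role dsb(ish) plays in
     * register_mmio_handler). A single writer is assumed here; in
     * the patch, vmmio->lock serializes concurrent writers and a
     * BUG_ON does the bounds check omitted here.
     */
    void publish(unsigned long addr, unsigned long size)
    {
        int n = atomic_load_explicit(&num_entries, memory_order_relaxed);

        table[n].addr = addr;
        table[n].size = size;

        /* The stores above may not be reordered after this one. */
        atomic_store_explicit(&num_entries, n + 1, memory_order_release);
    }

    /*
     * Lockless reader (the role handle_mmio plays): the acquire load
     * pairs with the writer's release store, so every entry below the
     * observed count is fully initialized.
     */
    const struct entry *lookup(unsigned long addr)
    {
        int n = atomic_load_explicit(&num_entries, memory_order_acquire);
        int i;

        for ( i = 0; i < n; i++ )
            if ( addr >= table[i].addr &&
                 addr < (table[i].addr + table[i].size) )
                return &table[i];

        return NULL;
    }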
---
 xen/arch/arm/io.c            | 52 +++++++++++++++++++++++++-------------------
 xen/arch/arm/vgic-v2.c       |  4 ++--
 xen/arch/arm/vgic-v3.c       |  8 +++----
 xen/arch/arm/vuart.c         |  4 ++--
 xen/include/asm-arm/domain.h |  3 ++-
 xen/include/asm-arm/mmio.h   | 12 +++++-----
 6 files changed, 46 insertions(+), 37 deletions(-)

diff --git a/xen/arch/arm/io.c b/xen/arch/arm/io.c
index 85797f1..f7443d2 100644
--- a/xen/arch/arm/io.c
+++ b/xen/arch/arm/io.c
@@ -27,15 +27,15 @@ int handle_mmio(mmio_info_t *info)
 {
     struct vcpu *v = current;
     int i;
-    const struct mmio_handler *mmio_handler;
-    const struct io_handler *io_handlers = &v->domain->arch.io_handlers;
+    const struct mmio_handler *handler;
+    const struct vmmio *vmmio = &v->domain->arch.vmmio;
 
-    for ( i = 0; i < io_handlers->num_entries; i++ )
+    for ( i = 0; i < vmmio->num_entries; i++ )
     {
-        mmio_handler = &io_handlers->mmio_handlers[i];
+        handler = &vmmio->handlers[i];
 
-        if ( (info->gpa >= mmio_handler->addr) &&
-             (info->gpa < (mmio_handler->addr + mmio_handler->size)) )
+        if ( (info->gpa >= handler->addr) &&
+             (info->gpa < (handler->addr + handler->size)) )
         {
             goto found;
         }
@@ -45,37 +45,45 @@ int handle_mmio(mmio_info_t *info)
 
 found:
     if ( info->dabt.write )
-        return mmio_handler->mmio_handler_ops->write_handler(v, info,
-                                                             mmio_handler->priv);
+        return handler->ops->write(v, info, handler->priv);
     else
-        return mmio_handler->mmio_handler_ops->read_handler(v, info,
-                                                            mmio_handler->priv);
+        return handler->ops->read(v, info, handler->priv);
 }
 
 void register_mmio_handler(struct domain *d,
-                           const struct mmio_handler_ops *handle,
+                           const struct mmio_handler_ops *ops,
                            paddr_t addr, paddr_t size, void *priv)
 {
-    struct io_handler *handler = &d->arch.io_handlers;
+    struct vmmio *vmmio = &d->arch.vmmio;
+    struct mmio_handler *handler;
 
-    BUG_ON(handler->num_entries >= MAX_IO_HANDLER);
+    BUG_ON(vmmio->num_entries >= MAX_IO_HANDLER);
 
-    spin_lock(&handler->lock);
+    spin_lock(&vmmio->lock);
 
-    handler->mmio_handlers[handler->num_entries].mmio_handler_ops = handle;
-    handler->mmio_handlers[handler->num_entries].addr = addr;
-    handler->mmio_handlers[handler->num_entries].size = size;
-    handler->mmio_handlers[handler->num_entries].priv = priv;
+    handler = &vmmio->handlers[vmmio->num_entries];
+
+    handler->ops = ops;
+    handler->addr = addr;
+    handler->size = size;
+    handler->priv = priv;
+
+    /*
+     * handle_mmio is not using the lock to avoid contention.
+     * Make sure the other processors see the new handler before
+     * updating the number of entries
+     */
     dsb(ish);
-    handler->num_entries++;
 
-    spin_unlock(&handler->lock);
+    vmmio->num_entries++;
+
+    spin_unlock(&vmmio->lock);
 }
 
 int domain_io_init(struct domain *d)
 {
-   spin_lock_init(&d->arch.io_handlers.lock);
-   d->arch.io_handlers.num_entries = 0;
+   spin_lock_init(&d->arch.vmmio.lock);
+   d->arch.vmmio.num_entries = 0;
 
    return 0;
 }
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 8e50f22..f886724 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -495,8 +495,8 @@ write_ignore:
 }
 
 static const struct mmio_handler_ops vgic_v2_distr_mmio_handler = {
-    .read_handler  = vgic_v2_distr_mmio_read,
-    .write_handler = vgic_v2_distr_mmio_write,
+    .read  = vgic_v2_distr_mmio_read,
+    .write = vgic_v2_distr_mmio_write,
 };
 
 static struct vcpu *vgic_v2_get_target_vcpu(struct vcpu *v, unsigned int irq)
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index 0a14184..beb3621 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -1037,13 +1037,13 @@ static int vgic_v3_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr)
 }
 
 static const struct mmio_handler_ops vgic_rdistr_mmio_handler = {
-    .read_handler  = vgic_v3_rdistr_mmio_read,
-    .write_handler = vgic_v3_rdistr_mmio_write,
+    .read  = vgic_v3_rdistr_mmio_read,
+    .write = vgic_v3_rdistr_mmio_write,
 };
 
 static const struct mmio_handler_ops vgic_distr_mmio_handler = {
-    .read_handler  = vgic_v3_distr_mmio_read,
-    .write_handler = vgic_v3_distr_mmio_write,
+    .read  = vgic_v3_distr_mmio_read,
+    .write = vgic_v3_distr_mmio_write,
 };
 
 static int vgic_v3_get_irq_priority(struct vcpu *v, unsigned int irq)
diff --git a/xen/arch/arm/vuart.c b/xen/arch/arm/vuart.c
index 51d0557..2495e87 100644
--- a/xen/arch/arm/vuart.c
+++ b/xen/arch/arm/vuart.c
@@ -49,8 +49,8 @@ static int vuart_mmio_read(struct vcpu *v, mmio_info_t *info, void *priv);
 static int vuart_mmio_write(struct vcpu *v, mmio_info_t *info, void *priv);
 
 static const struct mmio_handler_ops vuart_mmio_handler = {
-    .read_handler  = vuart_mmio_read,
-    .write_handler = vuart_mmio_write,
+    .read  = vuart_mmio_read,
+    .write = vuart_mmio_write,
 };
 
 int domain_vuart_init(struct domain *d)
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index c3f5a95..01859cc 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -55,7 +55,8 @@ struct arch_domain
     struct hvm_domain hvm_domain;
     xen_pfn_t *grant_table_gpfn;
 
-    struct io_handler io_handlers;
+    struct vmmio vmmio;
+
     /* Continuable domain_relinquish_resources(). */
     enum {
         RELMEM_not_started,
diff --git a/xen/include/asm-arm/mmio.h b/xen/include/asm-arm/mmio.h
index 294c18b..1cd7a7a 100644
--- a/xen/include/asm-arm/mmio.h
+++ b/xen/include/asm-arm/mmio.h
@@ -37,26 +37,26 @@ typedef int (*mmio_write_t)(struct vcpu *v, mmio_info_t *info, void *priv);
 typedef int (*mmio_check_t)(struct vcpu *v, paddr_t addr);
 
 struct mmio_handler_ops {
-    mmio_read_t read_handler;
-    mmio_write_t write_handler;
+    mmio_read_t read;
+    mmio_write_t write;
 };
 
 struct mmio_handler {
     paddr_t addr;
     paddr_t size;
-    const struct mmio_handler_ops *mmio_handler_ops;
+    const struct mmio_handler_ops *ops;
     void *priv;
 };
 
-struct io_handler {
+struct vmmio {
     int num_entries;
     spinlock_t lock;
-    struct mmio_handler mmio_handlers[MAX_IO_HANDLER];
+    struct mmio_handler handlers[MAX_IO_HANDLER];
 };
 
 extern int handle_mmio(mmio_info_t *info);
 void register_mmio_handler(struct domain *d,
-                           const struct mmio_handler_ops *handle,
+                           const struct mmio_handler_ops *ops,
                            paddr_t addr, paddr_t size, void *priv);
 int domain_io_init(struct domain *d);
 
-- 
2.1.4

