[PATCH v2 5/5] arm/arm64: KVM: Keep elrsr/aisr in sync with software model

2015-03-02 Thread Alex Bennée
From: Christoffer Dall christoffer.d...@linaro.org

There is an interesting bug in the vgic code, which manifests itself
when the KVM run loop has a signal pending or needs a vmid generation
rollover after having disabled interrupts but before actually switching
to the guest.

In this case, we flush the vgic as usual, but we sync back the vgic
state and exit to userspace before entering the guest.  The consequence
is that we will be syncing the list registers back to the software model
using the GICH_ELRSR and GICH_EISR from the last execution of the guest,
potentially overwriting a list register containing an interrupt.

This showed up during migration testing where we would capture a state
where the VM has masked the arch timer but there were no interrupts,
resulting in a hung test.

Cc: Marc Zyngier marc.zyng...@arm.com
Reported-by: Alex Bennee alex.ben...@linaro.org
Signed-off-by: Christoffer Dall christoffer.d...@linaro.org
Signed-off-by: Alex Bennée alex.ben...@linaro.org
Acked-by: Marc Zyngier marc.zyng...@arm.com

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7042251..e2a676e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -114,6 +114,7 @@ struct vgic_ops {
void(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
+   void(*clear_eisr)(struct kvm_vcpu *vcpu);
u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
void(*enable_underflow)(struct kvm_vcpu *vcpu);
void(*disable_underflow)(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index a0a7b5d..f9b9c7c 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int 
lr,
 {
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
+static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
+}
+
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
@@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
.sync_lr_elrsr  = vgic_v2_sync_lr_elrsr,
.get_elrsr  = vgic_v2_get_elrsr,
.get_eisr   = vgic_v2_get_eisr,
+   .clear_eisr = vgic_v2_clear_eisr,
.get_interrupt_status   = vgic_v2_get_interrupt_status,
.enable_underflow   = vgic_v2_enable_underflow,
.disable_underflow  = vgic_v2_disable_underflow,
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 3a62d8a..dff0602 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -104,6 +104,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, 
int lr,
 {
	if (!(lr_desc.state & LR_STATE_MASK))
		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
 }
 
 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
@@ -116,6 +118,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
	return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
 }
 
+static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
+}
+
 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
 {
	u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
@@ -192,6 +199,7 @@ static const struct vgic_ops vgic_v3_ops = {
.sync_lr_elrsr  = vgic_v3_sync_lr_elrsr,
.get_elrsr  = vgic_v3_get_elrsr,
.get_eisr   = vgic_v3_get_eisr,
+   .clear_eisr = vgic_v3_clear_eisr,
.get_interrupt_status   = vgic_v3_get_interrupt_status,
.enable_underflow   = vgic_v3_enable_underflow,
.disable_underflow  = vgic_v3_disable_underflow,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3b4ded2..3690c1e 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -980,6 +980,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
	return vgic_ops->get_eisr(vcpu);
 }
 
+static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
+{
+	vgic_ops->clear_eisr(vcpu);
+}
+
 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
 {
	return vgic_ops->get_interrupt_status(vcpu);
@@ -1019,6 +1024,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct 
kvm_vcpu *vcpu)
vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
 

Re: [kvm-unit-tests PATCH 00/18] arm/arm64: add smp support

2015-03-02 Thread Christoffer Dall
On Thu, Feb 26, 2015 at 02:50:38PM +0100, Andrew Jones wrote:
 On Thu, Feb 26, 2015 at 12:34:02PM +0100, Christoffer Dall wrote:
  On Sun, Feb 01, 2015 at 07:34:28PM +0100, Andrew Jones wrote:
   This series extends the kvm-unit-tests/arm[64] framework to support smp.
   A break down of the patches is as follows
   
   01-02: prepare general framework for smp use
   03-06: arm/arm64 fixups not 100% related to this series,
  but need to post some time...
   07-09: add thread_info (for per-thread data) and suck some global
  data into it
   10-11: add cpumask support (for per-cpu data) and suck some more
  global data in
  12: add arm64 simple spinlock implementation
   13-14: add some PSCI support
   15-16: further prep for smp_boot_secondary
  17: finally add smp_boot_secondary
  18: as usual, add a selftest to make sure it all works
   
   These patches are also available here:
   https://github.com/rhdrjones/kvm-unit-tests/tree/arm/smp
   
  I've tested these patches on Juno and they seem to run fine, however,
  since we don't support big.LITTLE yet, you have to run them under
  'taskset mask', but the config script uses $(getconf
  _NPROCESSORS_CONF), which returns 6, and QEMU fails.  The interesting
 
 Should I try to read the number of host cpus from some other source?
 If you know something I can read that also works on big.LITTLE, then
 I can change it now.
 

I have no idea what the right scripting fix would be. But we should
really fix big.LITTLE support in KVM.  Hmmm.

  bit is that the unit-tests still report 'PASS' - not sure why.
 
 Ah, this is due to the weird way qemu's debugexit device sets its exit
 code
 
 hw/misc/debugexit.c:debug_exit_write()
 {
    exit((val << 1) | 1);
 }
 
 To be consistent with that we made chr-testdev do the same thing (see
 backends/testdev.c:testdev_eat_packet():case 'q'). Now, the
 kvm-unit-tests run_tests.sh script knows about that, so it has
 
   eval $cmdline >> test.log
   if [ $? -le 1 ]; then
  echo -e "\e[32mPASS\e[0m $1"
   else
  echo -e "\e[31mFAIL\e[0m $1"
   fi
 
 Yes, this sucks, as we can't tell the difference between qemu failing
 to run the test, and exiting with 1 vs. the test running, passing -
 exiting with (0 << 1) | 1. It's too bad debugexit didn't set a higher
 bit (like 5 or 6) to flag a debug exit. Maybe it's not too late to
 change it? Paolo?
 

This would be really good to address somehow, because we don't want
to report that everything is happy when the test harness broke, that
really goes against the whole idea of this work.

Thanks,
-Christoffer
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: tlbi va, vaa vs. val, vaal

2015-03-02 Thread Catalin Marinas
On Fri, Feb 27, 2015 at 01:15:57PM -0800, Mario Smarduch wrote:
 On 02/27/2015 02:24 AM, Will Deacon wrote:
  On Fri, Feb 27, 2015 at 12:12:32AM +, Mario Smarduch wrote:
  I noticed kernel tlbflush.h use tlbi va*, vaa* variants instead of
  val, vaal ones. Reading the manual D.5.7.2 it appears that
  va*, vaa* versions invalidate intermediate caching of
  translation structures.
 
  With stage2 enabled that may result in 20+ memory lookups
  for a 4 level page table walk. That's assuming that intermediate
  caching structures cache mappings from stage1 table entry to
  host page.
  
  Yeah, Catalin and I discussed improving the kernel support for this,
  but it requires some changes to the generic mmu_gather code so that we
  can distinguish the leaf cases. I'd also like to see that done in a way
  that takes into account different granule sizes (we currently iterate
  over huge pages in 4k chunks). Last time I touched that, I entered a
  world of pain and don't plan to return there immediately :)
  
  Catalin -- feeling brave?
  
  FWIW: the new IOMMU page-table stuff I just got merged *does* make use
  of leaf-invalidation for the SMMU.
 
   thanks for the background. I'm guessing how much of PTWalk
 is cached is implementation dependent. One old paper quotes upto 40%
 improvement for some industry benchmarks that cache all stage1/2 PTWalk
 entries.

Is it caching in the TLB or in the level 1 CPU cache?

I would indeed expect some improvement without many drawbacks. The only
thing we need in Linux is to distinguish between leaf TLBI and TLBI for
page table tearing down. It's not complicated, it just needs some
testing (strangely enough, I tried to replace all user TLBI with the L
variants on a Juno board and no signs of any crashes).

-- 
Catalin
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v14 10/20] vfio/platform: return IRQ info

2015-03-02 Thread Baptiste Reynal
From: Antonios Motakis a.mota...@virtualopensystems.com

Return information for the interrupts exposed by the device.
This patch extends VFIO_DEVICE_GET_INFO with the number of IRQs
and enables VFIO_DEVICE_GET_IRQ_INFO.

Signed-off-by: Antonios Motakis a.mota...@virtualopensystems.com
Signed-off-by: Baptiste Reynal b.rey...@virtualopensystems.com
---
 drivers/vfio/platform/Makefile|  2 +-
 drivers/vfio/platform/vfio_platform_common.c  | 31 +---
 drivers/vfio/platform/vfio_platform_irq.c | 51 +++
 drivers/vfio/platform/vfio_platform_private.h | 10 ++
 4 files changed, 89 insertions(+), 5 deletions(-)
 create mode 100644 drivers/vfio/platform/vfio_platform_irq.c

diff --git a/drivers/vfio/platform/Makefile b/drivers/vfio/platform/Makefile
index 1957170..81de144 100644
--- a/drivers/vfio/platform/Makefile
+++ b/drivers/vfio/platform/Makefile
@@ -1,5 +1,5 @@
 
-vfio-platform-y := vfio_platform.o vfio_platform_common.o
+vfio-platform-y := vfio_platform.o vfio_platform_common.o vfio_platform_irq.o
 
 obj-$(CONFIG_VFIO_PLATFORM) += vfio-platform.o
 
diff --git a/drivers/vfio/platform/vfio_platform_common.c 
b/drivers/vfio/platform/vfio_platform_common.c
index d7fe2c7..908d510 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -101,6 +101,7 @@ static void vfio_platform_release(void *device_data)
 
	if (!(--vdev->refcnt)) {
vfio_platform_regions_cleanup(vdev);
+   vfio_platform_irq_cleanup(vdev);
}
 
mutex_unlock(driver_lock);
@@ -122,6 +123,10 @@ static int vfio_platform_open(void *device_data)
ret = vfio_platform_regions_init(vdev);
if (ret)
goto err_reg;
+
+   ret = vfio_platform_irq_init(vdev);
+   if (ret)
+   goto err_irq;
}
 
	vdev->refcnt++;
@@ -129,6 +134,8 @@ static int vfio_platform_open(void *device_data)
mutex_unlock(driver_lock);
return 0;
 
+err_irq:
+   vfio_platform_regions_cleanup(vdev);
 err_reg:
mutex_unlock(driver_lock);
module_put(THIS_MODULE);
@@ -154,7 +161,7 @@ static long vfio_platform_ioctl(void *device_data,
 
	info.flags = vdev->flags;
	info.num_regions = vdev->num_regions;
-	info.num_irqs = 0;
+	info.num_irqs = vdev->num_irqs;
 
	return copy_to_user((void __user *)arg, &info, minsz);
 
@@ -179,10 +186,26 @@ static long vfio_platform_ioctl(void *device_data,
 
	return copy_to_user((void __user *)arg, &info, minsz);
 
-   } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO)
-   return -EINVAL;
+   } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+   struct vfio_irq_info info;
+
+   minsz = offsetofend(struct vfio_irq_info, count);
+
+		if (copy_from_user(&info, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (info.argsz < minsz)
+			return -EINVAL;
+
+		if (info.index >= vdev->num_irqs)
+			return -EINVAL;
+
+		info.flags = vdev->irqs[info.index].flags;
+		info.count = vdev->irqs[info.index].count;
+
+		return copy_to_user((void __user *)arg, &info, minsz);
 
-   else if (cmd == VFIO_DEVICE_SET_IRQS)
+   } else if (cmd == VFIO_DEVICE_SET_IRQS)
return -EINVAL;
 
else if (cmd == VFIO_DEVICE_RESET)
diff --git a/drivers/vfio/platform/vfio_platform_irq.c 
b/drivers/vfio/platform/vfio_platform_irq.c
new file mode 100644
index 000..c6c3ec1
--- /dev/null
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -0,0 +1,51 @@
+/*
+ * VFIO platform devices interrupt handling
+ *
+ * Copyright (C) 2013 - Virtual Open Systems
+ * Author: Antonios Motakis a.mota...@virtualopensystems.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/eventfd.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vfio.h>
+#include <linux/irq.h>
+
+#include "vfio_platform_private.h"
+
+int vfio_platform_irq_init(struct vfio_platform_device *vdev)
+{
+   int cnt = 0, i;
+
+	while (vdev->get_irq(vdev, cnt) >= 0)
+		cnt++;
+
+	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
+	if (!vdev->irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < cnt; i++) {
+		vdev->irqs[i].flags = 0;
+   

[PATCH v4 2/5] vfio: introduce the VFIO_DMA_MAP_FLAG_NOEXEC flag

2015-03-02 Thread Baptiste Reynal
From: Antonios Motakis a.mota...@virtualopensystems.com

We introduce the VFIO_DMA_MAP_FLAG_NOEXEC flag to the VFIO dma map call,
and expose its availability via the capability VFIO_DMA_NOEXEC_IOMMU.
This way the user can control whether the XN flag will be set on the
requested mappings. The IOMMU_NOEXEC flag needs to be available for all
the IOMMUs of the container used.

Signed-off-by: Antonios Motakis a.mota...@virtualopensystems.com
Signed-off-by: Baptiste Reynal b.rey...@virtualopensystems.com
---
 include/uapi/linux/vfio.h | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 5fb3d46..30801a7 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -31,6 +31,7 @@ enum vfio_iommu_cap {
   (ex. PCIe NoSnoop stripping) */
VFIO_EEH = 5,   /* Check if EEH is supported */
VFIO_TYPE1_NESTING_IOMMU = 6,   /* Two-stage IOMMU, implies v2  */
+   VFIO_DMA_NOEXEC_IOMMU = 7,
 };
 
 
@@ -397,12 +398,17 @@ struct vfio_iommu_type1_info {
  *
  * Map process virtual addresses to IO virtual addresses using the
  * provided struct vfio_dma_map. Caller sets argsz. READ / WRITE required.
+ *
+ * To use the VFIO_DMA_MAP_FLAG_NOEXEC flag, the container must support the
+ * VFIO_DMA_NOEXEC_IOMMU capability. If mappings are created using this flag,
+ * any groups subsequently added to the container must support this capability.
  */
 struct vfio_iommu_type1_dma_map {
__u32   argsz;
__u32   flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
+#define VFIO_DMA_MAP_FLAG_NOEXEC (1 << 2)	/* not executable from device */
__u64   vaddr;  /* Process virtual address */
__u64   iova;   /* IO virtual address */
__u64   size;   /* Size of mapping (bytes) */
-- 
2.3.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v4 5/5] vfio: type1: implement the VFIO_DMA_MAP_FLAG_NOEXEC flag

2015-03-02 Thread Baptiste Reynal
From: Antonios Motakis a.mota...@virtualopensystems.com

Some IOMMU drivers, such as the ARM SMMU driver, make available the
IOMMU_NOEXEC flag to set the page tables for a device as XN (execute never).
This affects devices such as the ARM PL330 DMA Controller, which respects
this flag and will refuse to fetch DMA instructions from memory where the
XN flag has been set.

The flag can be used only if all IOMMU domains behind the container support
the IOMMU_NOEXEC flag. Also, if any mappings are created with the flag, any
new domains with devices will have to support it as well.

Signed-off-by: Antonios Motakis a.mota...@virtualopensystems.com
Signed-off-by: Baptiste Reynal b.rey...@virtualopensystems.com
---
 drivers/vfio/vfio_iommu_type1.c | 25 -
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ea371b..2bbd311 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -596,6 +596,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
	if (!prot || !size || (size | iova | vaddr) & mask)
return -EINVAL;
 
+	if (map->flags & VFIO_DMA_MAP_FLAG_NOEXEC) {
+		if (!vfio_domains_have_iommu_cap(iommu, IOMMU_CAP_NOEXEC))
+   return -EINVAL;
+   prot |= IOMMU_NOEXEC;
+   }
+
/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
return -EINVAL;
@@ -686,6 +692,14 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
	dma = rb_entry(n, struct vfio_dma, node);
	iova = dma->iova;
 
+   /*
+* if any of the mappings to be replayed has the NOEXEC flag
+* set, then the new iommu domain must support it
+*/
+		if ((dma->prot & IOMMU_NOEXEC) &&
+		    !(domain->caps & IOMMU_CAP_NOEXEC))
+			return -EINVAL;
+
		while (iova < dma->iova + dma->size) {
			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
size_t size;
@@ -819,6 +833,9 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->caps |= (1 << IOMMU_CAP_CACHE_COHERENCY);
 
+	if (iommu_capable(bus, IOMMU_CAP_NOEXEC))
+		domain->caps |= IOMMU_CAP_NOEXEC;
+
/*
 * Try to match an existing compatible domain.  We don't want to
 * preclude an IOMMU driver supporting multiple bus_types and being
@@ -982,6 +999,11 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return 0;
return vfio_domains_have_iommu_cap(iommu,
  IOMMU_CAP_CACHE_COHERENCY);
+   case VFIO_DMA_NOEXEC_IOMMU:
+   if (!iommu)
+   return 0;
+   return vfio_domains_have_iommu_cap(iommu,
+  IOMMU_CAP_NOEXEC);
default:
return 0;
}
@@ -1005,7 +1027,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
} else if (cmd == VFIO_IOMMU_MAP_DMA) {
struct vfio_iommu_type1_dma_map map;
uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-   VFIO_DMA_MAP_FLAG_WRITE;
+   VFIO_DMA_MAP_FLAG_WRITE |
+   VFIO_DMA_MAP_FLAG_NOEXEC;
 
minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
 
-- 
2.3.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v14 14/20] vfio: add a vfio_ prefix to virqfd_enable and virqfd_disable and export

2015-03-02 Thread Baptiste Reynal
From: Antonios Motakis a.mota...@virtualopensystems.com

We want to reuse virqfd functionality in multiple VFIO drivers; before
moving these functions to core VFIO, add the vfio_ prefix to the
virqfd_enable and virqfd_disable functions, and export them so they can
be used from other modules.

Signed-off-by: Antonios Motakis a.mota...@virtualopensystems.com
Signed-off-by: Baptiste Reynal b.rey...@virtualopensystems.com
---
 drivers/vfio/pci/vfio_pci_intrs.c | 30 --
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci_intrs.c 
b/drivers/vfio/pci/vfio_pci_intrs.c
index f88bfdf..4d38c93 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -126,10 +126,10 @@ static void virqfd_inject(struct work_struct *work)
	virqfd->thread(virqfd->vdev, virqfd->data);
 }
 
-static int virqfd_enable(struct vfio_pci_device *vdev,
-int (*handler)(struct vfio_pci_device *, void *),
-void (*thread)(struct vfio_pci_device *, void *),
-void *data, struct virqfd **pvirqfd, int fd)
+int vfio_virqfd_enable(struct vfio_pci_device *vdev,
+  int (*handler)(struct vfio_pci_device *, void *),
+  void (*thread)(struct vfio_pci_device *, void *),
+  void *data, struct virqfd **pvirqfd, int fd)
 {
struct fd irqfd;
struct eventfd_ctx *ctx;
@@ -215,9 +215,9 @@ err_fd:
 
return ret;
 }
+EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
 
-static void virqfd_disable(struct vfio_pci_device *vdev,
-  struct virqfd **pvirqfd)
+void vfio_virqfd_disable(struct vfio_pci_device *vdev, struct virqfd **pvirqfd)
 {
unsigned long flags;
 
@@ -237,6 +237,7 @@ static void virqfd_disable(struct vfio_pci_device *vdev,
 */
flush_workqueue(vfio_irqfd_cleanup_wq);
 }
+EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
 
 /*
  * INTx
@@ -440,8 +441,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device 
*vdev, int fd)
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
vfio_intx_set_signal(vdev, -1);
-	virqfd_disable(vdev, &vdev->ctx[0].unmask);
-	virqfd_disable(vdev, &vdev->ctx[0].mask);
+	vfio_virqfd_disable(vdev, &vdev->ctx[0].unmask);
+	vfio_virqfd_disable(vdev, &vdev->ctx[0].mask);
vdev-irq_type = VFIO_PCI_NUM_IRQS;
vdev-num_ctx = 0;
kfree(vdev-ctx);
@@ -605,8 +606,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, 
bool msix)
vfio_msi_set_block(vdev, 0, vdev-num_ctx, NULL, msix);
 
	for (i = 0; i < vdev->num_ctx; i++) {
-		virqfd_disable(vdev, &vdev->ctx[i].unmask);
-		virqfd_disable(vdev, &vdev->ctx[i].mask);
+		vfio_virqfd_disable(vdev, &vdev->ctx[i].unmask);
+		vfio_virqfd_disable(vdev, &vdev->ctx[i].mask);
}
 
if (msix) {
@@ -639,11 +640,12 @@ static int vfio_pci_set_intx_unmask(struct 
vfio_pci_device *vdev,
	} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;
		if (fd >= 0)
-			return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
-					     vfio_send_intx_eventfd, NULL,
-					     &vdev->ctx[0].unmask, fd);
+			return vfio_virqfd_enable(vdev,
+						  vfio_pci_intx_unmask_handler,
+						  vfio_send_intx_eventfd, NULL,
+						  &vdev->ctx[0].unmask, fd);
 
-		virqfd_disable(vdev, &vdev->ctx[0].unmask);
+		vfio_virqfd_disable(vdev, &vdev->ctx[0].unmask);
}
 
return 0;
-- 
2.3.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


[PATCH v14 05/20] vfio: amba: add the VFIO for AMBA devices module to Kconfig

2015-03-02 Thread Baptiste Reynal
From: Antonios Motakis a.mota...@virtualopensystems.com

Enable building the VFIO AMBA driver. VFIO_AMBA depends on VFIO_PLATFORM,
since it is sharing a portion of the code, and it is essentially implemented
as a platform device whose resources are discovered via AMBA specific APIs
in the kernel.

Signed-off-by: Antonios Motakis a.mota...@virtualopensystems.com
Signed-off-by: Baptiste Reynal b.rey...@virtualopensystems.com
---
 drivers/vfio/platform/Kconfig  | 10 ++
 drivers/vfio/platform/Makefile |  4 
 2 files changed, 14 insertions(+)

diff --git a/drivers/vfio/platform/Kconfig b/drivers/vfio/platform/Kconfig
index c51af17..c0a3bff 100644
--- a/drivers/vfio/platform/Kconfig
+++ b/drivers/vfio/platform/Kconfig
@@ -7,3 +7,13 @@ config VFIO_PLATFORM
  framework.
 
  If you don't know what to do here, say N.
+
+config VFIO_AMBA
+	tristate "VFIO support for AMBA devices"
+	depends on VFIO_PLATFORM && ARM_AMBA
+   help
+ Support for ARM AMBA devices with VFIO. This is required to make
+ use of ARM AMBA devices present on the system using the VFIO
+ framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/platform/Makefile b/drivers/vfio/platform/Makefile
index 279862b..1957170 100644
--- a/drivers/vfio/platform/Makefile
+++ b/drivers/vfio/platform/Makefile
@@ -2,3 +2,7 @@
 vfio-platform-y := vfio_platform.o vfio_platform_common.o
 
 obj-$(CONFIG_VFIO_PLATFORM) += vfio-platform.o
+
+vfio-amba-y := vfio_amba.o
+
+obj-$(CONFIG_VFIO_AMBA) += vfio-amba.o
-- 
2.3.1

___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm