Re: [PATCH 2/5] virtio-iommu: Implement RESV_MEM probe request

2020-05-08 Thread Auger Eric
Hi Jean,
On 5/8/20 11:26 AM, Jean-Philippe Brucker wrote:
> On Thu, May 07, 2020 at 04:31:58PM +0200, Eric Auger wrote:
>> +static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
>> +   uint8_t *buf, size_t free)
>> +{
>> +struct virtio_iommu_probe_resv_mem prop = {};
>> +size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
>> +int i;
>> +
>> +total = size * s->nb_reserved_regions;
>> +
>> +if (total > free) {
>> +return -ENOSPC;
>> +}
>> +
>> +for (i = 0; i < s->nb_reserved_regions; i++) {
>> +prop.head.type = VIRTIO_IOMMU_PROBE_T_RESV_MEM;
> 
> cpu_to_le16
> 
>> +prop.head.length = cpu_to_le64(length);
> 
> same here
> 
>> +prop.subtype = cpu_to_le64(s->reserved_regions[i].type);
> 
> and subtype is a byte

All fixed. Thanks for spotting this.
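
For reference, since head.type and head.length are le16 and subtype is a
single byte in the virtio-iommu header, the loop body now reads roughly
like this (sketch of the fixed version):

    prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
    prop.head.length = cpu_to_le16(length);
    prop.subtype = s->reserved_regions[i].type;  /* single byte, no swap */
    prop.start = cpu_to_le64(s->reserved_regions[i].low);
    prop.end = cpu_to_le64(s->reserved_regions[i].high);
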
> 
>> +prop.start = cpu_to_le64(s->reserved_regions[i].low);
>> +prop.end = cpu_to_le64(s->reserved_regions[i].high);
>> +
>> +memcpy(buf, &prop, size);
>> +
>> +trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
>> +  prop.start, prop.end);
>> +buf += size;
>> +}
>> +return total;
>> +}
>> +
>> +/**
>> + * virtio_iommu_probe - Fill the probe request buffer with
>> + * the properties the device is able to return and add a NONE
>> + * property at the end.
>> + */
>> +static int virtio_iommu_probe(VirtIOIOMMU *s,
>> +  struct virtio_iommu_req_probe *req,
>> +  uint8_t *buf)
>> +{
>> +uint32_t ep_id = le32_to_cpu(req->endpoint);
> 
> I think we should check that the endpoint ID is sane even if we're not
> using it at the moment (it is a SHOULD in the spec, and the page size mask
> patch will need the ep anyway).
OK
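
Something along these lines should do, I guess (assuming virtio_iommu_mr()
is the right helper to detect an unknown endpoint here):

    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }
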
> 
>> +struct virtio_iommu_probe_property last = {};
>> +size_t free = VIOMMU_PROBE_SIZE - sizeof(last);
> 
> last isn't needed, you can fill the whole probe buffer with valid
> properties
Yep, this is a leftover from the NONE property; I will drop it.
> 
>> +ssize_t count;
>> +
>> +count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
>> +if (count < 0) {
>> +return VIRTIO_IOMMU_S_INVAL;
> 
> indentation?
yes.

Thank you for the review

Eric
> 
> Thanks,
> Jean
> 
>> +}
>> +buf += count;
>> +free -= count;
>> +
>> +/* Fill the rest with zeroes */
>> +memset(buf, 0, free);
>> +
>> +return VIRTIO_IOMMU_S_OK;
>> +}
>> +
>>  static int virtio_iommu_iov_to_req(struct iovec *iov,
>> unsigned int iov_cnt,
>> void *req, size_t req_sz)
>> @@ -407,6 +464,17 @@ virtio_iommu_handle_req(detach)
>>  virtio_iommu_handle_req(map)
>>  virtio_iommu_handle_req(unmap)
>>  
>> +static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
>> + struct iovec *iov,
>> + unsigned int iov_cnt,
>> + uint8_t *buf)
>> +{
>> +struct virtio_iommu_req_probe req;
>> +int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
>> +
>> +return ret ? ret : virtio_iommu_probe(s, &req, buf);
>> +}
>> +
>>  static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
>>  {
>>  VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
>> @@ -452,17 +520,33 @@ static void virtio_iommu_handle_command(VirtIODevice 
>> *vdev, VirtQueue *vq)
>>  case VIRTIO_IOMMU_T_UNMAP:
>>  tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
>>  break;
>> +case VIRTIO_IOMMU_T_PROBE:
>> +{
>> +struct virtio_iommu_req_tail *ptail;
>> +uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
>> +
>> +ptail = (struct virtio_iommu_req_tail *)
>> +(buf + s->config.probe_size);
>> +ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
>> +
>> +sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
>> +  buf, s->config.probe_size + sizeof(tail));
>> +g_free(buf);
>> +assert(sz == s->config.probe_size + sizeof(tail));
>> +goto push;
>> +}
>>  default:
>>  tail.status = VIRTIO_IOMMU_S_UNSUPP;
>>  }
>> -qemu_mutex_unlock(&s->mutex);
>>  
>>  out:
>>  sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
>> &tail, sizeof(tail));
>>  assert(sz == sizeof(tail));
>>  
>> -virtqueue_push(vq, elem, sizeof(tail));
>> +push:
>> +qemu_mutex_unlock(&s->mutex);
>> +virtqueue_push(vq, elem, sz);
>>  virtio_notify(vdev, vq);
>>  g_free(elem);
>>  }
>> @@ -667,6 +751,7 @@ static void virtio_iommu_device_realize(DeviceState 
>> *dev, Error **errp)
>>  s->config.page_size_mask = TARGET_PAGE_MASK;
>>  

Re: [PATCH 2/5] virtio-iommu: Implement RESV_MEM probe request

2020-05-08 Thread Jean-Philippe Brucker
On Thu, May 07, 2020 at 04:31:58PM +0200, Eric Auger wrote:
> +static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
> +   uint8_t *buf, size_t free)
> +{
> +struct virtio_iommu_probe_resv_mem prop = {};
> +size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
> +int i;
> +
> +total = size * s->nb_reserved_regions;
> +
> +if (total > free) {
> +return -ENOSPC;
> +}
> +
> +for (i = 0; i < s->nb_reserved_regions; i++) {
> +prop.head.type = VIRTIO_IOMMU_PROBE_T_RESV_MEM;

cpu_to_le16

> +prop.head.length = cpu_to_le64(length);

same here

> +prop.subtype = cpu_to_le64(s->reserved_regions[i].type);

and subtype is a byte

> +prop.start = cpu_to_le64(s->reserved_regions[i].low);
> +prop.end = cpu_to_le64(s->reserved_regions[i].high);
> +
> +memcpy(buf, &prop, size);
> +
> +trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
> +  prop.start, prop.end);
> +buf += size;
> +}
> +return total;
> +}
> +
> +/**
> + * virtio_iommu_probe - Fill the probe request buffer with
> + * the properties the device is able to return and add a NONE
> + * property at the end.
> + */
> +static int virtio_iommu_probe(VirtIOIOMMU *s,
> +  struct virtio_iommu_req_probe *req,
> +  uint8_t *buf)
> +{
> +uint32_t ep_id = le32_to_cpu(req->endpoint);

I think we should check that the endpoint ID is sane even if we're not
using it at the moment (it is a SHOULD in the spec, and the page size mask
patch will need the ep anyway).

> +struct virtio_iommu_probe_property last = {};
> +size_t free = VIOMMU_PROBE_SIZE - sizeof(last);

last isn't needed, you can fill the whole probe buffer with valid
properties

> +ssize_t count;
> +
> +count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
> +if (count < 0) {
> +return VIRTIO_IOMMU_S_INVAL;

indentation?

Thanks,
Jean

> +}
> +buf += count;
> +free -= count;
> +
> +/* Fill the rest with zeroes */
> +memset(buf, 0, free);
> +
> +return VIRTIO_IOMMU_S_OK;
> +}
> +
>  static int virtio_iommu_iov_to_req(struct iovec *iov,
> unsigned int iov_cnt,
> void *req, size_t req_sz)
> @@ -407,6 +464,17 @@ virtio_iommu_handle_req(detach)
>  virtio_iommu_handle_req(map)
>  virtio_iommu_handle_req(unmap)
>  
> +static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
> + struct iovec *iov,
> + unsigned int iov_cnt,
> + uint8_t *buf)
> +{
> +struct virtio_iommu_req_probe req;
> +int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
> +
> +return ret ? ret : virtio_iommu_probe(s, &req, buf);
> +}
> +
>  static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
>  {
>  VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
> @@ -452,17 +520,33 @@ static void virtio_iommu_handle_command(VirtIODevice 
> *vdev, VirtQueue *vq)
>  case VIRTIO_IOMMU_T_UNMAP:
>  tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
>  break;
> +case VIRTIO_IOMMU_T_PROBE:
> +{
> +struct virtio_iommu_req_tail *ptail;
> +uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
> +
> +ptail = (struct virtio_iommu_req_tail *)
> +(buf + s->config.probe_size);
> +ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
> +
> +sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> +  buf, s->config.probe_size + sizeof(tail));
> +g_free(buf);
> +assert(sz == s->config.probe_size + sizeof(tail));
> +goto push;
> +}
>  default:
>  tail.status = VIRTIO_IOMMU_S_UNSUPP;
>  }
> -qemu_mutex_unlock(&s->mutex);
>  
>  out:
>  sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> &tail, sizeof(tail));
>  assert(sz == sizeof(tail));
>  
> -virtqueue_push(vq, elem, sizeof(tail));
> +push:
> +qemu_mutex_unlock(&s->mutex);
> +virtqueue_push(vq, elem, sz);
>  virtio_notify(vdev, vq);
>  g_free(elem);
>  }
> @@ -667,6 +751,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, 
> Error **errp)
>  s->config.page_size_mask = TARGET_PAGE_MASK;
>  s->config.input_range.end = -1UL;
>  s->config.domain_range.end = 32;
> +s->config.probe_size = VIOMMU_PROBE_SIZE;
>  
> +virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
> +virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
> @@ -676,6 +761,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, 
> Error 

Re: [PATCH 2/5] virtio-iommu: Implement RESV_MEM probe request

2020-05-08 Thread Auger Eric
Hi Peter,
On 5/7/20 9:40 PM, Peter Xu wrote:
> Hi, Eric,
> 
> On Thu, May 07, 2020 at 04:31:58PM +0200, Eric Auger wrote:
> 
> [...]
> 
>> @@ -452,17 +520,33 @@ static void virtio_iommu_handle_command(VirtIODevice 
>> *vdev, VirtQueue *vq)
>>  case VIRTIO_IOMMU_T_UNMAP:
>>  tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
>>  break;
>> +case VIRTIO_IOMMU_T_PROBE:
>> +{
>> +struct virtio_iommu_req_tail *ptail;
>> +uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
>> +
>> +ptail = (struct virtio_iommu_req_tail *)
>> +(buf + s->config.probe_size);
>> +ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
>> +
>> +sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
>> +  buf, s->config.probe_size + sizeof(tail));
>> +g_free(buf);
>> +assert(sz == s->config.probe_size + sizeof(tail));
>> +goto push;
>> +}
>>  default:
>>  tail.status = VIRTIO_IOMMU_S_UNSUPP;
>>  }
>> -qemu_mutex_unlock(&s->mutex);
>>  
>>  out:
>>  sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
>> &tail, sizeof(tail));
>>  assert(sz == sizeof(tail));
>>  
>> -virtqueue_push(vq, elem, sizeof(tail));
>> +push:
>> +qemu_mutex_unlock(&s->mutex);
> 
> I think we can't move this unlock to here because otherwise "goto out" could
> potentially try to unlock it without locked first.  Thanks,
You're right. I will revisit that.
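
One option I am considering (untested sketch only): keep the unlock where
it was, make the PROBE case break out of the switch like the others, and
pick the right buffer/size at "out:", e.g.:

    uint8_t *buf = NULL;
    size_t output_size = sizeof(tail);
    ...
    case VIRTIO_IOMMU_T_PROBE:
    {
        struct virtio_iommu_req_tail *ptail;

        output_size = s->config.probe_size + sizeof(tail);
        buf = g_malloc0(output_size);
        ptail = (struct virtio_iommu_req_tail *)(buf + s->config.probe_size);
        ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
        break;
    }
    default:
        tail.status = VIRTIO_IOMMU_S_UNSUPP;
    }
    qemu_mutex_unlock(&s->mutex);

out:
    /* buf is only set on the PROBE path; otherwise return the plain tail */
    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      buf ? buf : &tail, output_size);
    assert(sz == output_size);

    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
    g_free(buf);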

Thanks!

Eric
> 
>> +virtqueue_push(vq, elem, sz);
>>  virtio_notify(vdev, vq);
>>  g_free(elem);
>>  }
> 




Re: [PATCH 2/5] virtio-iommu: Implement RESV_MEM probe request

2020-05-07 Thread Peter Xu
Hi, Eric,

On Thu, May 07, 2020 at 04:31:58PM +0200, Eric Auger wrote:

[...]

> @@ -452,17 +520,33 @@ static void virtio_iommu_handle_command(VirtIODevice 
> *vdev, VirtQueue *vq)
>  case VIRTIO_IOMMU_T_UNMAP:
>  tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
>  break;
> +case VIRTIO_IOMMU_T_PROBE:
> +{
> +struct virtio_iommu_req_tail *ptail;
> +uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
> +
> +ptail = (struct virtio_iommu_req_tail *)
> +(buf + s->config.probe_size);
> +ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
> +
> +sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> +  buf, s->config.probe_size + sizeof(tail));
> +g_free(buf);
> +assert(sz == s->config.probe_size + sizeof(tail));
> +goto push;
> +}
>  default:
>  tail.status = VIRTIO_IOMMU_S_UNSUPP;
>  }
> -qemu_mutex_unlock(&s->mutex);
>  
>  out:
>  sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> &tail, sizeof(tail));
>  assert(sz == sizeof(tail));
>  
> -virtqueue_push(vq, elem, sizeof(tail));
> +push:
> +qemu_mutex_unlock(&s->mutex);

I think we can't move this unlock to here because otherwise "goto out" could
potentially try to unlock it without locked first.  Thanks,

> +virtqueue_push(vq, elem, sz);
>  virtio_notify(vdev, vq);
>  g_free(elem);
>  }

-- 
Peter Xu




[PATCH 2/5] virtio-iommu: Implement RESV_MEM probe request

2020-05-07 Thread Eric Auger
This patch implements the PROBE request. At the moment,
only the RESV_MEM property is handled. The first goal is
to report IOMMU-wide reserved regions such as the MSI regions
set by the machine code. On x86 this will be the IOAPIC MSI
region, [0xFEE00000 - 0xFEEFFFFF]; on ARM this may be the ITS
doorbell.
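
As an illustration, with the header definitions the x86 IOAPIC MSI window
would be surfaced to the guest as a property roughly like this (the values
below are only an example):

    struct virtio_iommu_probe_resv_mem msi = {
        .head.type   = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM),
        .head.length = cpu_to_le16(sizeof(msi) - sizeof(msi.head)),
        .subtype     = VIRTIO_IOMMU_RESV_MEM_T_MSI,
        .start       = cpu_to_le64(0xFEE00000),
        .end         = cpu_to_le64(0xFEEFFFFF),
    };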

In the future we may introduce per-device reserved regions.
This will be useful when protecting host-assigned devices,
which may expose their own reserved regions.

Signed-off-by: Eric Auger 
Reviewed-by: Peter Xu 
Reviewed-by: Jean-Philippe Brucker 

---

The previous version was reviewed by Peter and Jean-Philippe. I
simply removed the NONE property, memset the free space instead,
and reworded the commit message. I dared to keep the R-b.
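
As far as I understand, a driver parsing the probe buffer stops at the
first property whose type reads back as NONE (0), so zero-filling the
remaining space is equivalent to terminating with an explicit NONE
property. A hypothetical consumer-side sketch:

    uint32_t cur = 0;
    while (cur < probe_size) {
        struct virtio_iommu_probe_property *prop = (void *)(buf + cur);
        uint16_t type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

        if (type == VIRTIO_IOMMU_PROBE_T_NONE) {
            break;
        }
        /* ... handle the property ... */
        cur += sizeof(*prop) + le16_to_cpu(prop->length);
    }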
---
 include/hw/virtio/virtio-iommu.h |  2 +
 hw/virtio/virtio-iommu.c | 90 +++-
 hw/virtio/trace-events   |  1 +
 3 files changed, 91 insertions(+), 2 deletions(-)

diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index e653004d7c..49eb105cd8 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -53,6 +53,8 @@ typedef struct VirtIOIOMMU {
 GHashTable *as_by_busptr;
 IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
 PCIBus *primary_bus;
+ReservedRegion *reserved_regions;
+uint32_t nb_reserved_regions;
 GTree *domains;
 QemuMutex mutex;
 GTree *endpoints;
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 22ba8848c2..95059eff70 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -38,6 +38,7 @@
 
 /* Max size */
 #define VIOMMU_DEFAULT_QUEUE_SIZE 256
+#define VIOMMU_PROBE_SIZE 512
 
 typedef struct VirtIOIOMMUDomain {
 uint32_t id;
@@ -378,6 +379,62 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
 return ret;
 }
 
+static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
+   uint8_t *buf, size_t free)
+{
+struct virtio_iommu_probe_resv_mem prop = {};
+size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
+int i;
+
+total = size * s->nb_reserved_regions;
+
+if (total > free) {
+return -ENOSPC;
+}
+
+for (i = 0; i < s->nb_reserved_regions; i++) {
+prop.head.type = VIRTIO_IOMMU_PROBE_T_RESV_MEM;
+prop.head.length = cpu_to_le64(length);
+prop.subtype = cpu_to_le64(s->reserved_regions[i].type);
+prop.start = cpu_to_le64(s->reserved_regions[i].low);
+prop.end = cpu_to_le64(s->reserved_regions[i].high);
+
+memcpy(buf, &prop, size);
+
+trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
+  prop.start, prop.end);
+buf += size;
+}
+return total;
+}
+
+/**
+ * virtio_iommu_probe - Fill the probe request buffer with
+ * the properties the device is able to return and add a NONE
+ * property at the end.
+ */
+static int virtio_iommu_probe(VirtIOIOMMU *s,
+  struct virtio_iommu_req_probe *req,
+  uint8_t *buf)
+{
+uint32_t ep_id = le32_to_cpu(req->endpoint);
+struct virtio_iommu_probe_property last = {};
+size_t free = VIOMMU_PROBE_SIZE - sizeof(last);
+ssize_t count;
+
+count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
+if (count < 0) {
+return VIRTIO_IOMMU_S_INVAL;
+}
+buf += count;
+free -= count;
+
+/* Fill the rest with zeroes */
+memset(buf, 0, free);
+
+return VIRTIO_IOMMU_S_OK;
+}
+
 static int virtio_iommu_iov_to_req(struct iovec *iov,
unsigned int iov_cnt,
void *req, size_t req_sz)
@@ -407,6 +464,17 @@ virtio_iommu_handle_req(detach)
 virtio_iommu_handle_req(map)
 virtio_iommu_handle_req(unmap)
 
+static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt,
+ uint8_t *buf)
+{
+struct virtio_iommu_req_probe req;
+int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
+
+return ret ? ret : virtio_iommu_probe(s, &req, buf);
+}
+
 static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
 {
 VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
@@ -452,17 +520,33 @@ static void virtio_iommu_handle_command(VirtIODevice 
*vdev, VirtQueue *vq)
 case VIRTIO_IOMMU_T_UNMAP:
 tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
 break;
+case VIRTIO_IOMMU_T_PROBE:
+{
+struct virtio_iommu_req_tail *ptail;
+uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
+
+ptail = (struct virtio_iommu_req_tail *)
+(buf + s->config.probe_size);
+ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt,