Signed-off-by: Srujana Challa <scha...@marvell.com>
---
drivers/vhost/vdpa.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index bc4a51e4638b..d071c30125aa 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -36,6 +36,11 @@ enum {
#define VHOST_VDPA_IOTLB_BUCKETS 16
+bool vhost_vdpa_noiommu;
+module_param_named(enable_vhost_vdpa_unsafe_noiommu_mode,
+ vhost_vdpa_noiommu, bool, 0644);
+MODULE_PARM_DESC(enable_vhost_vdpa_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
+
struct vhost_vdpa_as {
struct hlist_node hash_link;
struct vhost_iotlb iotlb;
@@ -60,6 +65,7 @@ struct vhost_vdpa {
struct vdpa_iova_range range;
u32 batch_asid;
bool suspended;
+ bool noiommu_en;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -887,6 +893,10 @@ static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (v->noiommu_en)
+ return;
+
if (ops->dma_map) {
ops->dma_unmap(vdpa, asid, map->start, map->size);
} else if (ops->set_map == NULL) {
@@ -980,6 +990,9 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
if (r)
return r;
+ if (v->noiommu_en)
+ goto skip_map;
+
if (ops->dma_map) {
r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
} else if (ops->set_map) {
@@ -995,6 +1008,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
return r;
}
+skip_map:
if (!vdpa->use_va)
atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
@@ -1298,6 +1312,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ struct iommu_domain *domain;
const struct bus_type *bus;
int ret;
@@ -1305,6 +1320,14 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
if (ops->set_map || ops->dma_map)
return 0;
+ domain = iommu_get_domain_for_dev(dma_dev);
+ if ((!domain || domain->type == IOMMU_DOMAIN_IDENTITY) &&
+ vhost_vdpa_noiommu && capable(CAP_SYS_RAWIO)) {
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ dev_warn(&v->dev, "Adding kernel taint for noiommu on device\n");
+ v->noiommu_en = true;
+ return 0;
+ }
bus = dma_dev->bus;
if (!bus)
return -EFAULT;
--
2.25.1