Signed-off-by: Mukesh Rathor <[email protected]>
---
drivers/hv/mshv_root_main.c | 20 ++++----------------
1 file changed, 4 insertions(+), 16 deletions(-)

diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 27313419828d..03f3aa9f5541 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -1258,16 +1258,8 @@ static int mshv_prepare_pinned_region(struct mshv_mem_region *region)
}

/*
- * This maps two things: guest RAM and for pci passthru mmio space.
- *
- * mmio:
- * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
- * - Two things need to happen for mapping mmio range:
- * 1. mapped in the uaddr so VMM can access it.
- * 2. mapped in the hwpt (gfn <-> mmio phys addr) so guest can access it.
- *
- * This function takes care of the second. The first one is managed by vfio,
- * and hence is taken care of via vfio_pci_mmap_fault().
+ * This is called for both user RAM and MMIO space. The MMIO space is not
+ * mapped here, but later, during intercept handling.
*/
static long
mshv_map_user_memory(struct mshv_partition *partition,
@@ -1276,7 +1268,6 @@ mshv_map_user_memory(struct mshv_partition *partition,
struct mshv_mem_region *region;
struct vm_area_struct *vma;
bool is_mmio;
- ulong mmio_pfn;
long ret;

if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
@@ -1286,7 +1277,6 @@ mshv_map_user_memory(struct mshv_partition *partition,
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, mem.userspace_addr);
is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0;
- mmio_pfn = is_mmio ? vma->vm_pgoff : 0;
mmap_read_unlock(current->mm);

if (!vma)
@@ -1313,10 +1303,8 @@ mshv_map_user_memory(struct mshv_partition *partition,
HV_MAP_GPA_NO_ACCESS, NULL);
break;
case MSHV_REGION_TYPE_MMIO:
- ret = hv_call_map_mmio_pages(partition->pt_id,
- region->start_gfn,
- mmio_pfn,
- region->nr_pages);
+ /* MMIO mappings are handled later, during intercepts */
+ ret = 0;
break;
}
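
A minimal sketch, for illustration only, of what the deferred MMIO mapping
could look like on the intercept path. mshv_handle_mmio_intercept(),
mshv_partition_region_by_gfn(), and region->start_pfn are hypothetical names
and are not part of this patch; hv_call_map_mmio_pages(), partition->pt_id,
and the other region fields are taken from the code removed above.

/*
 * Hypothetical sketch (not in this patch): map an MMIO region's pages in
 * the hwpt on the first unmapped-GPA intercept, instead of doing it at
 * mshv_map_user_memory() time. Assumes region->start_pfn caches the vfio
 * vm_pgoff captured when the region was created.
 */
static long mshv_handle_mmio_intercept(struct mshv_partition *partition,
				       u64 gfn)
{
	struct mshv_mem_region *region;

	/* assumed lookup helper: find the region covering this gfn */
	region = mshv_partition_region_by_gfn(partition, gfn);
	if (!region)
		return -EFAULT;

	/* establish the gfn <-> mmio phys addr mapping in the hwpt */
	return hv_call_map_mmio_pages(partition->pt_id, region->start_gfn,
				      region->start_pfn, region->nr_pages);
}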