From: Nicolin Chen <[email protected]> VINTF page0 is backed by host memory that the kernel returns via mmap(). Instead of trapping and emulating guest accesses to this region, map it directly into the guest-visible MMIO space.
The VINTF page0 MMIO region is created lazily when the first VCMDQ hardware queue is allocated. When CMDQV or VINTF is disabled, the region is removed. Signed-off-by: Nicolin Chen <[email protected]> Signed-off-by: Shameer Kolothum <[email protected]> --- hw/arm/tegra241-cmdqv.h | 1 + hw/arm/tegra241-cmdqv.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/hw/arm/tegra241-cmdqv.h b/hw/arm/tegra241-cmdqv.h index 139e14b61b..914977c2ef 100644 --- a/hw/arm/tegra241-cmdqv.h +++ b/hw/arm/tegra241-cmdqv.h @@ -38,6 +38,7 @@ typedef struct Tegra241CMDQV { qemu_irq irq; IOMMUFDHWqueue *vcmdq[TEGRA241_CMDQV_MAX_CMDQ]; void *vintf_page0; + MemoryRegion *mr_vintf_page0; /* Register Cache */ uint32_t config; diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c index 17b9552906..ce144add54 100644 --- a/hw/arm/tegra241-cmdqv.c +++ b/hw/arm/tegra241-cmdqv.c @@ -178,6 +178,38 @@ static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size) } } +static void tegra241_cmdqv_guest_unmap_vintf_page0(Tegra241CMDQV *cmdqv) +{ + if (!cmdqv->mr_vintf_page0) { + return; + } + + memory_region_del_subregion(&cmdqv->mmio_cmdqv, cmdqv->mr_vintf_page0); + object_unparent(OBJECT(cmdqv->mr_vintf_page0)); + g_free(cmdqv->mr_vintf_page0); + cmdqv->mr_vintf_page0 = NULL; +} + +static void tegra241_cmdqv_guest_map_vintf_page0(Tegra241CMDQV *cmdqv) +{ + char *name; + + if (cmdqv->mr_vintf_page0) { + return; + } + + name = g_strdup_printf("%s vintf-page0", + memory_region_name(&cmdqv->mmio_cmdqv)); + cmdqv->mr_vintf_page0 = g_malloc0(sizeof(*cmdqv->mr_vintf_page0)); + memory_region_init_ram_device_ptr(cmdqv->mr_vintf_page0, + memory_region_owner(&cmdqv->mmio_cmdqv), + name, VINTF_PAGE_SIZE, + cmdqv->vintf_page0); + memory_region_add_subregion_overlap(&cmdqv->mmio_cmdqv, 0x30000, + cmdqv->mr_vintf_page0, 1); + g_free(name); +} + static void tegra241_cmdqv_free_vcmdq(Tegra241CMDQV *cmdqv, int index) { SMMUv3AccelState *accel = 
cmdqv->s_accel; @@ -236,6 +268,7 @@ static bool tegra241_cmdqv_setup_vcmdq(Tegra241CMDQV *cmdqv, int index, hw_queue->viommu = viommu; cmdqv->vcmdq[index] = hw_queue; + tegra241_cmdqv_guest_map_vintf_page0(cmdqv); return true; } @@ -371,6 +404,7 @@ static void tegra241_cmdqv_write_vintf(Tegra241CMDQV *cmdqv, hwaddr offset, tegra241_cmdqv_mmap_vintf_page0(cmdqv, errp); cmdqv->vintf_status |= R_VINTF0_STATUS_ENABLE_OK_MASK; } else { + tegra241_cmdqv_guest_unmap_vintf_page0(cmdqv); tegra241_cmdqv_free_all_vcmdq(cmdqv); tegra241_cmdqv_munmap_vintf_page0(cmdqv, errp); cmdqv->vintf_status &= ~R_VINTF0_STATUS_ENABLE_OK_MASK; @@ -411,6 +445,7 @@ static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value, if (value & R_CONFIG_CMDQV_EN_MASK) { cmdqv->status |= R_STATUS_CMDQV_ENABLED_MASK; } else { + tegra241_cmdqv_guest_unmap_vintf_page0(cmdqv); tegra241_cmdqv_free_all_vcmdq(cmdqv); cmdqv->status &= ~R_STATUS_CMDQV_ENABLED_MASK; } -- 2.43.0
