From: "Yingshiuan Pan" <yingshiuan....@mediatek.com>

The memory protection mechanism performs better with batch operations on
memory pages. To leverage this, we pre-allocate memory for VMs that are
set to protected mode, so that the memory protection mechanism can
protect the pre-allocated memory up front through batch operations,
improving performance during VM boot.

Signed-off-by: Yingshiuan Pan <yingshiuan....@mediatek.com>
Signed-off-by: Jerry Wang <ze-yu.w...@mediatek.com>
Signed-off-by: Liju Chen <liju-clr.c...@mediatek.com>
Signed-off-by: Yi-De Wu <yi-de...@mediatek.com>
---
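
Note for reviewers (not part of the commit): below is a minimal userspace
sketch of the constituent-packing idea that fill_constituents() implements,
assuming 4 KiB pages; struct range and pack_ranges() are illustrative names
only, the real code operates on gzvm memslots and fills
struct mem_region_addr_range entries.

#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t address;	/* start physical address of a contiguous run */
	uint32_t pg_cnt;	/* number of pages in the run */
};

static int pack_ranges(const uint64_t *pfns, int n, struct range *out, int max)
{
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		if (cnt && pfns[i] == pfns[i - 1] + 1) {
			out[cnt - 1].pg_cnt++;	/* extend the current contiguous run */
			continue;
		}
		if (cnt == max)
			break;			/* buffer full; caller would flush and retry */
		out[cnt].address = pfns[i] << 12;	/* PFN_PHYS() equivalent for 4 KiB pages */
		out[cnt].pg_cnt = 1;
		cnt++;
	}
	return cnt;
}

int main(void)
{
	uint64_t pfns[] = { 0x100, 0x101, 0x102, 0x200, 0x201 };
	struct range out[4];
	int i, cnt = pack_ranges(pfns, 5, out, 4);

	for (i = 0; i < cnt; i++)
		printf("pa=%#llx pages=%u\n",
		       (unsigned long long)out[i].address, out[i].pg_cnt);
	return 0;	/* prints two ranges: 0x100000/3 pages and 0x200000/2 pages */
}
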
 arch/arm64/geniezone/vm.c         | 146 ++++++++++++++++++++++++++++++
 drivers/virt/geniezone/Makefile   |   3 +-
 drivers/virt/geniezone/gzvm_mmu.c | 108 ++++++++++++++++++++++
 3 files changed, 256 insertions(+), 1 deletion(-)
 create mode 100644 drivers/virt/geniezone/gzvm_mmu.c

diff --git a/arch/arm64/geniezone/vm.c b/arch/arm64/geniezone/vm.c
index 02f94c86fbf1..e669a63ccac3 100644
--- a/arch/arm64/geniezone/vm.c
+++ b/arch/arm64/geniezone/vm.c
@@ -156,6 +156,122 @@ static int gzvm_vm_ioctl_get_pvmfw_size(struct gzvm *gzvm,
        return 0;
 }
 
+/**
+ * fill_constituents() - Populate pa to buffer until full
+ * @consti: Pointer to struct mem_region_addr_range.
+ * @consti_cnt: Constituent count.
+ * @max_nr_consti: Maximum number of constituents.
+ * @gfn: Guest frame number.
+ * @total_pages: Total number of pages.
+ * @slot: Pointer to struct gzvm_memslot.
+ *
+ * Return: how many pages we've filled in, negative if error
+ */
+static int fill_constituents(struct mem_region_addr_range *consti,
+                            int *consti_cnt, int max_nr_consti, u64 gfn,
+                            u32 total_pages, struct gzvm_memslot *slot)
+{
+       u64 pfn, prev_pfn, gfn_end;
+       int nr_pages = 1;
+       int i = 0;
+
+       if (unlikely(total_pages == 0))
+               return -EINVAL;
+       gfn_end = gfn + total_pages;
+
+       /* entry 0 */
+       if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
+               return -EFAULT;
+       consti[0].address = PFN_PHYS(pfn);
+       consti[0].pg_cnt = 1;
+       gfn++;
+       prev_pfn = pfn;
+
+       while (i < max_nr_consti && gfn < gfn_end) {
+               if (gzvm_gfn_to_pfn_memslot(slot, gfn, &pfn) != 0)
+                       return -EFAULT;
+               if (pfn == (prev_pfn + 1)) {
+                       consti[i].pg_cnt++;
+               } else {
+                       i++;
+                       if (i >= max_nr_consti)
+                               break;
+                       consti[i].address = PFN_PHYS(pfn);
+                       consti[i].pg_cnt = 1;
+               }
+               prev_pfn = pfn;
+               gfn++;
+               nr_pages++;
+       }
+       if (i != max_nr_consti)
+               i++;
+       *consti_cnt = i;
+
+       return nr_pages;
+}
+
+/**
+ * populate_mem_region() - Iterate over all memslots and populate pa to buffer until it's full
+ * @gzvm: Pointer to struct gzvm.
+ *
+ * Return: 0 if it is successful, negative if error
+ */
+static int populate_mem_region(struct gzvm *gzvm)
+{
+       int slot_cnt = 0;
+
+       while (slot_cnt < GZVM_MAX_MEM_REGION && gzvm->memslot[slot_cnt].npages != 0) {
+               struct gzvm_memslot *memslot = &gzvm->memslot[slot_cnt];
+               struct gzvm_memory_region_ranges *region;
+               int max_nr_consti, remain_pages;
+               u64 gfn, gfn_end;
+               u32 buf_size;
+
+               buf_size = PAGE_SIZE * 2;
+               region = alloc_pages_exact(buf_size, GFP_KERNEL);
+               if (!region)
+                       return -ENOMEM;
+
+               max_nr_consti = (buf_size - sizeof(*region)) /
+                               sizeof(struct mem_region_addr_range);
+
+               region->slot = memslot->slot_id;
+               remain_pages = memslot->npages;
+               gfn = memslot->base_gfn;
+               gfn_end = gfn + remain_pages;
+
+               while (gfn < gfn_end) {
+                       int nr_pages;
+
+                       nr_pages = fill_constituents(region->constituents,
+                                                    &region->constituent_cnt,
+                                                    max_nr_consti, gfn,
+                                                    remain_pages, memslot);
+
+                       if (nr_pages < 0) {
+                               pr_err("Failed to fill constituents\n");
+                               free_pages_exact(region, buf_size);
+                               return -EFAULT;
+                       }
+
+                       region->gpa = PFN_PHYS(gfn);
+                       region->total_pages = nr_pages;
+                       remain_pages -= nr_pages;
+                       gfn += nr_pages;
+
+                       if (gzvm_arch_set_memregion(gzvm->vm_id, buf_size,
+                                                   virt_to_phys(region))) {
+                               pr_err("Failed to register memregion to hypervisor\n");
+                               free_pages_exact(region, buf_size);
+                               return -EFAULT;
+                       }
+               }
+               free_pages_exact(region, buf_size);
+               ++slot_cnt;
+       }
+       return 0;
+}
+
 /**
  * gzvm_vm_ioctl_cap_pvm() - Proceed GZVM_CAP_PROTECTED_VM's subcommands
  * @gzvm: Pointer to struct gzvm.
@@ -177,6 +293,11 @@ static int gzvm_vm_ioctl_cap_pvm(struct gzvm *gzvm,
        case GZVM_CAP_PVM_SET_PVMFW_GPA:
                fallthrough;
        case GZVM_CAP_PVM_SET_PROTECTED_VM:
+               /*
+                * To improve performance for a protected VM, we have to
+                * populate the VM's memory before the VM boots.
+                */
+               populate_mem_region(gzvm);
                ret = gzvm_vm_arch_enable_cap(gzvm, cap, &res);
                return ret;
        case GZVM_CAP_PVM_GET_PVMFW_SIZE:
@@ -205,3 +326,28 @@ int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
 
        return -EINVAL;
 }
+
+/**
+ * gzvm_hva_to_pa_arch() - converts hva to pa in an arch-specific way
+ * @hva: Host virtual address.
+ *
+ * Return: GZVM_PA_ERR_BAD for translation error
+ */
+u64 gzvm_hva_to_pa_arch(u64 hva)
+{
+       unsigned long flags;
+       u64 par;
+
+       local_irq_save(flags);
+       asm volatile("at s1e1r, %0" :: "r" (hva));
+       isb();
+       par = read_sysreg_par();
+       local_irq_restore(flags);
+
+       if (par & SYS_PAR_EL1_F)
+               return GZVM_PA_ERR_BAD;
+       par = par & PAR_PA47_MASK;
+       if (!par)
+               return GZVM_PA_ERR_BAD;
+       return par;
+}
diff --git a/drivers/virt/geniezone/Makefile b/drivers/virt/geniezone/Makefile
index 25614ea3dea2..59fc4510a843 100644
--- a/drivers/virt/geniezone/Makefile
+++ b/drivers/virt/geniezone/Makefile
@@ -6,4 +6,5 @@
 
 GZVM_DIR ?= ../../../drivers/virt/geniezone
 
-gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o
+gzvm-y := $(GZVM_DIR)/gzvm_main.o $(GZVM_DIR)/gzvm_vm.o \
+         $(GZVM_DIR)/gzvm_mmu.o
diff --git a/drivers/virt/geniezone/gzvm_mmu.c b/drivers/virt/geniezone/gzvm_mmu.c
new file mode 100644
index 000000000000..f24fa7e6d975
--- /dev/null
+++ b/drivers/virt/geniezone/gzvm_mmu.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+
+#include <linux/gzvm_drv.h>
+
+/**
+ * hva_to_pa_fast() - converts hva to pa in a generic fast way
+ * @hva: Host virtual address.
+ *
+ * Return: GZVM_PA_ERR_BAD for translation error
+ */
+u64 hva_to_pa_fast(u64 hva)
+{
+       struct page *page[1];
+       u64 pfn;
+
+       if (get_user_page_fast_only(hva, 0, page)) {
+               pfn = page_to_phys(page[0]);
+               put_page(page[0]);
+               return pfn;
+       }
+       return GZVM_PA_ERR_BAD;
+}
+
+/**
+ * hva_to_pa_slow() - converts hva to pa in a slow way
+ * @hva: Host virtual address
+ *
+ * This function converts HVA to PA the slow way: the target hva has not yet
+ * been allocated and mapped in the host stage-1 page table, so we cannot
+ * find it by walking the current page table directly.
+ * Instead, we have to allocate the page first, which makes this operation
+ * much slower than a direct page-table lookup.
+ *
+ * Context: This function may sleep
+ * Return: PA or GZVM_PA_ERR_BAD for translation error
+ */
+u64 hva_to_pa_slow(u64 hva)
+{
+       struct page *page = NULL;
+       u64 pfn = 0;
+       int npages;
+
+       npages = get_user_pages_unlocked(hva, 1, &page, 0);
+       if (npages != 1)
+               return GZVM_PA_ERR_BAD;
+
+       if (page) {
+               pfn = page_to_phys(page);
+               put_page(page);
+               return pfn;
+       }
+
+       return GZVM_PA_ERR_BAD;
+}
+
+static u64 __gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn)
+{
+       u64 hva, pa;
+
+       hva = gzvm_gfn_to_hva_memslot(memslot, gfn);
+
+       pa = gzvm_hva_to_pa_arch(hva);
+       if (pa != GZVM_PA_ERR_BAD)
+               return PHYS_PFN(pa);
+
+       pa = hva_to_pa_fast(hva);
+       if (pa != GZVM_PA_ERR_BAD)
+               return PHYS_PFN(pa);
+
+       pa = hva_to_pa_slow(hva);
+       if (pa != GZVM_PA_ERR_BAD)
+               return PHYS_PFN(pa);
+
+       return GZVM_PA_ERR_BAD;
+}
+
+/**
+ * gzvm_gfn_to_pfn_memslot() - Translate gfn (guest ipa) to pfn (host pa),
+ *                            result is in @pfn
+ * @memslot: Pointer to struct gzvm_memslot.
+ * @gfn: Guest frame number.
+ * @pfn: Host page frame number.
+ *
+ * Return:
+ * * 0                 - Succeed
+ * * -EFAULT           - Failed to convert
+ */
+int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn,
+                           u64 *pfn)
+{
+       u64 __pfn;
+
+       if (!memslot)
+               return -EFAULT;
+
+       __pfn = __gzvm_gfn_to_pfn_memslot(memslot, gfn);
+       if (__pfn == GZVM_PA_ERR_BAD) {
+               *pfn = 0;
+               return -EFAULT;
+       }
+
+       *pfn = __pfn;
+
+       return 0;
+}
-- 
2.18.0

