The Gunyah hypervisor supports several APIs that allow a host VM to assign some of its memory to a VM being created.
Lend - assigned memory is made private to VM (host loses access) Share - assigned memory is shared between host and guest VM No APIs exist however, at this time, for a protected VM to share some of its (private) memory with host at runtime. Since, in practice, even a protected VM may need some shared memory to exchange information with its host, we split VM's memory into two portions - one that is kept private (via Lend API) and other that is shared with host (via Share API). Shared portion size is specified via 'preshmem-size' property of VM. Note: Gunyah kernel driver from Android common kernel repository exposes two separate ioctls for lend and share operations [1]. A subsequent version of kernel driver that will be merged in Linux kernel repository will combine the two ioctls into one, when this patch will be updated. 1. Refer GH_VM_ANDROID_LEND_USER_MEM and GH_VM_SET_USER_MEM_REGION in https://android.googlesource.com/kernel/common/+/refs/heads/android14-6.1/drivers/virt/gunyah/vm_mgr.c Signed-off-by: Srivatsa Vaddagiri <quic_svadd...@quicinc.com> --- accel/gunyah/gunyah-all.c | 248 ++++++++++++++++++++++++++++++++++++ include/sysemu/gunyah_int.h | 25 ++++ 2 files changed, 273 insertions(+) diff --git a/accel/gunyah/gunyah-all.c b/accel/gunyah/gunyah-all.c index 370add75f9..8b6b2d2017 100644 --- a/accel/gunyah/gunyah-all.c +++ b/accel/gunyah/gunyah-all.c @@ -14,12 +14,20 @@ #include <sys/ioctl.h> #include "qemu/osdep.h" #include "qemu/typedefs.h" +#include "qemu/units.h" #include "hw/core/cpu.h" #include "sysemu/cpus.h" #include "sysemu/gunyah.h" #include "sysemu/gunyah_int.h" #include "linux-headers/linux/gunyah.h" +#include "exec/memory.h" #include "qemu/error-report.h" +#include "exec/address-spaces.h" + +static void gunyah_region_add(MemoryListener *listener, + MemoryRegionSection *section); +static void gunyah_region_del(MemoryListener *listener, + MemoryRegionSection *section); static int gunyah_ioctl(int type, ...) 
{ @@ -36,9 +44,32 @@ static int gunyah_ioctl(int type, ...) return ioctl(s->fd, type, arg); } +int gunyah_vm_ioctl(int type, ...) +{ + void *arg; + va_list ap; + GUNYAHState *s = GUNYAH_STATE(current_accel()); + + assert(s->vmfd); + + va_start(ap, type); + arg = va_arg(ap, void *); + va_end(ap); + + return ioctl(s->vmfd, type, arg); +} + +static MemoryListener gunyah_memory_listener = { + .name = "gunyah", + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, + .region_add = gunyah_region_add, + .region_del = gunyah_region_del, +}; + int gunyah_create_vm(void) { GUNYAHState *s; + int i; s = GUNYAH_STATE(current_accel()); @@ -55,9 +86,226 @@ int gunyah_create_vm(void) exit(1); } + qemu_mutex_init(&s->slots_lock); + s->nr_slots = GUNYAH_MAX_MEM_SLOTS; + for (i = 0; i < s->nr_slots; ++i) { + s->slots[i].start = 0; + s->slots[i].size = 0; + s->slots[i].id = i; + } + + memory_listener_register(&gunyah_memory_listener, &address_space_memory); return 0; } +#define gunyah_slots_lock(s) qemu_mutex_lock(&s->slots_lock) +#define gunyah_slots_unlock(s) qemu_mutex_unlock(&s->slots_lock) + +static gunyah_slot *gunyah_find_overlap_slot(GUNYAHState *s, + uint64_t start, uint64_t size) +{ + gunyah_slot *slot; + int i; + + for (i = 0; i < s->nr_slots; ++i) { + slot = &s->slots[i]; + if (slot->size && start < (slot->start + slot->size) && + (start + size) > slot->start) { + return slot; + } + } + + return NULL; +} + +/* Called with s->slots_lock held */ +static gunyah_slot *gunyah_get_free_slot(GUNYAHState *s) +{ + int i; + + for (i = 0; i < s->nr_slots; i++) { + if (s->slots[i].size == 0) { + return &s->slots[i]; + } + } + + return NULL; +} + +static void gunyah_add_mem(GUNYAHState *s, MemoryRegionSection *section, + bool lend, enum gh_mem_flags flags) +{ + gunyah_slot *slot; + MemoryRegion *area = section->mr; + struct gh_userspace_memory_region gumr; + int ret; + + slot = gunyah_get_free_slot(s); + if (!slot) { + error_report("No free slots to add memory!"); + exit(1); + } + + slot->size 
= int128_get64(section->size); + slot->mem = memory_region_get_ram_ptr(area) + section->offset_within_region; + slot->start = section->offset_within_address_space; + slot->lend = lend; + + gumr.label = slot->id; + gumr.flags = flags; + gumr.guest_phys_addr = slot->start; + gumr.memory_size = slot->size; + gumr.userspace_addr = (__u64) slot->mem; + + /* + * GH_VM_ANDROID_LEND_USER_MEM is temporary, until + * GH_VM_SET_USER_MEM_REGION is enhanced to support lend option also. + */ + if (lend) { + ret = gunyah_vm_ioctl(GH_VM_ANDROID_LEND_USER_MEM, &gumr); + } else { + ret = gunyah_vm_ioctl(GH_VM_SET_USER_MEM_REGION, &gumr); + } + + if (ret) { + error_report("failed to add mem (%s)", strerror(errno)); + exit(1); + } +} + +/* + * Check if memory of a protected VM needs to be split into two portions - one + * private to it and other shared with host. + */ +static bool split_mem(GUNYAHState *s, + MemoryRegion *area, MemoryRegionSection *section) +{ + bool writeable = !area->readonly && !area->rom_device; + + /* + * Do not split if its not a protected VM OR if the shared mem size is not + * specified. + */ + if (!s->is_protected_vm || !s->preshmem_size) { + return false; + } + + /* Split only memory that can be written to by guest */ + if (!memory_region_is_ram(area) || !writeable) { + return false; + } + + /* Have we reserved already? */ + if (qatomic_read(&s->preshmem_reserved)) { + return false; + } + + /* Do we have enough available memory? 
*/ + if (section->size <= s->preshmem_size) { + return false; + } + + return true; +} + +static void gunyah_set_phys_mem(GUNYAHState *s, + MemoryRegionSection *section, bool add) +{ + MemoryRegion *area = section->mr; + bool writable = !area->readonly && !area->rom_device; + enum gh_mem_flags flags = 0; + uint64_t page_size = qemu_real_host_page_size(); + MemoryRegionSection mrs = *section; + bool lend = s->is_protected_vm, split = false; + struct gunyah_slot *slot; + + /* + * Gunyah hypervisor, at this time, does not support mapping memory + * at low address (< 1GiB). Below code will be updated once + * that limitation is addressed. + */ + if (section->offset_within_address_space < GiB) { + return; + } + + if (!memory_region_is_ram(area)) { + if (writable) { + return; + } else if (!memory_region_is_romd(area)) { + /* + * If the memory device is not in romd_mode, then we actually want + * to remove the gunyah memory slot so all accesses will trap. + */ + add = false; + } + } + + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + error_report("Not page aligned"); + add = false; + } + + gunyah_slots_lock(s); + + slot = gunyah_find_overlap_slot(s, + section->offset_within_address_space, + int128_get64(section->size)); + + if (!add) { + if (slot) { + error_report("Memory slot removal not yet supported!"); + exit(1); + } + /* Nothing to be done as address range was not previously registered */ + goto done; + } else { + if (slot) { + error_report("Overlapping slot registration not supported!"); + exit(1); + } + } + + if (area->readonly || + (!memory_region_is_ram(area) && memory_region_is_romd(area))) { + flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC; + } else { + flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC; + } + + split = split_mem(s, area, &mrs); + if (split) { + mrs.size -= s->preshmem_size; + gunyah_add_mem(s, &mrs, true, flags); + lend = false; + 
mrs.offset_within_region += mrs.size; + mrs.offset_within_address_space += mrs.size; + mrs.size = s->preshmem_size; + qatomic_set(&s->preshmem_reserved, true); + } + + gunyah_add_mem(s, &mrs, lend, flags); + +done: + gunyah_slots_unlock(s); +} + +static void gunyah_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + GUNYAHState *s = GUNYAH_STATE(current_accel()); + + gunyah_set_phys_mem(s, section, true); +} + +static void gunyah_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + GUNYAHState *s = GUNYAH_STATE(current_accel()); + + gunyah_set_phys_mem(s, section, false); +} + void *gunyah_cpu_thread_fn(void *arg) { CPUState *cpu = arg; diff --git a/include/sysemu/gunyah_int.h b/include/sysemu/gunyah_int.h index b1fd7f9ea2..17b4ef9920 100644 --- a/include/sysemu/gunyah_int.h +++ b/include/sysemu/gunyah_int.h @@ -13,17 +13,42 @@ #include "qemu/accel.h" #include "qemu/typedefs.h" +#include "qemu/thread.h" + +typedef struct gunyah_slot { + uint64_t start; + uint64_t size; + uint8_t *mem; + uint32_t id; + uint32_t flags; + + /* + * @lend indicates if memory was lent. + * + * This flag is temporarily used until the upstream Gunyah kernel driver + * patches are updated to support indication of lend vs share via flags + * field of GH_SET_USER_MEM_API interface. + */ + bool lend; +} gunyah_slot; + +#define GUNYAH_MAX_MEM_SLOTS 32 struct GUNYAHState { AccelState parent_obj; + QemuMutex slots_lock; + gunyah_slot slots[GUNYAH_MAX_MEM_SLOTS]; + uint32_t nr_slots; int fd; int vmfd; bool is_protected_vm; + bool preshmem_reserved; uint32_t preshmem_size; }; int gunyah_create_vm(void); +int gunyah_vm_ioctl(int type, ...); void *gunyah_cpu_thread_fn(void *arg); #endif /* GUNYAH_INT_H */ -- 2.25.1