From: Ackerley Tng <[email protected]>

Override the architecture-specific guest_memfd content mode functions for x86 to provide support for KVM_X86_SW_PROTECTED_VM.
For software-protected VMs, specify KVM_SET_MEMORY_ATTRIBUTES2_ZERO and KVM_SET_MEMORY_ATTRIBUTES2_PRESERVE as supported content modes. Implement the logic for these modes as follows: + ZERO: Zero out the memory using the generic guest_memfd helper. + PRESERVE: Maintain the existing memory content without modification. + UNSPECIFIED: KVM_X86_SW_PROTECTED_VM is guest_memfd's testing vehicle. Scramble the memory range by filling it with random bytes so test behavior will differ from that of PRESERVE. Signed-off-by: Ackerley Tng <[email protected]> --- arch/x86/kvm/x86.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6609957ecfea3..e8abff71001eb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -14194,6 +14194,99 @@ void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end) kvm_x86_call(gmem_invalidate)(start, end); } #endif + +u64 kvm_arch_gmem_supported_content_modes(struct kvm *kvm, bool to_private) +{ + if (!kvm) { + return KVM_SET_MEMORY_ATTRIBUTES2_ZERO | + KVM_SET_MEMORY_ATTRIBUTES2_PRESERVE; + } + + switch (kvm->arch.vm_type) { + case KVM_X86_SW_PROTECTED_VM: + return KVM_SET_MEMORY_ATTRIBUTES2_ZERO | + KVM_SET_MEMORY_ATTRIBUTES2_PRESERVE; + default: + return 0; + } +} + +int kvm_arch_gmem_apply_content_mode_zero(struct kvm *kvm, struct inode *inode, + pgoff_t start, pgoff_t end) +{ + switch (kvm->arch.vm_type) { + case KVM_X86_SW_PROTECTED_VM: + return kvm_gmem_apply_content_mode_zero(inode, start, end); + default: + return 0; + } +} + +int kvm_arch_gmem_apply_content_mode_preserve(struct kvm *kvm, + struct inode *inode, + pgoff_t start, pgoff_t end) +{ + switch (kvm->arch.vm_type) { + case KVM_X86_SW_PROTECTED_VM: + /* Do nothing to preserve content. */ + return 0; + default: + /* Not a valid content mode for other types, so do nothing. 
*/ + return 0; + } +} + +static int __scramble_range(struct inode *inode, pgoff_t start, pgoff_t end) +{ + struct address_space *mapping = inode->i_mapping; + struct folio_batch fbatch; + struct folio *f; + char *kaddr; + int ret = 0; + int i; + + folio_batch_init(&fbatch); + while (!ret && filemap_get_folios(mapping, &start, end - 1, &fbatch)) { + for (i = 0; !ret && i < folio_batch_count(&fbatch); ++i) { + f = fbatch.folios[i]; + + folio_lock(f); + + if (folio_test_hwpoison(f)) { + ret = -EHWPOISON; + } else { + /* + * Hard-coding range to scramble since + * guest_memfd only supports PAGE_SIZE + * folios now. + */ + kaddr = kmap_local_folio(f, 0); + get_random_bytes(kaddr, PAGE_SIZE); + kunmap_local(kaddr); + } + + folio_unlock(f); + } + + folio_batch_release(&fbatch); + cond_resched(); + } + + return ret; +} + +int kvm_arch_gmem_apply_content_mode_unspecified(struct kvm *kvm, + struct inode *inode, + pgoff_t start, pgoff_t end) +{ + switch (kvm->arch.vm_type) { + case KVM_X86_SW_PROTECTED_VM: + return __scramble_range(inode, start, end); + default: + return 0; + } +} + #endif int kvm_spec_ctrl_test_value(u64 value) -- 2.54.0.545.g6539524ca2-goog
