In preparation for supporting protected guests, where guest memory
defaults to being inaccessible to the host, extend our memory protection
mechanisms to support donation of pages from the host to a specific
guest.

Signed-off-by: Will Deacon <w...@kernel.org>
---
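[ Not for the commit log: a minimal sketch of how a caller at EL2 might
  drive the new interface. The wrapper below is hypothetical; only
  __pkvm_host_donate_guest() and its check/complete semantics come from
  this patch. ]

	/*
	 * Illustrative only: donate the host page at 'pfn' so that it
	 * appears at 'gfn' in the protected guest's stage-2. 'vcpu' is
	 * the shadow vcpu of the target guest, as with
	 * __pkvm_host_share_guest().
	 */
	static int donate_page_to_guest(u64 pfn, u64 gfn,
					struct kvm_vcpu *vcpu)
	{
		int ret;

		/*
		 * do_donate() runs a check phase first: the host must
		 * exclusively own the page and the guest must have no
		 * mapping at 'gfn' (PKVM_NOPAGE). If either check fails,
		 * the page is left untouched in the host.
		 */
		ret = __pkvm_host_donate_guest(pfn, gfn, vcpu);
		if (ret)
			return ret;

		/*
		 * Success: the page has been unmapped from the host and
		 * mapped RWX into the guest as PKVM_PAGE_OWNED.
		 */
		return 0;
	}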
 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  1 +
 arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 62 +++++++++++++++++++
 arch/arm64/kvm/hyp/pgtable.c                  |  2 +-
 3 files changed, 64 insertions(+), 1 deletion(-)
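[ Also not for the commit log: the pgtable.c hunk below is needed because
  a page donated to a guest is annotated in the host's stage-2 with its
  new owner, so the host (owner 0) and the hypervisor (owner 1) are no
  longer the only possible owner IDs. A worked illustration, using the
  mask already defined in pgtable.c; the includes are assumptions added
  here only to make the snippet self-contained: ]

	#include <linux/bitfield.h>	/* FIELD_MAX() */
	#include <linux/bits.h>		/* GENMASK() */

	/* Owner IDs live in bits [9:2] of an invalid PTE... */
	#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)

	/*
	 * ...so the field can encode GENMASK(9, 2) >> 2 == 255 distinct
	 * owners, rather than the previously hard-coded maximum of 1.
	 */
	#define KVM_MAX_OWNER_ID	FIELD_MAX(KVM_INVALID_PTE_OWNER_MASK)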

diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 364432276fe0..b01b5cdb38de 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -69,6 +69,7 @@ int __pkvm_host_reclaim_page(u64 pfn);
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu);
+int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu);
 
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2e92be8bb463..d0544259eb01 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -890,6 +890,14 @@ static int guest_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
                                              size, PKVM_NOPAGE);
 }
 
+static int guest_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+       u64 size = tx->nr_pages * PAGE_SIZE;
+
+       return __guest_check_page_state_range(tx->completer.guest.vcpu, addr,
+                                             size, PKVM_NOPAGE);
+}
+
 static int guest_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
                                enum kvm_pgtable_prot perms)
 {
@@ -903,6 +911,17 @@ static int guest_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
                                      prot, &vcpu->arch.pkvm_memcache);
 }
 
+static int guest_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+       enum kvm_pgtable_prot prot = pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_OWNED);
+       struct kvm_vcpu *vcpu = tx->completer.guest.vcpu;
+       struct kvm_shadow_vm *vm = get_shadow_vm(vcpu);
+       u64 size = tx->nr_pages * PAGE_SIZE;
+
+       return kvm_pgtable_stage2_map(&vm->pgt, addr, size, tx->completer.guest.phys,
+                                     prot, &vcpu->arch.pkvm_memcache);
+}
+
 static int check_share(struct pkvm_mem_share *share)
 {
        const struct pkvm_mem_transition *tx = &share->tx;
@@ -1088,6 +1107,9 @@ static int check_donation(struct pkvm_mem_donation *donation)
        case PKVM_ID_HYP:
                ret = hyp_ack_donation(completer_addr, tx);
                break;
+       case PKVM_ID_GUEST:
+               ret = guest_ack_donation(completer_addr, tx);
+               break;
        default:
                ret = -EINVAL;
        }
@@ -1122,6 +1144,9 @@ static int __do_donate(struct pkvm_mem_donation *donation)
        case PKVM_ID_HYP:
                ret = hyp_complete_donation(completer_addr, tx);
                break;
+       case PKVM_ID_GUEST:
+               ret = guest_complete_donation(completer_addr, tx);
+               break;
        default:
                ret = -EINVAL;
        }
@@ -1362,6 +1387,43 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu)
        return ret;
 }
 
+int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct kvm_vcpu *vcpu)
+{
+       int ret;
+       u64 host_addr = hyp_pfn_to_phys(pfn);
+       u64 guest_addr = hyp_pfn_to_phys(gfn);
+       struct kvm_shadow_vm *vm = get_shadow_vm(vcpu);
+       struct pkvm_mem_donation donation = {
+               .tx     = {
+                       .nr_pages       = 1,
+                       .initiator      = {
+                               .id     = PKVM_ID_HOST,
+                               .addr   = host_addr,
+                               .host   = {
+                                       .completer_addr = guest_addr,
+                               },
+                       },
+                       .completer      = {
+                               .id     = PKVM_ID_GUEST,
+                               .guest  = {
+                                       .vcpu = vcpu,
+                                       .phys = host_addr,
+                               },
+                       },
+               },
+       };
+
+       host_lock_component();
+       guest_lock_component(vm);
+
+       ret = do_donate(&donation);
+
+       guest_unlock_component(vm);
+       host_unlock_component();
+
+       return ret;
+}
+
 static int hyp_zero_page(phys_addr_t phys)
 {
        void *addr;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index a6676fd14cf9..2069e6833831 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -47,7 +47,7 @@
                                         KVM_PTE_LEAF_ATTR_HI_S2_XN)
 
 #define KVM_INVALID_PTE_OWNER_MASK     GENMASK(9, 2)
-#define KVM_MAX_OWNER_ID               1
+#define KVM_MAX_OWNER_ID               FIELD_MAX(KVM_INVALID_PTE_OWNER_MASK)
 
 struct kvm_pgtable_walk_data {
        struct kvm_pgtable              *pgt;
-- 
2.36.1.124.g0e6072fb45-goog
