From: Sagi Shahar <sa...@google.com>

This should mostly match the logic in sev_vm_move_enc_context_from.
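For reference, userspace would drive this the same way as SEV intra-host
migration: enable KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM on the destination VM
with the source VM fd in args[0], which KVM routes to the
.vm_move_enc_context_from hook wired up here. A minimal sketch (the helper
name and fd variables are illustrative; both fds are assumed to refer to
already-created TDs):

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Move the encryption context from src_vm_fd into dst_vm_fd. */
  static int move_enc_context(int dst_vm_fd, int src_vm_fd)
  {
          struct kvm_enable_cap cap = {
                  .cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
                  .args[0] = src_vm_fd,  /* source VM whose context is moved */
          };

          /* For TDs this ends up in vt_move_enc_context_from(). */
          return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
  }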
Signed-off-by: Sagi Shahar <sa...@google.com>
Signed-off-by: Ryan Afranji <afra...@google.com>
---
 arch/x86/kvm/vmx/main.c    | 12 +++++++++++-
 arch/x86/kvm/vmx/tdx.c     | 24 ++++++++++++++++++++++++
 arch/x86/kvm/vmx/x86_ops.h |  1 +
 3 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index d1e02e567b57..125af25fd09a 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -879,6 +879,14 @@ static int vt_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 	return 0;
 }
 
+static int vt_move_enc_context_from(struct kvm *kvm, struct kvm *source_kvm)
+{
+	if (!is_td(kvm))
+		return -ENOTTY;
+
+	return tdx_vm_move_enc_context_from(kvm, source_kvm);
+}
+
 #define vt_op(name) vt_##name
 #define vt_op_tdx_only(name) vt_##name
 #else /* CONFIG_KVM_INTEL_TDX */
@@ -1044,7 +1052,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.mem_enc_ioctl = vt_op_tdx_only(mem_enc_ioctl),
 	.vcpu_mem_enc_ioctl = vt_op_tdx_only(vcpu_mem_enc_ioctl),
 
-	.private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level)
+	.private_max_mapping_level = vt_op_tdx_only(gmem_private_max_mapping_level),
+
+	.vm_move_enc_context_from = vt_move_enc_context_from
 };
 
 struct kvm_x86_init_ops vt_init_ops __initdata = {
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index b952bc673271..07583a11d6e3 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -626,6 +626,7 @@ int tdx_vm_init(struct kvm *kvm)
 	kvm->arch.has_protected_state = true;
 	kvm->arch.has_private_mem = true;
 	kvm->arch.disabled_quirks |= KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+	kvm->arch.use_vm_enc_ctxt_op = true;
 
 	/*
 	 * Because guest TD is protected, VMM can't parse the instruction in TD.
@@ -3524,3 +3525,26 @@ int __init tdx_bringup(void)
 	enable_tdx = 0;
 	return 0;
 }
+
+static __always_inline bool tdx_finalized(struct kvm *kvm)
+{
+	struct kvm_tdx *tdx_kvm = to_kvm_tdx(kvm);
+
+	return tdx_kvm->state == TD_STATE_RUNNABLE;
+}
+
+static int tdx_migrate_from(struct kvm *dst, struct kvm *src)
+{
+	return -EINVAL;
+}
+
+int tdx_vm_move_enc_context_from(struct kvm *kvm, struct kvm *src_kvm)
+{
+	if (!is_td(kvm) || !is_td(src_kvm))
+		return -EINVAL;
+
+	if (tdx_finalized(kvm) || !tdx_finalized(src_kvm))
+		return -EINVAL;
+
+	return tdx_migrate_from(kvm, src_kvm);
+}
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index b4596f651232..001f1540a560 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -164,6 +164,7 @@ void tdx_flush_tlb_current(struct kvm_vcpu *vcpu);
 void tdx_flush_tlb_all(struct kvm_vcpu *vcpu);
 void tdx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
 int tdx_gmem_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+int tdx_vm_move_enc_context_from(struct kvm *kvm, struct kvm *source_kvm);
 #endif
 
 #endif /* __KVM_X86_VMX_X86_OPS_H */
-- 
2.50.0.rc1.591.g9c95f17f64-goog