The current limit for guest CPUID leaves (KVM_MAX_CPUID_ENTRIES, 80)
is reported to be insufficient, but before we bump it, let's switch to
allocating vcpu->arch.cpuid_entries dynamically. Currently,
'struct kvm_cpuid_entry2' is 40 bytes, so vcpu->arch.cpuid_entries
takes 3200 bytes, which accounts for 1/4 of the whole
'struct kvm_vcpu_arch'; pre-allocating it (for all vCPUs, which we
also pre-allocate) gives us no benefit.
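
For reference, the arithmetic above can be checked from userspace with
a minimal sketch (assuming the UAPI 'struct kvm_cpuid_entry2' layout
from <linux/kvm.h>; KVM_MAX_CPUID_ENTRIES itself is kernel-internal,
so the limit of 80 is hardcoded here):

	#include <stdio.h>
	#include <linux/kvm.h>

	/* mirrors the kernel-internal KVM_MAX_CPUID_ENTRIES (80) */
	#define MAX_CPUID_ENTRIES 80

	int main(void)
	{
		/* 10 __u32 fields (3 of them padding) -> 40 bytes */
		printf("entry: %zu bytes, array: %zu bytes\n",
		       sizeof(struct kvm_cpuid_entry2),
		       MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));
		return 0;
	}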

Signed-off-by: Vitaly Kuznetsov <vkuzn...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/cpuid.c            | 55 ++++++++++++++++++++++++---------
 arch/x86/kvm/x86.c              |  1 +
 3 files changed, 42 insertions(+), 16 deletions(-)
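
For context (not part of the patch): the user-visible path this
touches is the KVM_SET_CPUID2 vCPU ioctl, whose entries are now
duplicated with vmemdup_user() instead of being copied into a fixed
array. A minimal userspace sketch of a caller; 'vcpu_fd' and
'set_one_cpuid_leaf' are hypothetical names, and the fd is assumed to
come from a prior KVM_CREATE_VCPU:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_one_cpuid_leaf(int vcpu_fd)
	{
		struct kvm_cpuid2 *cpuid;
		int r;

		/* struct kvm_cpuid2 ends in a flexible array of entries */
		cpuid = calloc(1, sizeof(*cpuid) +
				  sizeof(struct kvm_cpuid_entry2));
		if (!cpuid)
			return -1;
		cpuid->nent = 1;
		cpuid->entries[0].function = 0; /* leaf 0: vendor id */
		r = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
		free(cpuid);
		return r;
	}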

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5303dbc5c9bc..0c5f2ca3e838 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -636,7 +636,7 @@ struct kvm_vcpu_arch {
        int halt_request; /* real mode on Intel only */
 
        int cpuid_nent;
-       struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+       struct kvm_cpuid_entry2 *cpuid_entries;
 
        int maxphyaddr;
        int max_tdp_level;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..0ce943a8a39a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -195,6 +195,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 {
        int r, i;
        struct kvm_cpuid_entry *cpuid_entries = NULL;
+       struct kvm_cpuid_entry2 *cpuid_entries2 = NULL;
 
        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
@@ -207,31 +208,42 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                        r = PTR_ERR(cpuid_entries);
                        goto out;
                }
+               cpuid_entries2 = kvmalloc_array(cpuid->nent, sizeof(cpuid_entries2[0]),
+                                               GFP_KERNEL_ACCOUNT);
+               if (!cpuid_entries2) {
+                       r = -ENOMEM;
+                       goto out_free_cpuid;
+               }
        }
        for (i = 0; i < cpuid->nent; i++) {
-               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
-               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
-               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
-               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
-               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
-               vcpu->arch.cpuid_entries[i].index = 0;
-               vcpu->arch.cpuid_entries[i].flags = 0;
-               vcpu->arch.cpuid_entries[i].padding[0] = 0;
-               vcpu->arch.cpuid_entries[i].padding[1] = 0;
-               vcpu->arch.cpuid_entries[i].padding[2] = 0;
+               cpuid_entries2[i].function = cpuid_entries[i].function;
+               cpuid_entries2[i].eax = cpuid_entries[i].eax;
+               cpuid_entries2[i].ebx = cpuid_entries[i].ebx;
+               cpuid_entries2[i].ecx = cpuid_entries[i].ecx;
+               cpuid_entries2[i].edx = cpuid_entries[i].edx;
+               cpuid_entries2[i].index = 0;
+               cpuid_entries2[i].flags = 0;
+               cpuid_entries2[i].padding[0] = 0;
+               cpuid_entries2[i].padding[1] = 0;
+               cpuid_entries2[i].padding[2] = 0;
        }
+       kvfree(vcpu->arch.cpuid_entries);
+       vcpu->arch.cpuid_entries = cpuid_entries2;
        vcpu->arch.cpuid_nent = cpuid->nent;
+
        r = kvm_check_cpuid(vcpu);
        if (r) {
+               kvfree(vcpu->arch.cpuid_entries);
+               vcpu->arch.cpuid_entries = NULL;
                vcpu->arch.cpuid_nent = 0;
-               kvfree(cpuid_entries);
-               goto out;
+               goto out_free_cpuid;
        }
 
        cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
+out_free_cpuid:
        kvfree(cpuid_entries);
 out:
        return r;
@@ -241,18 +253,31 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
 {
+       struct kvm_cpuid_entry2 *cpuid_entries2 = NULL;
        int r;
 
        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
-       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
-               goto out;
+
+       if (cpuid->nent) {
+               cpuid_entries2 = vmemdup_user(entries,
+                                             array_size(sizeof(cpuid_entries2[0]),
+                                                        cpuid->nent));
+               if (IS_ERR(cpuid_entries2)) {
+                       r = PTR_ERR(cpuid_entries2);
+                       goto out;
+               }
+       }
+       kvfree(vcpu->arch.cpuid_entries);
+       vcpu->arch.cpuid_entries = cpuid_entries2;
        vcpu->arch.cpuid_nent = cpuid->nent;
+
        r = kvm_check_cpuid(vcpu);
        if (r) {
+               kvfree(vcpu->arch.cpuid_entries);
+               vcpu->arch.cpuid_entries = NULL;
                vcpu->arch.cpuid_nent = 0;
                goto out;
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1994602a0851..42259a6ec1d8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9610,6 +9610,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kvm_mmu_destroy(vcpu);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        free_page((unsigned long)vcpu->arch.pio_data);
+       kvfree(vcpu->arch.cpuid_entries);
        if (!lapic_in_kernel(vcpu))
                static_key_slow_dec(&kvm_no_apic_vcpu);
 }
-- 
2.25.4
