Rename PML_ENTITY_NUM to PML_LOG_NR_ENTRIES
Add PML_HEAD_INDEX to specify the first entry that the CPU writes.

No functional change intended.

Suggested-by: Sean Christopherson <sea...@google.com>
Signed-off-by: Maxim Levitsky <mlevi...@redhat.com>
---
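Not part of the commit, for review context only: a minimal user-space
sketch of the index convention the new names describe, assuming the
512-entry log that the CPU fills backwards and the "index points to the
next free entry" rule that vmx_flush_pml_buffer() relies on.
pml_entries_logged() and the main() harness are made up for
illustration; they are not kernel code.

#include <stdint.h>
#include <stdio.h>

#define PML_LOG_NR_ENTRIES	512
/* PML is written backwards: this is the first entry written by the CPU */
#define PML_HEAD_INDEX		(PML_LOG_NR_ENTRIES - 1)

/* Entries the CPU has logged so far, mirroring the walk in vmx_flush_pml_buffer() */
static unsigned int pml_entries_logged(uint16_t pml_idx)
{
	/* Index still at the head: nothing has been written yet */
	if (pml_idx == PML_HEAD_INDEX)
		return 0;

	/* Index points to the next free entry, or out of range when the log is full */
	if (pml_idx >= PML_LOG_NR_ENTRIES)
		pml_idx = 0;
	else
		pml_idx++;

	return PML_LOG_NR_ENTRIES - pml_idx;
}

int main(void)
{
	printf("%u\n", pml_entries_logged(PML_HEAD_INDEX));	/* 0: buffer empty */
	printf("%u\n", pml_entries_logged(510));		/* 1: only entry 511 written */
	printf("%u\n", pml_entries_logged(0xffff));		/* 512: log is full */
	return 0;
}
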
 arch/x86/kvm/vmx/main.c   |  2 +-
 arch/x86/kvm/vmx/nested.c |  2 +-
 arch/x86/kvm/vmx/vmx.c    | 10 +++++-----
 arch/x86/kvm/vmx/vmx.h    |  5 ++++-
 4 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 92d35cc6cd15..0c0ea786a947 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -126,7 +126,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-       .cpu_dirty_log_size = PML_ENTITY_NUM,
+       .cpu_dirty_log_size = PML_LOG_NR_ENTRIES,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
        .nested_ops = &vmx_nested_ops,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index aa78b6f38dfe..71e0d887ff9c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3442,7 +3442,7 @@ static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
        if (!nested_cpu_has_pml(vmcs12))
                return 0;
 
-       if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+       if (vmcs12->guest_pml_index >= PML_LOG_NR_ENTRIES) {
                vmx->nested.pml_full = true;
                return 1;
        }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0f008f5ef6f0..30fc54eefeb4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4828,7 +4828,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 
        if (enable_pml) {
                vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-               vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+               vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
        }
 
        vmx_write_encls_bitmap(&vmx->vcpu, NULL);
@@ -6216,17 +6216,17 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
        pml_idx = vmcs_read16(GUEST_PML_INDEX);
 
        /* Do nothing if PML buffer is empty */
-       if (pml_idx == (PML_ENTITY_NUM - 1))
+       if (pml_idx == PML_HEAD_INDEX)
                return;
 
        /* PML index always points to next available PML buffer entity */
-       if (pml_idx >= PML_ENTITY_NUM)
+       if (pml_idx >= PML_LOG_NR_ENTRIES)
                pml_idx = 0;
        else
                pml_idx++;
 
        pml_buf = page_address(vmx->pml_pg);
-       for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
+       for (; pml_idx < PML_LOG_NR_ENTRIES; pml_idx++) {
                u64 gpa;
 
                gpa = pml_buf[pml_idx];
@@ -6235,7 +6235,7 @@ static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
        }
 
        /* reset PML index */
-       vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+       vmcs_write16(GUEST_PML_INDEX, PML_HEAD_INDEX);
 }
 
 static void vmx_dump_sel(char *name, uint32_t sel)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 43f573f6ca46..83ca5c574704 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -330,7 +330,10 @@ struct vcpu_vmx {
        bool ple_window_dirty;
 
        /* Support for PML */
-#define PML_ENTITY_NUM         512
+#define PML_LOG_NR_ENTRIES     512
+       /* PML is written backwards: this is the first entry written by the CPU */
+#define PML_HEAD_INDEX         (PML_LOG_NR_ENTRIES - 1)
+
        struct page *pml_pg;
 
        /* apic deadline value in host tsc */
-- 
2.26.3

