Grab the per-vCPU GPA and number of pages from perf_util in the demand
paging test instead of duplicating perf_util's calculations.

Note, this may or may not result in a functional change.  It's not clear
that the test's calculations are guaranteed to yield the same values as
perf_util, e.g. the sizes could differ if guest_percpu_mem_size !=
vcpu_args->pages * guest_page_size.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 .../selftests/kvm/demand_paging_test.c        | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)
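
For reference, below is a minimal standalone sketch (not part of the patch)
contrasting the uffd region size the test used to compute with the size it now
takes from perf_util.  The structs are stand-ins that only mirror the fields
this patch touches (gpa, pages, guest_page_size), not the real
perf_test_util.h definitions, and the rounding in main() is a hypothetical way
the two values could diverge if perf_util rounds the per-vCPU size to whole
guest pages.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the perf_util fields this patch consumes (not the real header). */
struct vcpu_args_stub {
	uint64_t gpa;
	uint64_t pages;
};

struct perf_args_stub {
	uint64_t guest_page_size;
	struct vcpu_args_stub vcpu_args[1];
};

/* Old behavior: region size comes straight from the per-vCPU byte count. */
static uint64_t old_region_size(uint64_t guest_percpu_mem_size, int nr_vcpus,
				bool partitioned)
{
	return partitioned ? guest_percpu_mem_size :
			     guest_percpu_mem_size * nr_vcpus;
}

/* New behavior: region size comes from perf_util's per-vCPU page count. */
static uint64_t new_region_size(const struct perf_args_stub *pta, int vcpu_id)
{
	return pta->vcpu_args[vcpu_id].pages * pta->guest_page_size;
}

int main(void)
{
	/* Hypothetical: a 3000000-byte per-vCPU request rounded up to 4KiB pages. */
	struct perf_args_stub pta = {
		.guest_page_size = 4096,
		.vcpu_args = { { .gpa = 0, .pages = (3000000 + 4095) / 4096 } },
	};

	printf("old: %" PRIu64 " bytes, new: %" PRIu64 " bytes\n",
	       old_region_size(3000000, 1, true), new_region_size(&pta, 0));
	return 0;
}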

diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 5f7a229c3af1..0cbf111e6c21 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -294,24 +294,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
 
                for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
-                       vm_paddr_t vcpu_gpa;
+                       struct perf_test_vcpu_args *vcpu_args;
                        void *vcpu_hva;
-                       uint64_t vcpu_mem_size;
 
-
-                       if (p->partition_vcpu_memory_access) {
-                               vcpu_gpa = guest_test_phys_mem +
-                                          (vcpu_id * guest_percpu_mem_size);
-                               vcpu_mem_size = guest_percpu_mem_size;
-                       } else {
-                               vcpu_gpa = guest_test_phys_mem;
-                               vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
-                       }
-                       PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                                      vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
+                       vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
 
                        /* Cache the HVA pointer of the region */
-                       vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
+                       vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
 
                        /*
                         * Set up user fault fd to handle demand paging
@@ -325,7 +314,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                                                &uffd_handler_threads[vcpu_id],
                                                pipefds[vcpu_id * 2],
                                                p->uffd_delay, &uffd_args[vcpu_id],
-                                               vcpu_hva, vcpu_mem_size);
+                                               vcpu_hva,
+                                               vcpu_args->pages * perf_test_args.guest_page_size);
                        if (r < 0)
                                exit(-r);
                }
-- 
2.30.0.478.g8a0d178c01-goog
