On 8/15/24 5:45 PM, Christoph Schlameuss wrote:
Add a test case verifying basic running and interaction of ucontrol VMs.
Fill the segment and page tables for allocated memory and map memory on
first access.

* uc_map_unmap
   Store and load data to mapped and unmapped memory and use pic segment
   translation handling to map memory on access.

Signed-off-by: Christoph Schlameuss <schlame...@linux.ibm.com>
---
  .../selftests/kvm/s390x/ucontrol_test.c       | 120 +++++++++++++++++-
  1 file changed, 119 insertions(+), 1 deletion(-)


+static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) * self)
+{
+       struct kvm_run *run = self->run;
+
+       TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+       switch (run->s390_ucontrol.pgm_code) {
+       case PGM_SEGMENT_TRANSLATION:
+               pr_info("ucontrol pic segment translation 0x%llx\n",
+                       run->s390_ucontrol.trans_exc_code);
+               /* map / make additional memory available */
+               struct kvm_s390_ucas_mapping map2 = {
+                       .user_addr = (u64)gpa2hva(self, run->s390_ucontrol.trans_exc_code),
+                       .vcpu_addr = run->s390_ucontrol.trans_exc_code,
+                       .length = VM_MEM_EXT_SIZE,
+               };
+               pr_info("ucas map %p %p 0x%llx\n",
+                       (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+               TEST_ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2));
+               break;

Why is this necessary if you fix up the mapping in the test?

[...]

+TEST_F(uc_kvm, uc_map_unmap)
+{
+       struct kvm_sync_regs *sync_regs = &self->run->s.regs;
+       struct kvm_run *run = self->run;
+       int rc;
+
+       /* copy test_mem_asm to code_hva / code_gpa */
+       TH_LOG("copy code %p to vm mapped memory %p / %p",
+              &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
+       memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
+
+       /* DAT disabled + 64 bit mode */
+       run->psw_mask = 0x0000000180000000ULL;
+       run->psw_addr = self->code_gpa;
+
+       /* set register content for test_mem_asm to access not mapped memory*/
+       sync_regs->gprs[1] = 0x55;
+       sync_regs->gprs[5] = self->base_gpa;
+       sync_regs->gprs[6] = VM_MEM_SIZE;
+       run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+
+       /* run and expect to fail witch ucontrol pic segment translation */

s/witch/with/

+       ASSERT_EQ(0, uc_run_once(self));
+       ASSERT_EQ(1, sync_regs->gprs[0]);
+       ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+
+       ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
+       ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);
+       /* map / make additional memory available */
+       struct kvm_s390_ucas_mapping map2 = {
+               .user_addr = (u64)gpa2hva(self, self->base_gpa + VM_MEM_SIZE),
+               .vcpu_addr = self->base_gpa + VM_MEM_SIZE,
+               .length = VM_MEM_EXT_SIZE,
+       };
+       TH_LOG("ucas map %p %p 0x%llx",
+              (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+       rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map2);
+       ASSERT_EQ(0, rc)
+               TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
+       ASSERT_EQ(0, uc_run_once(self));
+       ASSERT_EQ(false, uc_handle_exit(self));
+       uc_assert_diag44(self);
+
+       /* assert registers and memory are in expected state */
+       ASSERT_EQ(2, sync_regs->gprs[0]);
+       ASSERT_EQ(0x55, sync_regs->gprs[1]);
+       ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE));
+
+       /* unmap and run loop again */
+       TH_LOG("ucas unmap %p %p 0x%llx",
+              (void *)map2.user_addr, (void *)map2.vcpu_addr, map2.length);
+       rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map2);
+       ASSERT_EQ(0, rc)
+               TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));

s/map/unmap/


+       ASSERT_EQ(0, uc_run_once(self));
+       ASSERT_EQ(3, sync_regs->gprs[0]);
+       ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
+       ASSERT_EQ(true, uc_handle_exit(self));
+}
+
  TEST_F(uc_kvm, uc_gprs)
  {
        struct kvm_sync_regs *sync_regs = &self->run->s.regs;


Reply via email to