And do some heavy lifting: implement early initialization, per-CPU setup, and VMM activation/deactivation for RISC-V.

Signed-off-by: Ralf Ramsauer <ralf.ramsa...@oth-regensburg.de>
[RFC v2: allow access to cycle counter]
[RFC v2: get vmm deactivation under control (didn't work with -O2)]
---
 hypervisor/arch/riscv/entry.S             |  65 ++++++++++
 hypervisor/arch/riscv/include/asm/csr64.h |   1 +
 hypervisor/arch/riscv/setup.c             | 137 ++++++++++++++++++++--
 3 files changed, 195 insertions(+), 8 deletions(-)

diff --git a/hypervisor/arch/riscv/entry.S b/hypervisor/arch/riscv/entry.S
index 524270cf..a7f1ce6e 100644
--- a/hypervisor/arch/riscv/entry.S
+++ b/hypervisor/arch/riscv/entry.S
@@ -353,4 +353,69 @@ virtual_arch_entry:
 
        mv      a1, a0
        mv      a0, sp
+       li      a2, 0 /* We don't come from an ecall */
        j       riscv_deactivate_vmm
+
+
+.global __riscv_deactivate_vmm
+__riscv_deactivate_vmm:
+       /*
+        * From now on, we can safely access stack variables, but we must not
+        * use any absolute addresses. We may clobber a0-a7, use them as
+        * scratch.
+        */
+       csr_from_csr    satp, CSR_VSATP, a5
+       sfence.vma
+       /* From here on, Linux's paging is active. */
+
+       /* Restore S-mode CSRs from VS-mode */
+       csr_from_csr    stval, CSR_VSTVAL, a5
+       csr_from_csr    scause, CSR_VSCAUSE, a5
+       csr_from_csr    sscratch, CSR_VSSCRATCH, a5
+       csr_from_csr    sie, CSR_VSIE, a5
+       csr_from_csr    stvec, CSR_VSTVEC, a5
+       csr_from_csr    sstatus, CSR_VSSTATUS, a5
+
+       /* If we didn't come here from an ecall, skip sepc setup */
+       beq a2, x0, 1f
+       /* Compute the return address: resume past the trapping ecall */
+       csrr    a3, sepc
+       addi    a3, a3, 4 /* an ecall is 4 bytes long */
+
+       /* restore original sepc */
+       csrr    a4, CSR_VSEPC
+       csrw    sepc, a4
+
+1:
+       /* Restore registers from the saved frame in a0 */
+       ld ra, 8(a0)
+       ld sp, 16(a0)
+       ld gp, 24(a0)
+       ld tp, 32(a0)
+       ld t0, 40(a0)
+       ld t1, 48(a0)
+       ld t2, 56(a0)
+       ld s0, 64(a0)
+       ld s1, 72(a0)
+       /* Skip clobbers a0 - a7 */
+       ld s2, 144(a0)
+       ld s3, 152(a0)
+       ld s4, 160(a0)
+       ld s5, 168(a0)
+       ld s6, 176(a0)
+       ld s7, 184(a0)
+       ld s8, 192(a0)
+       ld s9, 200(a0)
+       ld s10, 208(a0)
+       ld s11, 216(a0)
+       ld t3, 224(a0)
+       ld t4, 232(a0)
+       ld t5, 240(a0)
+       ld t6, 248(a0)
+       mv a0, a1
+
+       /* Done: resume past the ecall, or plainly return to the caller */
+       beq a2, x0, 1f
+       jalr zero, a3, 0
+1:
+       ret
diff --git a/hypervisor/arch/riscv/include/asm/csr64.h b/hypervisor/arch/riscv/include/asm/csr64.h
index 3c4fa00f..d0f39e8a 100644
--- a/hypervisor/arch/riscv/include/asm/csr64.h
+++ b/hypervisor/arch/riscv/include/asm/csr64.h
@@ -136,6 +136,7 @@
 #define HGATP_VMID_SHIFT       22
 #define HGATP_VMID_WIDTH       7
 
+#define SCOUNTEREN_CY          0x00000001
 #define SCOUNTEREN_TM          0x00000002
 
 #ifndef __ASSEMBLY__
diff --git a/hypervisor/arch/riscv/setup.c b/hypervisor/arch/riscv/setup.c
index 7b4b6a9e..f3cdfc06 100644
--- a/hypervisor/arch/riscv/setup.c
+++ b/hypervisor/arch/riscv/setup.c
@@ -2,37 +2,158 @@
  * Jailhouse, a Linux-based partitioning hypervisor
  *
  * Copyright (c) Siemens AG, 2020
+ * Copyright (c) OTH Regensburg, 2022
  *
  * Authors:
  *  Konrad Schwarz <konrad.schw...@siemens.com>
+ *  Ralf Ramsauer <ralf.ramsa...@oth-regensburg.de>
  *
  * This work is licensed under the terms of the GNU GPL, version 2.  See
  * the COPYING file in the top-level directory.
  */
 
+#include <jailhouse/control.h>
 #include <jailhouse/entry.h>
-#include <asm/processor.h>
+#include <jailhouse/paging.h>
+#include <jailhouse/percpu.h>
 #include <asm/setup.h>
 
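+/* Bootstrap page tables; they still hold Linux's mapping of Jailhouse */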
+extern unsigned long bt_tbl_l0[PAGE_SIZE / sizeof(unsigned long)];
+
+void riscv_park_loop(void);
+void __attribute__((noreturn))
+__riscv_deactivate_vmm(union registers *regs, int errcode, bool from_ecall);
+
 int arch_init_early(void)
 {
-       return -ENOSYS;
+       int err;
+
+       err = riscv_paging_cell_init(&root_cell);
+       if (err)
+               return err;
+
+       parking_pt.root_paging = root_cell.arch.mm.root_paging;
+
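+       /*
+        * Map the parking loop into the parking page table, so that parked
+        * CPUs have an executable page to spin on.
+        */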
+       err = paging_create(&parking_pt, paging_hvirt2phys(riscv_park_loop),
+                       PAGE_SIZE, 0, PAGE_DEFAULT_FLAGS | RISCV_PTE_FLAG(G) |
+                       RISCV_PTE_FLAG(U), PAGING_COHERENT | PAGING_NO_HUGE);
+
+       return err;
 }
 
 int arch_cpu_init(struct per_cpu *cpu_data)
 {
-       return -ENOSYS;
+       struct public_per_cpu *ppc = &cpu_data->public;
+       unsigned long final_pt;
+
+       spin_init(&ppc->control_lock);
+
+       ppc->reset = false;
+       ppc->park = false;
+       ppc->wait_for_power_on = false;
+
+       ppc->phys_id =
+               jailhouse_cell_cpus(root_cell.config)[ppc->cpu_id].phys_id;
+       ppc->hsm.state = STARTED;
+
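+       /* Leave the bootstrap page table for this CPU's final one */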
+       final_pt = paging_hvirt2phys(&ppc->root_table_page);
+       enable_mmu_satp(hv_atp_mode, final_pt);
+
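+       /* Start with a clean hstatus; activation will set SPV and VSXL */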
+       csr_write(CSR_HSTATUS, 0);
+
+       return 0;
 }
 
-void __attribute__((noreturn))
-riscv_deactivate_vmm(union registers *regs, int errcode, bool from_ecall)
+void __attribute__((noreturn)) arch_cpu_activate_vmm(void)
 {
-       while (1);
+       union registers *regs;
+       unsigned long tmp;
+
+       regs = &this_cpu_data()->guest_regs;
+
+       /* VSBE = 0 -> VS-Mode mem accesses are LE */
+       csr_set(CSR_HSTATUS,
+               HSTATUS_SPV | /* Return to VS-Mode */
+               (2ULL << HSTATUS_VSXL_SHIFT)); /* xlen = 64 */
+
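+       /* Delegate guest synchronous exceptions directly to VS-mode */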
+       csr_write(CSR_HEDELEG,
+               (1UL << EXC_INST_MISALIGNED) |
+               (1UL << EXC_INST_ACCESS) |
+               (1UL << EXC_INST_ILLEGAL) |
+               (1UL << EXC_BREAKPOINT) |
+               (1UL << EXC_LOAD_ACCESS_MISALIGNED) |
+               (1UL << EXC_LOAD_ACCESS) |
+               (1UL << EXC_AMO_ADDRESS_MISALIGNED) |
+               (1UL << EXC_STORE_ACCESS) |
+               (1UL << EXC_SYSCALL) |
+               (1UL << EXC_INST_PAGE_FAULT) |
+               (1UL << EXC_LOAD_PAGE_FAULT) |
+               (1UL << EXC_STORE_PAGE_FAULT));
+
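+       /*
+        * No guest external interrupts; let the guest read the cycle and
+        * time counters, with no time offset applied.
+        */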
+       csr_write(CSR_HGEIE, 0);
+       csr_write(CSR_HCOUNTEREN, SCOUNTEREN_CY | SCOUNTEREN_TM);
+       csr_write(CSR_HTIMEDELTA, 0);
+
+       tmp = csr_read(sip);
+       csr_write(CSR_HVIP, tmp << VSIP_TO_HVIP_SHIFT); /* reinject pending */
+
+       riscv_paging_vcpu_init(&this_cell()->arch.mm);
+
+       /* Return value */
+       regs->a0 = 0;
+
+       csr_write(sepc, regs->ra); /* We will use sret, so move ra->sepc */
+
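+       /*
+        * Stash the guest's stack pointer in sscratch, pick up the stack
+        * previously stored there, and take the common vmreturn exit path.
+        */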
+       tmp = csr_swap(sscratch, regs->sp);
+       asm volatile("mv sp, %0\n"
+                    "j vmreturn\n" : : "r"(tmp));
+
+       __builtin_unreachable();
 }
 
-void __attribute__((noreturn)) arch_cpu_activate_vmm(void)
+static unsigned long symbol_offset(const void *addr)
 {
-       while (1);
+       return (unsigned long)addr - (unsigned long)&hypervisor_header;
+}
+
+void __attribute__((noreturn))
+riscv_deactivate_vmm(union registers *regs, int errcode, bool from_ecall)
+{
+       void __attribute__((noreturn))
+               (*deactivate_vmm)(union registers *, int, bool);
+       unsigned long linux_tables_offset, bootstrap_table_phys;
+       u8 atp_mode;
+
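+       /*
+        * Offset between the hypervisor's own mapping and Linux's original
+        * mapping of the Jailhouse image.
+        */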
+       linux_tables_offset =
+               symbol_offset((void *)hypervisor_header.initial_load_address);
+
+       /* Do not return to VS-mode; return to S-mode instead */
+       csr_clear(CSR_HSTATUS, HSTATUS_SPV);
+
+       /*
+        * We don't know which page table is currently active. So in any case,
+        * just jump back to the bootstrap tables, as they contain the old
+        * Linux mapping of Jailhouse
+        */
+       bootstrap_table_phys = system_config->hypervisor_memory.phys_start +
+                              symbol_offset(&bt_tbl_l0);
+       /* Take Linux's MMU mode */
+       atp_mode = csr_read(CSR_VSATP) >> ATP_MODE_SHIFT;
+       enable_mmu_satp(atp_mode, bootstrap_table_phys);
+
+       /*
+        * next access to regs will be under Linux's old page table, so amend
+        * the address
+        */
+       regs = (void *)regs + linux_tables_offset;
+       deactivate_vmm = (void *)__riscv_deactivate_vmm + linux_tables_offset;
+
+       /*
+        * Before jumping back to the original load location, relocate the
+        * stack pointer into Linux's mapping as well.
+        */
+       asm volatile("add sp, sp, %0" : : "r"(linux_tables_offset));
+       deactivate_vmm(regs, errcode, from_ecall);
 }
 
 void arch_cpu_restore(unsigned int cpu_id, int return_code)
-- 
2.40.1
