From: Oleksandr Andrushchenko <oleksandr_andrushche...@epam.com>

Port hypervisor-related code from mini-OS. Update essential
arch code to support required bit operations, memory barriers etc.

Copyright for the bits ported belongs to at least the following authors,
please see related files for details:

Copyright (c) 2002-2003, K A Fraser
Copyright (c) 2005, Grzegorz Milos, gm...@cam.ac.uk,Intel Research Cambridge
Copyright (c) 2014, Karim Allah Ahmed <karim.allah.ah...@gmail.com>

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushche...@epam.com>
Signed-off-by: Anastasiia Lukianenko <anastasiia_lukiane...@epam.com>
---
 arch/arm/include/asm/xen/system.h |  96 +++++++++++
 common/board_r.c                  |  11 ++
 drivers/Makefile                  |   1 +
 drivers/xen/Makefile              |   5 +
 drivers/xen/hypervisor.c          | 277 ++++++++++++++++++++++++++++++
 include/xen.h                     |  11 ++
 include/xen/hvm.h                 |  30 ++++
 7 files changed, 431 insertions(+)
 create mode 100644 arch/arm/include/asm/xen/system.h
 create mode 100644 drivers/xen/Makefile
 create mode 100644 drivers/xen/hypervisor.c
 create mode 100644 include/xen.h
 create mode 100644 include/xen/hvm.h

diff --git a/arch/arm/include/asm/xen/system.h 
b/arch/arm/include/asm/xen/system.h
new file mode 100644
index 0000000000..81ab90160e
--- /dev/null
+++ b/arch/arm/include/asm/xen/system.h
@@ -0,0 +1,96 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * (C) 2014 Karim Allah Ahmed <karim.allah.ah...@gmail.com>
+ * (C) 2020, EPAM Systems Inc.
+ */
+#ifndef _ASM_ARM_XEN_SYSTEM_H
+#define _ASM_ARM_XEN_SYSTEM_H
+
+#include <compiler.h>
+#include <asm/bitops.h>
+
+/*
+ * Atomic compare-and-swap with sequentially-consistent ordering.
+ *
+ * If *ptr == old, store new there and evaluate to new.
+ * Otherwise leave *ptr unchanged and evaluate to the caller-supplied
+ * old value -- NOT the value actually observed in *ptr, unlike a
+ * Linux-style cmpxchg().  Callers can therefore distinguish success
+ * from failure only when new != old.
+ */
+#define synch_cmpxchg(ptr, old, new) \
+({ __typeof__(*ptr) stored = old; \
+       __atomic_compare_exchange_n(ptr, &stored, new, 0, __ATOMIC_SEQ_CST, 
__ATOMIC_SEQ_CST) ? new : old; \
+})
+
+/*
+ * As test_and_clear_bit(), but with __ATOMIC_SEQ_CST ordering.
+ * Operates atomically on the byte containing bit @nr.
+ * Returns 1 if the bit was set before being cleared, 0 otherwise.
+ */
+static inline int synch_test_and_clear_bit(int nr, volatile void *addr)
+{
+       u8 *byte = ((u8 *)addr) + (nr >> 3);
+       u8 bit = 1 << (nr & 7);
+       u8 orig;
+
+       orig = __atomic_fetch_and(byte, ~bit, __ATOMIC_SEQ_CST);
+
+       return (orig & bit) != 0;
+}
+
+/*
+ * As test_and_set_bit(), but with __ATOMIC_SEQ_CST ordering.
+ * Operates atomically on the byte containing bit @nr.
+ * Returns 1 if the bit was already set, 0 otherwise.
+ */
+static inline int synch_test_and_set_bit(int nr, volatile void *base)
+{
+       u8 *byte = ((u8 *)base) + (nr >> 3);
+       u8 bit = 1 << (nr & 7);
+       u8 orig;
+
+       orig = __atomic_fetch_or(byte, bit, __ATOMIC_SEQ_CST);
+
+       return (orig & bit) != 0;
+}
+
+/* As set_bit(), but using __ATOMIC_SEQ_CST; the RMW result is discarded. */
+static inline void synch_set_bit(int nr, volatile void *addr)
+{
+       synch_test_and_set_bit(nr, addr);
+}
+
+/* As clear_bit(), but using __ATOMIC_SEQ_CST; the RMW result is discarded. */
+static inline void synch_clear_bit(int nr, volatile void *addr)
+{
+       synch_test_and_clear_bit(nr, addr);
+}
+
+/*
+ * As test_bit(), followed by barrier() so the compiler cannot reorder
+ * the read past subsequent accesses.  Takes const void * (unlike the
+ * helpers above) because the bitmap is only read.
+ * NOTE(review): barrier() is a compiler barrier, not a hardware dsb --
+ * confirm that is sufficient for the callers.
+ */
+static inline int synch_test_bit(int nr, const void *addr)
+{
+       int result;
+
+       result = test_bit(nr, addr);
+       barrier();
+       return result;
+}
+
+/* Atomically store v into *ptr and return the previous value. */
+#define xchg(ptr, v)   __atomic_exchange_n(ptr, v, __ATOMIC_SEQ_CST)
+
+/* Memory barriers mapped onto the ARM dsb/dmb primitives. */
+#define mb()           dsb()
+#define rmb()          dsb()
+#define wmb()          dsb()
+#define __iormb()      dmb()
+#define __iowmb()      dmb()
+#define xen_mb()       mb()
+#define xen_rmb()      rmb()
+#define xen_wmb()      wmb()
+
+/* U-Boot runs on a single CPU, so the Xen interfaces use vcpu 0. */
+#define smp_processor_id()     0
+
+/* Identity virtual/physical mapping. */
+#define to_phys(x)             ((unsigned long)(x))
+#define to_virt(x)             ((void *)(x))
+
+/* Page-frame-number conversions; expansions fully parenthesized. */
+#define PFN_UP(x)              ((unsigned long)(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT))
+#define PFN_DOWN(x)            ((unsigned long)((x) >> PAGE_SHIFT))
+#define PFN_PHYS(x)            ((unsigned long)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x)            ((unsigned long)((x) >> PAGE_SHIFT))
+
+#define virt_to_pfn(_virt)     (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt)     (PFN_DOWN(to_phys(_virt)))
+#define mfn_to_virt(_mfn)      (to_virt(PFN_PHYS(_mfn)))
+#define pfn_to_virt(_pfn)      (to_virt(PFN_PHYS(_pfn)))
+
+#endif /* _ASM_ARM_XEN_SYSTEM_H */
diff --git a/common/board_r.c b/common/board_r.c
index fa57fa9b69..fd36edb4e5 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -56,6 +56,7 @@
 #include <timer.h>
 #include <trace.h>
 #include <watchdog.h>
+#include <xen.h>
 #ifdef CONFIG_ADDR_MAP
 #include <asm/mmu.h>
 #endif
@@ -462,6 +463,13 @@ static int initr_mmc(void)
 }
 #endif
 
+#ifdef CONFIG_XEN
+/* Post-relocation init hook: map the Xen shared info page. */
+static int initr_xen(void)
+{
+       xen_init();
+       return 0;
+}
+#endif
+
 /*
  * Tell if it's OK to load the environment early in boot.
  *
@@ -769,6 +777,9 @@ static init_fnc_t init_sequence_r[] = {
 #endif
 #ifdef CONFIG_MMC
        initr_mmc,
+#endif
+#ifdef CONFIG_XEN
+       initr_xen,
 #endif
        initr_env,
 #ifdef CONFIG_SYS_BOOTPARAMS_LEN
diff --git a/drivers/Makefile b/drivers/Makefile
index 94e8c5da17..0dd8891e76 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_$(SPL_)REMOTEPROC) += remoteproc/
 obj-$(CONFIG_$(SPL_TPL_)TPM) += tpm/
 obj-$(CONFIG_$(SPL_TPL_)ACPI_PMC) += power/acpi_pmc/
 obj-$(CONFIG_$(SPL_)BOARD) += board/
+obj-$(CONFIG_XEN) += xen/
 
 ifndef CONFIG_TPL_BUILD
 ifdef CONFIG_SPL_BUILD
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
new file mode 100644
index 0000000000..1211bf2386
--- /dev/null
+++ b/drivers/xen/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier:     GPL-2.0+
+#
+# (C) Copyright 2020 EPAM Systems Inc.
+
+obj-y += hypervisor.o
diff --git a/drivers/xen/hypervisor.c b/drivers/xen/hypervisor.c
new file mode 100644
index 0000000000..5883285142
--- /dev/null
+++ b/drivers/xen/hypervisor.c
@@ -0,0 +1,277 @@
+/******************************************************************************
+ * hypervisor.c
+ *
+ * Communication to/from hypervisor.
+ *
+ * Copyright (c) 2002-2003, K A Fraser
+ * Copyright (c) 2005, Grzegorz Milos, gm...@cam.ac.uk,Intel Research Cambridge
+ * Copyright (c) 2020, EPAM Systems Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <memalign.h>
+
+#include <asm/io.h>
+#include <asm/armv8/mmu.h>
+#include <asm/xen/system.h>
+
+#include <linux/bug.h>
+
+#include <xen/hvm.h>
+#include <xen/interface/memory.h>
+
+/* Word @idx of events that are pending and not masked (deliverable). */
+#define active_evtchns(cpu, sh, idx)   \
+       ((sh)->evtchn_pending[idx] &    \
+        ~(sh)->evtchn_mask[idx])
+
+/* Non-zero while do_hypervisor_callback() is executing. */
+int in_callback;
+
+/*
+ * Shared page for communicating with the hypervisor.
+ * Event flags go here, for example.
+ */
+struct shared_info *HYPERVISOR_shared_info;
+
+#ifndef CONFIG_PARAVIRT
+/* Map an HVM_PARAM_* index to its name, for diagnostics only. */
+static const char *param_name(int op)
+{
+#define PARAM(x)[HVM_PARAM_##x] = #x
+       static const char *const names[] = {
+               PARAM(CALLBACK_IRQ),
+               PARAM(STORE_PFN),
+               PARAM(STORE_EVTCHN),
+               PARAM(PAE_ENABLED),
+               PARAM(IOREQ_PFN),
+               PARAM(TIMER_MODE),
+               PARAM(HPET_ENABLED),
+               PARAM(IDENT_PT),
+               PARAM(ACPI_S_STATE),
+               PARAM(VM86_TSS),
+               PARAM(VPT_ALIGN),
+               PARAM(CONSOLE_PFN),
+               PARAM(CONSOLE_EVTCHN),
+       };
+#undef PARAM
+
+       /*
+        * Reject negative indices explicitly rather than relying on the
+        * implicit signed-to-unsigned conversion when comparing against
+        * the size_t result of ARRAY_SIZE() (-Wsign-compare).
+        */
+       if (op < 0 || op >= (int)ARRAY_SIZE(names))
+               return "unknown";
+
+       /* Gaps in the designated-initializer list are NULL pointers. */
+       if (!names[op])
+               return "reserved";
+
+       return names[op];
+}
+
+/*
+ * Read HVM parameter @idx into @value, with explicit dcache maintenance
+ * around the hypercall argument buffer.
+ *
+ * The xhv range is invalidated before the hypercall (so no stale cache
+ * line masks the hypervisor's write) and again afterwards, before the
+ * result is read.  NOTE(review): presumably required when this runs
+ * before the dcache is fully set up -- confirm against the call sites.
+ *
+ * Return: the (non-negative) hypercall result; BUG()s on failure.
+ */
+int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
+{
+       struct xen_hvm_param xhv;
+       int ret;
+
+       xhv.domid = DOMID_SELF;
+       xhv.index = idx;
+       invalidate_dcache_range((unsigned long)&xhv,
+                               (unsigned long)&xhv + sizeof(xhv));
+
+       ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+       if (ret < 0) {
+               pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+                          param_name(idx), idx, ret);
+               BUG();
+       }
+       invalidate_dcache_range((unsigned long)&xhv,
+                               (unsigned long)&xhv + sizeof(xhv));
+
+       *value = xhv.value;
+       return ret;
+}
+
+/*
+ * Read HVM parameter @idx into @value.  No explicit cache maintenance;
+ * see hvm_get_parameter_maintain_dcache() for the variant that does it.
+ *
+ * Return: the (non-negative) hypercall result; BUG()s on failure.
+ */
+int hvm_get_parameter(int idx, uint64_t *value)
+{
+       struct xen_hvm_param xhv;
+       int ret;
+
+       xhv.domid = DOMID_SELF;
+       xhv.index = idx;
+       ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+       if (ret < 0) {
+               pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+                          param_name(idx), idx, ret);
+               BUG();
+       }
+
+       *value = xhv.value;
+       return ret;
+}
+
+/*
+ * Set HVM parameter @idx to @value.
+ *
+ * Return: the (non-negative) hypercall result; BUG()s on failure.
+ */
+int hvm_set_parameter(int idx, uint64_t value)
+{
+       struct xen_hvm_param xhv;
+       int ret;
+
+       xhv.domid = DOMID_SELF;
+       xhv.index = idx;
+       xhv.value = value;
+       ret = HYPERVISOR_hvm_op(HVMOP_set_param, &xhv);
+       if (ret < 0) {
+               /* Was a copy/paste of the get path: this is "set". */
+               pr_err("Cannot set hvm parameter %s (%d): %d!\n",
+                          param_name(idx), idx, ret);
+               BUG();
+       }
+
+       return ret;
+}
+
+/*
+ * Allocate a page-aligned page and ask the hypervisor to map the
+ * shared info frame into it.
+ *
+ * @p: unused; kept for compatibility with the mini-OS prototype this
+ *     code was ported from.
+ * Return: the mapped shared info page; never returns NULL (BUG()s on
+ * any allocation or hypercall failure).
+ */
+struct shared_info *map_shared_info(void *p)
+{
+       struct xen_add_to_physmap xatp;
+
+       /* No cast needed on memalign() in C. */
+       HYPERVISOR_shared_info = memalign(PAGE_SIZE, PAGE_SIZE);
+       if (!HYPERVISOR_shared_info)
+               BUG();
+
+       xatp.domid = DOMID_SELF;
+       xatp.idx = 0;
+       xatp.space = XENMAPSPACE_shared_info;
+       xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+       if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
+               BUG();
+
+       return HYPERVISOR_shared_info;
+}
+
+/*
+ * Ask the hypervisor to unmap the shared info frame.
+ *
+ * NOTE(review): the page allocated in map_shared_info() is not freed
+ * and HYPERVISOR_shared_info is left pointing at it -- acceptable only
+ * if this runs just before handover/shutdown; confirm with callers.
+ */
+void unmap_shared_info(void)
+{
+       struct xen_remove_from_physmap xrtp;
+
+       xrtp.domid = DOMID_SELF;
+       xrtp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+       if (HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrtp) != 0)
+               BUG();
+}
+#endif
+
+/*
+ * Demultiplex pending event channels.
+ *
+ * Walks the two-level pending bitmap in the shared info page: the
+ * per-vcpu selector word (evtchn_pending_sel) is atomically exchanged
+ * with 0, and each set selector bit picks a word of evtchn_pending
+ * whose pending-and-unmasked bits identify event-channel ports.
+ * Actual per-port dispatch is not implemented yet (see TODO below).
+ */
+void do_hypervisor_callback(struct pt_regs *regs)
+{
+       unsigned long l1, l2, l1i, l2i;
+       unsigned int port;
+       int cpu = 0;
+       struct shared_info *s = HYPERVISOR_shared_info;
+       struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];
+
+       in_callback = 1;
+
+       vcpu_info->evtchn_upcall_pending = 0;
+       /* NB x86. No need for a barrier here -- XCHG is a barrier on x86. */
+#if !defined(__i386__) && !defined(__x86_64__)
+       /* Clear master flag /before/ clearing selector flag. */
+       wmb();
+#endif
+       l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+       while (l1 != 0) {
+               l1i = __ffs(l1);
+               l1 &= ~(1UL << l1i);
+
+               while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+                       l2i = __ffs(l2);
+                       l2 &= ~(1UL << l2i);
+
+                       /* Port = word index * bits-per-word + bit index. */
+                       port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
+                       /* TODO: handle new event: do_event(port, regs); */
+                       /* Suppress -Wunused-but-set-variable */
+                       (void)(port);
+               }
+       }
+
+       in_callback = 0;
+}
+
+/*
+ * Synchronously drain pending events: keep invoking the callback until
+ * evtchn_upcall_pending is clear.  When PV upcall masking is available,
+ * delivery is masked around each pass and the previous mask restored.
+ */
+void force_evtchn_callback(void)
+{
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+       int save;
+#endif
+       struct vcpu_info *vcpu;
+
+       vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+       save = vcpu->evtchn_upcall_mask;
+#endif
+
+       while (vcpu->evtchn_upcall_pending) {
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+               vcpu->evtchn_upcall_mask = 1;
+#endif
+               barrier();
+               do_hypervisor_callback(NULL);
+               barrier();
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+               vcpu->evtchn_upcall_mask = save;
+               barrier();
+#endif
+       }       /* dropped stray ';' that formed an empty statement */
+}
+
+/* Mask event channel @port: events accumulate but are not delivered. */
+void mask_evtchn(uint32_t port)
+{
+       struct shared_info *s = HYPERVISOR_shared_info;
+
+       synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+/* Unmask @port and re-deliver any event that arrived while masked. */
+void unmask_evtchn(uint32_t port)
+{
+       struct shared_info *s = HYPERVISOR_shared_info;
+       struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];
+
+       synch_clear_bit(port, &s->evtchn_mask[0]);
+
+       /*
+        * The following is basically the equivalent of 'hw_resend_irq'.
+        * Just like a real IO-APIC we 'lose the interrupt edge' if the
+        * channel is masked.
+        */
+       if (synch_test_bit(port, &s->evtchn_pending[0]) &&
+           !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
+                                   &vcpu_info->evtchn_pending_sel)) {
+               vcpu_info->evtchn_upcall_pending = 1;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+               if (!vcpu_info->evtchn_upcall_mask)
+#endif
+                       force_evtchn_callback();
+       }
+}
+
+/* Acknowledge @port: clear its pending bit in the shared info page. */
+void clear_evtchn(uint32_t port)
+{
+       struct shared_info *s = HYPERVISOR_shared_info;
+
+       synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+/*
+ * Top-level Xen setup: map the shared info page.  Called from the
+ * post-relocation init sequence (initr_xen() in common/board_r.c).
+ */
+void xen_init(void)
+{
+       debug("%s\n", __func__);
+
+       map_shared_info(NULL);
+}
diff --git a/include/xen.h b/include/xen.h
new file mode 100644
index 0000000000..1d6f74cc92
--- /dev/null
+++ b/include/xen.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) 2020, EPAM Systems Inc.
+ */
+#ifndef __XEN_H__
+#define __XEN_H__
+
+/*
+ * Map the hypervisor shared info page.  Must run before any other Xen
+ * facilities are used; see drivers/xen/hypervisor.c.
+ */
+void xen_init(void);
+
+#endif /* __XEN_H__ */
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
new file mode 100644
index 0000000000..89de9625ca
--- /dev/null
+++ b/include/xen/hvm.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Simple wrappers around HVM functions
+ *
+ * Copyright (c) 2002-2003, K A Fraser
+ * Copyright (c) 2005, Grzegorz Milos, gm...@cam.ac.uk,Intel Research Cambridge
+ * Copyright (c) 2020, EPAM Systems Inc.
+ */
+#ifndef XEN_HVM_H__
+#define XEN_HVM_H__
+
+#include <asm/xen/hypercall.h>
+#include <xen/interface/hvm/params.h>
+#include <xen/interface/xen.h>
+
+/* Shared page for communicating with the hypervisor (event flags etc.). */
+extern struct shared_info *HYPERVISOR_shared_info;
+
+/* Get/set HVM parameters; all BUG() on hypercall failure. */
+int hvm_get_parameter(int idx, uint64_t *value);
+int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value);
+int hvm_set_parameter(int idx, uint64_t value);
+
+/* Shared info mapping and event-channel primitives. */
+struct shared_info *map_shared_info(void *p);
+void unmap_shared_info(void);
+void do_hypervisor_callback(struct pt_regs *regs);
+void mask_evtchn(uint32_t port);
+void unmask_evtchn(uint32_t port);
+void clear_evtchn(uint32_t port);
+
+#endif /* XEN_HVM_H__ */
-- 
2.17.1

Reply via email to