From: Sergiy Kibrik <sergiy.kib...@globallogic.com>
Committer: Nadav Har'El <n...@scylladb.com>
Branch: master
bsd: xen: add synch_bitops.h and xen-os.h for AARCH64
evtchn and gnttab drivers use these operations extensively.
Signed-off-by: Sergiy Kibrik <sergiy.kib...@globallogic.com>
Message-Id: <1487949914-3523-1-git-send-email-sergiy.kib...@globallogic.com>
---
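For reference, a rough, hypothetical sketch of the call pattern the evtchn code
relies on these primitives for. The function, the include paths and the use of
the shared_info event-channel bitmaps are illustrative only, not the actual OSv
driver code:

#include <machine/xen/xen-os.h>        /* test_bit & friends (assumed path) */
#include <machine/xen/synch_bitops.h>  /* synch_* variants (assumed path) */

/* Hypothetical: mask a port, atomically consume its pending bit, dispatch,
 * then unmask the port again. */
static void demo_handle_port(shared_info_t *s, int port)
{
    synch_set_bit(port, &s->evtchn_mask[0]);             /* mask the port */

    if (synch_test_and_clear_bit(port, &s->evtchn_pending[0])) {
        /* ... call the per-port handler here ... */
    }

    synch_clear_bit(port, &s->evtchn_mask[0]);            /* unmask again */
}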
diff --git a/bsd/aarch64/machine/xen/synch_bitops.h b/bsd/aarch64/machine/xen/synch_bitops.h
--- a/bsd/aarch64/machine/xen/synch_bitops.h
+++ b/bsd/aarch64/machine/xen/synch_bitops.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2009 Citrix Systems, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Based on include/arm/os.h from Xen Mini-OS project
+ */
+
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/* If *ptr == old, then store new there (and return new).
+ * Otherwise, return the old value.
+ * Atomic. */
+#define synch_cmpxchg(ptr, old, new) \
+({ __typeof__(*ptr) stored = old; \
+ __atomic_compare_exchange_n(ptr, &stored, new, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? new : old; \
+})
+
+/* As test_and_clear_bit, but using __ATOMIC_SEQ_CST */
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void *addr)
+{
+ uint8_t *byte = ((uint8_t *)addr) + (nr >> 3);
+ uint8_t bit = 1 << (nr & 7);
+ uint8_t orig;
+
+ orig = __atomic_fetch_and(byte, ~bit, __ATOMIC_SEQ_CST);
+
+ return (orig & bit) != 0;
+}
+
+/* As test_and_set_bit, but using __ATOMIC_SEQ_CST */
+static __inline__ int synch_test_and_set_bit(int nr, volatile void *base)
+{
+ uint8_t *byte = ((uint8_t *)base) + (nr >> 3);
+ uint8_t bit = 1 << (nr & 7);
+ uint8_t orig;
+
+ orig = __atomic_fetch_or(byte, bit, __ATOMIC_SEQ_CST);
+
+ return (orig & bit) != 0;
+}
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+ synch_test_and_set_bit(nr, addr);
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+ synch_test_and_clear_bit(nr, addr);
+}
+
+/* As test_bit, but with a following compiler barrier. */
+static __inline__ int synch_test_bit(int nr, volatile void *addr)
+{
+ int result;
+ result = test_bit(nr, addr);
+ barrier();
+ return result;
+}
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
diff --git a/bsd/aarch64/machine/xen/xen-os.h b/bsd/aarch64/machine/xen/xen-os.h
--- a/bsd/aarch64/machine/xen/xen-os.h
+++ b/bsd/aarch64/machine/xen/xen-os.h
@@ -0,0 +1,132 @@
+/*
+ * Based on x64/machine/xen/xen-os.h and arm/os.h from Xen Mini OS
+ *
+ * Copyright (C) 2017 Sergiy Kibrik <sergiy.kib...@globallogic.com>
+ *
+ * This work is open source software, licensed under the terms of the
+ * BSD license as described in the LICENSE file in the top-level directory.
+ */
+
+#ifndef _XEN_OS_H_
+#define _XEN_OS_H_
+__BEGIN_DECLS
+
+#if !defined(__XEN_INTERFACE_VERSION__)
+/*
+ * Can update to a more recent version when we implement
+ * the hypercall page
+ */
+#define __XEN_INTERFACE_VERSION__ 0x00030204
+#endif
+
+#include <xen/interface/xen.h>
+
+extern shared_info_t *HYPERVISOR_shared_info;
+
+static inline void rep_nop(void)
+{
+ __asm__ __volatile__ ( "yield" : : : "memory" );
+}
+#define cpu_relax() rep_nop()
+
+#ifndef __ASSEMBLY__
+#define likely(x) __builtin_expect((x),1)
+#define unlikely(x) __builtin_expect((x),0)
+
+#ifndef mb
+#define mb() __asm__ __volatile__("dsb sy":::"memory")
+#endif
+#ifndef rmb
+#define rmb() __asm__ __volatile__("dsb ld":::"memory")
+#endif
+#ifndef wmb
+#define wmb() __asm__ __volatile__("dsb st":::"memory")
+#endif
+#ifdef SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+/* This is a barrier for the compiler only, NOT the processor! */
+#define barrier() __asm__ __volatile__("": : :"memory")
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic only;
+ * use synch_test_and_clear_bit() if you need a barrier.
+ */
+static __inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+ uint8_t *byte = ((uint8_t *)addr) + (nr >> 3);
+ uint8_t bit = 1 << (nr & 7);
+ uint8_t orig;
+
+ orig = __atomic_fetch_and(byte, ~bit, __ATOMIC_RELAXED);
+
+ return (orig & bit) != 0;
+}
+
+/**
+ * Atomically set a bit and return the old value.
+ * Similar to test_and_clear_bit.
+ */
+static __inline__ int test_and_set_bit(int nr, volatile void *base)
+{
+ uint8_t *byte = ((uint8_t *)base) + (nr >> 3);
+ uint8_t bit = 1 << (nr & 7);
+ uint8_t orig;
+
+ orig = __atomic_fetch_or(byte, bit, __ATOMIC_RELAXED);
+
+ return (orig & bit) != 0;
+}
+
+
+/**
+ * Test whether a bit is set. */
+static __inline__ int test_bit(int nr, const volatile void *addr)
+{
+ const uint8_t *ptr = (const uint8_t *) addr;
+ return ((1 << (nr & 7)) & (ptr[nr >> 3])) != 0;
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic only.
+ * Use synch_set_bit() if you need a barrier.
+ */
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ test_and_set_bit(nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic only.
+ * Use synch_clear_bit() if you need a barrier.
+ */
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ test_and_clear_bit(nr, addr);
+}
+
+#endif /* !__ASSEMBLY__ */
+__END_DECLS
+#endif /* _XEN_OS_H_ */
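And, just to illustrate how the barrier and relaxation helpers above fit
together, a hypothetical polling loop (again not driver code; the flag word
and its layout are made up):

/* Hypothetical: spin until bit 0 of a shared flag word is set by another
 * CPU or by the hypervisor, then make sure later reads observe data that
 * was published before the flag. */
static void demo_wait_for_flag(const volatile unsigned long *flag)
{
    while (!test_bit(0, flag))
        cpu_relax();    /* "yield" hint while busy-waiting */
    rmb();              /* read barrier: order the flag read before
                         * subsequent data reads */
}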