When code runs outside the big global lock, it is exposed to SMP races
at the low level, so atomic operations are needed.

This file is a wrapper around the GCC atomic builtins.

Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
---
 include/qemu/atomic.h |   63 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 63 insertions(+), 0 deletions(-)
 create mode 100644 include/qemu/atomic.h

diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
new file mode 100644
index 0000000..a9e6d35
--- /dev/null
+++ b/include/qemu/atomic.h
@@ -0,0 +1,63 @@
+/*
+ * Simple wrapper of gcc Atomic-Builtins
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/*
 * Atomic: an int counter that should only be touched through the
 * atomic_* helpers below.  Wrapping it in a struct keeps callers from
 * accidentally applying plain (non-atomic) arithmetic to it.
 */
typedef struct Atomic {
    volatile int counter;   /* volatile: force a real load/store on each access */
} Atomic;

/**
 * atomic_set - set atomic variable
 * @v: pointer of type Atomic
 * @i: value to store
 *
 * Plain store into @v; implies no memory barrier.
 */
static inline void atomic_set(Atomic *v, int i)
{
    v->counter = i;
}

/**
 * atomic_read - read atomic variable
 * @v: pointer of type Atomic
 *
 * Plain load of @v; implies no memory barrier.
 */
static inline int atomic_read(Atomic *v)
{
    return v->counter;
}

/**
 * atomic_return_and_add - add to atomic variable, returning old value
 * @i: value to add
 * @v: pointer of type Atomic
 *
 * Atomically adds @i to @v and returns the value @v held before the
 * addition (fetch-and-add).  The __sync builtin implies a full
 * memory barrier.
 */
static inline int atomic_return_and_add(int i, Atomic *v)
{
    return __sync_fetch_and_add(&v->counter, i);
}

/**
 * atomic_return_and_sub - subtract from atomic variable, returning old value
 * @i: value to subtract
 * @v: pointer of type Atomic
 *
 * Atomically subtracts @i from @v and returns the value @v held before
 * the subtraction.  The __sync builtin implies a full memory barrier.
 */
static inline int atomic_return_and_sub(int i, Atomic *v)
{
    return __sync_fetch_and_sub(&v->counter, i);
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type Atomic
 *
 * Atomically increments @v by 1.
 */
static inline void atomic_inc(Atomic *v)
{
    __sync_fetch_and_add(&v->counter, 1);
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type Atomic
 *
 * Atomically decrements @v by 1.
 */
static inline void atomic_dec(Atomic *v)
{
    __sync_fetch_and_sub(&v->counter, 1);
}

#endif /* QEMU_ATOMIC_H */
-- 
1.7.4.4


Reply via email to