[CRIU] [PATCH] x86: atomic -- Adopt code from linux kernel

Cyrill Gorcunov gorcunov at openvz.org
Fri Feb 15 09:19:53 EST 2013


This allows us to eliminate the "asm/types.h" inclusion as well,
and simplifies the code.

The one piece of code that is not so simple to convert is the
mutex_lock function. Previously we read the value and incremented it
all in one locked instruction, while the new primitive set does it in
two instructions -- a lockless read and a locked increment. This is
safe because the code is designed to spin in a lockless manner.
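
For illustration only (not part of the patch), a minimal sketch of
the two approaches; the helper names fetch_and_inc() and
read_then_inc() are hypothetical and exist only in this sketch:

	/* Old: the read and the increment happen in a single locked
	 * instruction -- xadd atomically adds to memory and hands
	 * back the previous value. */
	static inline unsigned int fetch_and_inc(atomic_t *v)
	{
		unsigned int ret = 1;

		asm volatile("lock xadd %0, %1"
			     : "+r" (ret), "+m" (v->counter)
			     : : "cc", "memory");
		return ret;	/* value before the increment */
	}

	/* New: the read and the increment are two separate
	 * operations, so another CPU may modify the counter in
	 * between; the caller must tolerate that, as the spinning
	 * loop in mutex_lock() does. */
	static inline unsigned int read_then_inc(atomic_t *v)
	{
		unsigned int ret = atomic_read(v);	/* lockless read */

		atomic_inc(v);				/* locked increment */
		return ret;
	}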

Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
---
 arch/x86/include/asm/atomic.h | 92 +++++++++++++++++++------------------------
 include/lock.h                |  4 +-
 2 files changed, 44 insertions(+), 52 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 273d595..c309dcb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,60 +1,50 @@
 #ifndef __CR_ATOMIC_H__
 #define __CR_ATOMIC_H__
 
-#include "asm/types.h"
+/*
+ * Adopted from linux kernel.
+ */
+
+#define LOCK_PREFIX "lock "
 
 typedef struct {
-	u32 counter;
+	unsigned int counter;
 } atomic_t;
 
-#define atomic_set(mem, v)					\
-	({							\
-		u32 ret__ = v;					\
-		asm volatile ("lock xchg %0, %1\n"		\
-				: "+r" (ret__), "+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-	})
-
-#define atomic_get(mem)						\
-	({							\
-		u32 ret__ = 0;					\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-#define atomic_inc(mem)						\
-	({							\
-		u32 ret__ = 1;					\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-#define atomic_dec(mem)						\
-	({							\
-		u32 ret__ = -1;					\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-/* true if the result is 0, or false for all other cases. */
-#define atomic_dec_and_test(mem)				\
-	({							\
-		unsigned char ret__;				\
-		asm volatile ("lock decl %0; sete %1\n"		\
-				: "+m" ((mem)->counter), "=qm" (ret__)	\
-				:				\
-				: "cc", "memory");		\
-		ret__ != 0;					\
-	})
+#define ATOMIC_INIT(i)	{ (i) }
+
+static inline int atomic_read(const atomic_t *v)
+{
+	return (*(volatile int *)&(v)->counter);
+}
+
+#define atomic_get(v) atomic_read(v)
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "+m" (v->counter));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "+m" (v->counter));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	unsigned char c;
+
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
+	return c != 0;
+}
 
 #endif /* __CR_ATOMIC_H__ */
diff --git a/include/lock.h b/include/lock.h
index 515655e..9da8710 100644
--- a/include/lock.h
+++ b/include/lock.h
@@ -119,10 +119,12 @@ static void inline mutex_lock(mutex_t *m)
 	u32 c;
 	int ret;
 
-	while ((c = atomic_inc(&m->raw))) {
+	while ((c = atomic_read(&m->raw))) {
+		atomic_inc(&m->raw);
 		ret = sys_futex(&m->raw.counter, FUTEX_WAIT, c + 1, NULL, NULL, 0);
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
 	}
+	atomic_inc(&m->raw);
 }
 
 static void inline mutex_unlock(mutex_t *m)
-- 
1.8.1.2


