[PATCH 2/4] x86: atomic -- Switch to linux kernel templates

Cyrill Gorcunov gorcunov at openvz.org
Tue Aug 13 10:28:02 EDT 2013


Use the same code as provided in the kernel. Originally we
used our own prototypes for simplicity (they were all
based on the "lock xadd" instruction). There is no longer
any need for that, and we can switch to the well-known
kernel API.

Because the kernel uses a plain int type to carry atomic
counters, I had to add explicit u32 casts for the futexes,
as well as a couple of fixes for the new API usage.

Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
---
 arch/arm/include/asm/atomic.h |   4 +-
 arch/x86/include/asm/atomic.h | 122 ++++++++++++++++++++++++------------------
 include/lock.h                |  28 +++++-----
 stats.c                       |   2 +-
 4 files changed, 86 insertions(+), 70 deletions(-)

diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 7468790..d1a4634 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -2,7 +2,7 @@
 #define __CR_ATOMIC_H__
 
 typedef struct {
-	u32 counter;
+	int counter;
 } atomic_t;
 
 
@@ -20,7 +20,7 @@ typedef struct {
 
 
 #define atomic_set(mem,v) ((mem)->counter = (v))
-#define atomic_get(v)	(*(volatile u32 *)&(v)->counter)
+#define atomic_get(v)	(*(volatile int *)&(v)->counter)
 
 static inline unsigned int atomic_add_return(int i, atomic_t *v)
 {
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 6850a48..13f9850 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -1,62 +1,78 @@
 #ifndef __CR_ATOMIC_H__
 #define __CR_ATOMIC_H__
 
-#include "asm/types.h"
+#include "asm/cmpxchg.h"
+
+#define LOCK_PREFIX "\n\tlock; "
 
 typedef struct {
-	u32 counter;
+	int counter;
 } atomic_t;
 
-#define atomic_set(mem, v)					\
-	({							\
-		u32 ret__ = v;					\
-		asm volatile ("lock xchg %0, %1\n"		\
-				: "+r" (ret__), "+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-	})
-
-#define atomic_get(mem)						\
-	({							\
-		u32 ret__ = 0;					\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-#define atomic_add(mem, val)					\
-	({							\
-		u32 ret__ = (val);				\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-#define atomic_inc(mem)	atomic_add(mem, 1)
-
-#define atomic_dec(mem)						\
-	({							\
-		u32 ret__ = -1;					\
-		asm volatile ("lock xadd %0, %1\n"		\
-				: "+r" (ret__),	"+m" ((mem)->counter)	\
-				:				\
-				: "cc", "memory");		\
-		ret__;						\
-	})
-
-/* true if the result is 0, or false for all other cases. */
-#define atomic_dec_and_test(mem)				\
-	({							\
-		unsigned char ret__;				\
-		asm volatile ("lock decl %0; sete %1\n"		\
-				: "+m" ((mem)->counter), "=qm" (ret__)	\
-				:				\
-				: "cc", "memory");		\
-		ret__ != 0;					\
-	})
+#define ATOMIC_INIT(i)	{ (i) }
+
+static inline int atomic_read(const atomic_t *v)
+{
+	return (*(volatile int *)&(v)->counter);
+}
+
+/*
+ * FIXME Use atomic_read instead of atomic_get all over the code
+ */
+#define atomic_get atomic_read
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "+m" (v->counter)
+		     : "ir" (i));
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "+m" (v->counter));
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "+m" (v->counter));
+}
+
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+	unsigned char c;
+
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "+m" (v->counter), "=qm" (c)
+		     : : "memory");
+	return c != 0;
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	return i + xadd(&v->counter, i);
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	return atomic_add_return(-i, v);
+}
+
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
 #endif /* __CR_ATOMIC_H__ */
diff --git a/include/lock.h b/include/lock.h
index d736e90..fbf7d2b 100644
--- a/include/lock.h
+++ b/include/lock.h
@@ -27,7 +27,7 @@ static inline u32 futex_get(futex_t *f)
 /* Set futex @f value to @v */
 static inline void futex_set(futex_t *f, u32 v)
 {
-	atomic_set(&f->raw, v);
+	atomic_set(&f->raw, (int)v);
 }
 
 #define futex_init(f)	futex_set(f, 0)
@@ -39,11 +39,11 @@ static inline void futex_set(futex_t *f, u32 v)
 		u32 tmp;					\
 								\
 		while (1) {					\
-			tmp = atomic_get(&(__f)->raw);			\
+			tmp = (u32)atomic_get(&(__f)->raw);	\
 			if ((tmp & FUTEX_ABORT_FLAG) ||		\
 			    (tmp __cond (__v)))			\
 				break;				\
-			ret = sys_futex(&(__f)->raw.counter, FUTEX_WAIT,\
+			ret = sys_futex((u32 *)&(__f)->raw.counter, FUTEX_WAIT,\
 					tmp, NULL, NULL, 0);	\
 			BUG_ON(ret < 0 && ret != -EWOULDBLOCK);	\
 		}						\
@@ -52,8 +52,8 @@ static inline void futex_set(futex_t *f, u32 v)
 /* Set futex @f to @v and wake up all waiters */
 static inline void futex_set_and_wake(futex_t *f, u32 v)
 {
-	atomic_set(&f->raw, v);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	atomic_set(&f->raw, (int)v);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }
 
 /* Mark futex @f as wait abort needed and wake up all waiters */
@@ -67,14 +67,14 @@ static inline void futex_abort_and_wake(futex_t *f)
 static inline void futex_dec_and_wake(futex_t *f)
 {
 	atomic_dec(&f->raw);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }
 
 /* Increment futex @f value and wake up all waiters */
 static inline void futex_inc_and_wake(futex_t *f)
 {
 	atomic_inc(&f->raw);
-	BUG_ON(sys_futex(&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
+	BUG_ON(sys_futex((u32 *)&f->raw.counter, FUTEX_WAKE, INT_MAX, NULL, NULL, 0) < 0);
 }
 
 /* Plain increment futex @f value */
@@ -102,8 +102,8 @@ static inline void futex_wait_while_eq(futex_t *f, u32 v)
 /* Wait while futex @f value is @v */
 static inline void futex_wait_while(futex_t *f, u32 v)
 {
-	while (atomic_get(&f->raw) == v) {
-		int ret = sys_futex(&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
+	while ((u32)atomic_get(&f->raw) == v) {
+		int ret = sys_futex((u32 *)&f->raw.counter, FUTEX_WAIT, v, NULL, NULL, 0);
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
 	}
 }
@@ -115,7 +115,7 @@ typedef struct {
 static inline void mutex_init(mutex_t *m)
 {
 	u32 c = 0;
-	atomic_set(&m->raw, c);
+	atomic_set(&m->raw, (int)c);
 }
 
 static inline void mutex_lock(mutex_t *m)
@@ -123,8 +123,8 @@ static inline void mutex_lock(mutex_t *m)
 	u32 c;
 	int ret;
 
-	while ((c = atomic_inc(&m->raw))) {
-		ret = sys_futex(&m->raw.counter, FUTEX_WAIT, c + 1, NULL, NULL, 0);
+	while ((c = (u32)atomic_inc_return(&m->raw)) != 1) {
+		ret = sys_futex((u32 *)&m->raw.counter, FUTEX_WAIT, c, NULL, NULL, 0);
 		BUG_ON(ret < 0 && ret != -EWOULDBLOCK);
 	}
 }
@@ -132,8 +132,8 @@ static inline void mutex_lock(mutex_t *m)
 static inline void mutex_unlock(mutex_t *m)
 {
 	u32 c = 0;
-	atomic_set(&m->raw, c);
-	BUG_ON(sys_futex(&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
+	atomic_set(&m->raw, (int)c);
+	BUG_ON(sys_futex((u32 *)&m->raw.counter, FUTEX_WAKE, 1, NULL, NULL, 0) < 0);
 }
 
 #endif /* __CR_LOCK_H__ */
diff --git a/stats.c b/stats.c
index fb67ab5..68e85d5 100644
--- a/stats.c
+++ b/stats.c
@@ -29,7 +29,7 @@ void cnt_add(int c, unsigned long val)
 		dstats->counts[c] += val;
 	} else if (rstats != NULL) {
 		BUG_ON(c >= RESTORE_CNT_NR_STATS);
-		atomic_add(&rstats->counts[c], val);
+		atomic_add(val, &rstats->counts[c]);
 	} else
 		BUG();
 }
-- 
1.8.1.4


--Nq2Wo0NMKNjxTN9z--


More information about the CRIU mailing list