[Devel] [PATCH rh7 3/3] ve: Use wait_on_bit() to wait attaching is completed

Kirill Tkhai ktkhai at odin.com
Fri Oct 16 09:25:15 PDT 2015


wait_on_bit() prevents excess schedule_timeout() looping
(i.e. CPU time wasting and possible livelocks)

Suggested-by: Vladimir Davydov <vdavydov at odin.com>
Suggested-by: Pavel Emelyanov <xemul at odin.com>
Signed-off-by: Kirill Tkhai <ktkhai at odin.com>
---
 kernel/ve/ve.c |   30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 8ead598..a8c6a5f 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -701,6 +701,9 @@ static void ve_destroy(struct cgroup *cg)
 	kmem_cache_free(ve_cachep, ve);
 }
 
+#define VE_WAIT_BIT	31
+#define VE_WAIT_BIAS	(1 << VE_WAIT_BIT)
+
 static void ve_attach_work(struct callback_head *head)
 {
 	atomic_t *nr = &current->task_ve_mover->task_ve_nr_works;
@@ -711,15 +714,30 @@ static void ve_attach_work(struct callback_head *head)
 	smp_mb(); /* Pairs with smp_mb() in ve_can_attach() */
 	init_task_work(&current->task_ve_work, NULL);
 
-	atomic_dec(nr);
+	if (!(atomic_dec_return(nr) & ~VE_WAIT_BIAS)) {
+		atomic_clear_mask(VE_WAIT_BIAS, nr);
+		smp_mb__after_clear_bit();
+		wake_up_bit(nr, VE_WAIT_BIT);
+	}
+}
+
+static int ve_wait_action(void *unused)
+{
+	schedule();
+	return 0;
 }
 
 static void ve_wait_work(struct callback_head *head)
 {
 	atomic_t *nr = &current->task_ve_nr_works;
 
-	while (atomic_read(nr))
-		schedule_timeout(1);
+	atomic_set_mask(VE_WAIT_BIAS, nr);
+	smp_mb__after_atomic();
+
+	if (atomic_read(nr) & ~VE_WAIT_BIAS)
+		wait_on_bit(nr, VE_WAIT_BIT, ve_wait_action,TASK_UNINTERRUPTIBLE);
+	else
+		atomic_clear_mask(VE_WAIT_BIAS, nr);
 
 	init_task_work(&current->task_ve_work, NULL);
 }
@@ -791,8 +809,10 @@ static void ve_attach(struct cgroup *cg, struct cgroup_taskset *tset)
 		if (task != current) {
 			task->task_ve_mover = current;
 			init_task_work(&task->task_ve_work, ve_attach_work);
-			if (task_work_add(task, &task->task_ve_work, true) == 0)
-				atomic_inc(nr);
+			atomic_inc(nr);
+			smp_mb__after_atomic_inc();
+			if (task_work_add(task, &task->task_ve_work, true) < 0)
+				atomic_dec(nr);
 		} else {
 			/* cgroup attach code is OK with changing task_ve */
 			task->task_ve = ve;




More information about the Devel mailing list