[Devel] [PATCH rh7 v2 1/2] sched/fair: Track cgroup depth

Kirill Tkhai ktkhai at odin.com
Wed Sep 23 09:18:49 PDT 2015


Port mainstream commits:

fed14d45f945042a15b09de48d7d3d58d9455fc4
eb7a59b2c888c2518ba2c9d0020343ca71aa9dee (fix for the first).

Track the depth in the cgroup tree; this is useful for things like
find_matching_se(), where you need to get to a common parent of two
sched entities.

Keeping the depth avoids having to calculate it on the spot, which
saves a number of possible cache misses.
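
To illustrate, this is roughly what find_matching_se() ends up looking
like with the cached depth (an abbreviated sketch, not the literal
fair.c code; parent_entity() and is_same_group() are helpers that
already exist there):

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth = (*se)->depth;
	int pse_depth = (*pse)->depth;

	/* First walk up until both entities are at the same depth */
	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}
	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	/* Then walk both up in lockstep until they share a cfs_rq */
	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

Previously the two depths had to be recomputed with depth_se(), i.e. a
full walk up the hierarchy for each entity, before the walks above
could even start.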

Signed-off-by: Peter Zijlstra <peterz at infradead.org>
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar <mingo at kernel.org>
Signed-off-by: Kirill Tkhai <ktkhai at odin.com>
Reviewed-by: Vladimir Davydov <vdavydov at odin.com>
---
 include/linux/sched.h |    1 +
 kernel/sched/fair.c   |   34 ++++++++++++++++++++--------------
 2 files changed, 21 insertions(+), 14 deletions(-)
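
A note on the hunks below: se->depth is set at three places, group
entity setup in init_tg_cfs_entry(), a task switching back to the fair
class in switched_to_fair(), and a task moving between task groups in
task_move_group_fair().  All three follow the same rule (with
init_tg_cfs_entry() computing it from its parent argument instead):

	se->depth = se->parent ? se->parent->depth + 1 : 0;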

diff --git a/include/linux/sched.h b/include/linux/sched.h
index eb1585a..e1bcabe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1099,6 +1099,7 @@ struct sched_entity {
 	struct cfs_rq		*cfs_rq;
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq		*my_q;
+	int			depth;
 #endif
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 171601d..8bc8c0f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -335,16 +335,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 }
 
 /* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-	int depth = 0;
-
-	for_each_sched_entity(se)
-		depth++;
-
-	return depth;
-}
-
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -358,8 +348,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 	 */
 
 	/* First walk up until both entities are at same depth */
-	se_depth = depth_se(*se);
-	pse_depth = depth_se(*pse);
+	se_depth = (*se)->depth;
+	pse_depth = (*pse)->depth;
 
 	while (se_depth > pse_depth) {
 		se_depth--;
@@ -7819,6 +7809,13 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	/*
+	 * Since the real depth could have been changed (only the FAIR
+	 * class maintains the depth value), reset the depth properly.
+	 */
+	p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
+#endif
 	if (!p->se.on_rq)
 		return;
 
@@ -7913,6 +7910,12 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
 		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
 #endif
 	}
+
+	/*
+	 * Since the real-depth could have been changed (only FAIR
+	 * class maintain depth value), reset depth properly.
+	 */
+        p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
 }
 
 void free_fair_sched_group(struct task_group *tg)
@@ -8006,10 +8009,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	if (!se)
 		return;
 
-	if (!parent)
+	if (!parent) {
 		se->cfs_rq = &rq->cfs;
-	else
+		se->depth = 0;
+	} else {
 		se->cfs_rq = parent->my_q;
+		se->depth = parent->depth + 1;
+	}
 
 	se->my_q = cfs_rq;
 	update_load_set(&se->load, 0);



