[Devel] [PATCH 2/5] sched/ve: Link VE root cpu cgroups in separate list
Kirill Tkhai
ktkhai at virtuozzo.com
Tue Feb 20 18:09:43 MSK 2018
The idea is to link the small number of VE root cpu cgroups
into a separate list. This makes it possible to avoid
unnecessary loadavg calculations for the VEs' child cpu
cgroups in the next patches, and it should noticeably improve
the performance of calc_load_ve(). A stand-alone sketch of the
idea follows the diffstat below.
https://jira.sw.ru/browse/PSBM-81572
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 include/linux/sched.h |  5 +++++
 kernel/cgroup.c       |  3 +++
 kernel/sched/core.c   | 24 ++++++++++++++++++++++++
 kernel/sched/sched.h  |  4 ++++
 4 files changed, 36 insertions(+)
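Not part of the patch: the shape of the change can be shown with a
stand-alone sketch. The code below is plain user-space C, not kernel
code; the names (ve_root_list, link_ve_root_cpu_cgroup, calc_load_ve)
mirror the patch, a pthread mutex stands in for task_group_lock, and a
simple singly linked list stands in for the kernel's list_head/RCU
machinery. It only illustrates why keeping the few VE roots on their
own list makes the periodic loadavg pass cheap:

/*
 * Stand-alone sketch (not kernel code): keep the few "VE root"
 * groups on a dedicated list so the periodic loadavg pass visits
 * only them, never the child groups of the hierarchy.
 */
#include <pthread.h>
#include <stdio.h>

struct task_group {
	const char *name;
	struct task_group *ve_root_next;	/* link in ve_root_list */
};

static struct task_group *ve_root_list;	/* heads the VE-root-only list */
static pthread_mutex_t task_group_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called once per VE root cpu cgroup, like link_ve_root_cpu_cgroup(). */
static void link_ve_root_cpu_cgroup(struct task_group *tg)
{
	pthread_mutex_lock(&task_group_lock);
	tg->ve_root_next = ve_root_list;
	ve_root_list = tg;
	pthread_mutex_unlock(&task_group_lock);
}

/* The periodic pass now iterates only over VE roots, like calc_load_ve(). */
static void calc_load_ve(void)
{
	for (struct task_group *tg = ve_root_list; tg; tg = tg->ve_root_next)
		printf("updating avenrun for %s\n", tg->name);
}

int main(void)
{
	static struct task_group ve101 = { .name = "ve101" };
	static struct task_group ve102 = { .name = "ve102" };

	link_ve_root_cpu_cgroup(&ve101);	/* child cgroups are never linked */
	link_ve_root_cpu_cgroup(&ve102);
	calc_load_ve();
	return 0;
}

Compiled with, say, "gcc -pthread sketch.c", it prints one update line
per linked VE root and none for child groups, which is the whole point
of the separate list.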
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cc52094b4e97..b8611ca23851 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3294,4 +3294,9 @@ static inline unsigned long rlimit_max(unsigned int limit)
 	return task_rlimit_max(current, limit);
 }
 
+#ifdef CONFIG_VE
+struct cgroup;
+extern void link_ve_root_cpu_cgroup(struct cgroup *);
+#endif
+
 #endif
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 11c547fed99f..dd808689aee6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4275,6 +4275,9 @@ void cgroup_mark_ve_root(struct ve_struct *ve)
 	for_each_active_root(root) {
 		cgrp = task_cgroup_from_root(ve->init_task, root);
 		set_bit(CGRP_VE_ROOT, &cgrp->flags);
+
+		if (test_bit(cpu_cgroup_subsys_id, &root->subsys_mask))
+			link_ve_root_cpu_cgroup(cgrp);
 	}
 	mutex_unlock(&cgroup_mutex);
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3800a24db69..9bfae156e6d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2881,6 +2881,8 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
 }
 
 #ifdef CONFIG_VE
+static LIST_HEAD(ve_root_list);
+
 static void calc_load_ve(void)
 {
 	unsigned long nr_unint, nr_active;
@@ -8754,6 +8756,19 @@ void set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_VE
+void link_ve_root_cpu_cgroup(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	unsigned long flags;
+
+	spin_lock_irqsave(&task_group_lock, flags);
+	BUG_ON(!(cgrp->subsys[cpu_cgroup_subsys_id]->flags & CSS_ONLINE));
+	list_add_rcu(&tg->ve_root_list, &ve_root_list);
+	spin_unlock_irqrestore(&task_group_lock, flags);
+}
+#endif
+
 static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
@@ -9318,6 +9333,9 @@ static int cpu_cgroup_css_online(struct cgroup *cgrp)
 	struct task_group *tg = cgroup_tg(cgrp);
 	struct task_group *parent;
 
+#ifdef CONFIG_VE
+	INIT_LIST_HEAD(&tg->ve_root_list);
+#endif
 	if (!cgrp->parent)
 		return 0;
 
@@ -9336,7 +9354,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
 
 static void cpu_cgroup_css_offline(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
+#ifdef CONFIG_VE
+	unsigned long flags;
 
+	spin_lock_irqsave(&task_group_lock, flags);
+	list_del_rcu(&tg->ve_root_list);
+	spin_unlock_irqrestore(&task_group_lock, flags);
+#endif
 	sched_offline_group(tg);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f9152826e4f5..0a8a2b07436a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -260,6 +260,10 @@ struct task_group {
 	struct autogroup *autogroup;
 #endif
 
+#ifdef CONFIG_VE
+	struct list_head ve_root_list;
+#endif
+
 	struct taskstats __percpu *taskstats;
 	unsigned long avenrun[3];	/* loadavg data */
 	struct timespec start_time;
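Not part of the patch either: since the writers above use
list_add_rcu()/list_del_rcu() under task_group_lock, the reader in the
next patches presumably walks the list under rcu_read_lock() instead of
taking the spinlock. A rough guess at what the read side of
calc_load_ve() could look like after the series (the real body lands in
the follow-up patches):

static void calc_load_ve(void)
{
	struct task_group *tg;

	rcu_read_lock();
	list_for_each_entry_rcu(tg, &ve_root_list, ve_root_list) {
		/* fold this VE root's nr_active into tg->avenrun[] */
	}
	rcu_read_unlock();
}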