[Devel] [PATCH RHEL8 COMMIT] sched: Account task_group::avenrun
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Oct 19 14:20:39 MSK 2020
The commit is pushed to "branch-rh8-4.18.0-193.6.3.vz8.4.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-193.6.3.vz8.4.14
------>
commit 1cf597220bf1a89fb96c8793e6e2db79f7e5bce3
Author: Konstantin Khorenko <khorenko at virtuozzo.com>
Date: Mon Dec 11 23:40:07 2017 +0300
sched: Account task_group::avenrun
This patch is part of vz7 commit (only the avenrun part)
34a1dc1e4e3d ("sched: Account task_group::cpustat,taskstats,avenrun")
Extracted from "Initial patch".
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
+++
ve/sched: Do not use kstat_glb_lock to update kstat_glob::nr_unint_avg
kstat_glob::nr_unint_avg can't be updated in parallel on two or
more cpus, so on modification we only have to protect against
readers.
So, avoid taking the global kstat_glb_lock here, to minimize its
sharing with the other counters it protects.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
(cherry picked from commit 715f311fdb4ab0b7922f9e53617c5821ae36bfaf)
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
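Note for reviewers: tg->avenrun[] is decayed with the same fixed-point
recurrence the kernel already uses for the global avenrun[]. Below is a
minimal stand-alone sketch of that math, with FSHIFT/FIXED_1/EXP_1 copied
from include/linux/sched/loadavg.h; the demo loop and the printing are
illustrative only and not part of this patch.

#include <stdio.h>

#define FSHIFT  11                      /* bits of fixed-point precision */
#define FIXED_1 (1 << FSHIFT)           /* 1.0 as fixed-point */
#define EXP_1   1884                    /* 1/exp(5sec/1min) as fixed-point */

/* One decay step, same recurrence as the kernel's calc_load():
 * load' = (load * exp + active * (FIXED_1 - exp)) / FIXED_1
 */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        unsigned long newload = load * exp + active * (FIXED_1 - exp);

        if (active >= load)
                newload += FIXED_1 - 1; /* round up while load is rising */
        return newload / FIXED_1;
}

int main(void)
{
        unsigned long load = 0;
        int i;

        /* two tasks runnable/uninterruptible for 60 samples, 5s apart */
        for (i = 0; i < 60; i++)
                load = calc_load(load, EXP_1, 2 * FIXED_1);

        /* print as /proc/loadavg does: integer part plus two decimals */
        printf("%lu.%02lu\n", load >> FSHIFT,
               ((load & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}

With two tasks permanently active, the 1-minute average climbs toward 2.00,
which is what get_avenrun_ve() will now report per task group.
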
 include/linux/sched/loadavg.h |  2 ++
 kernel/sched/loadavg.c        | 38 ++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h          |  1 +
 3 files changed, 41 insertions(+)

diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 4859bea47a7b..3c0a0db53559 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -14,6 +14,8 @@
  */
 extern unsigned long avenrun[];         /* Load averages */
 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+extern void get_avenrun_ve(unsigned long *loads,
+                           unsigned long offset, int shift);
 
 #define FSHIFT          11              /* nr of bits of precision */
 #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index de22da666ac7..dafa428dc1ba 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -76,6 +76,14 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
         loads[2] = (avenrun[2] + offset) << shift;
 }
 
+void get_avenrun_ve(unsigned long *loads, unsigned long offset, int shift)
+{
+        struct task_group *tg = task_group(current);
+        loads[0] = (tg->avenrun[0] + offset) << shift;
+        loads[1] = (tg->avenrun[1] + offset) << shift;
+        loads[2] = (tg->avenrun[2] + offset) << shift;
+}
+
 long calc_load_fold_active(struct rq *this_rq, long adjust)
 {
         long nr_active, delta = 0;
@@ -91,6 +99,34 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
         return delta;
 }
 
+#ifdef CONFIG_VE
+static void calc_load_ve(void)
+{
+        unsigned long nr_active;
+        struct task_group *tg;
+        int i;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(tg, &task_groups, list) {
+                nr_active = 0;
+                for_each_possible_cpu(i) {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+                        nr_active += tg->cfs_rq[i]->nr_running;
+                        nr_active += tg->cfs_rq[i]->nr_unint;
+#endif
+                }
+                nr_active *= FIXED_1;
+
+                tg->avenrun[0] = calc_load(tg->avenrun[0], EXP_1, nr_active);
+                tg->avenrun[1] = calc_load(tg->avenrun[1], EXP_5, nr_active);
+                tg->avenrun[2] = calc_load(tg->avenrun[2], EXP_15, nr_active);
+        }
+        rcu_read_unlock();
+}
+#else
+#define calc_load_ve() do { } while (0)
+#endif
+
 /**
  * fixed_power_int - compute: x^n, in O(log n) time
  *
@@ -372,6 +408,8 @@ void calc_global_load(unsigned long ticks)
 
         WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);
 
+        calc_load_ve();
+
         /*
          * In case we went to NO_HZ for multiple LOAD_FREQ intervals
          * catch up in bulk.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b2f0c26b2c50..485eee54f378 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -404,6 +404,7 @@ struct task_group {
         struct autogroup *autogroup;
 #endif
 
+        unsigned long avenrun[3];       /* loadavg data */
         /* Monotonic time in nsecs: */
         u64 start_time;
 
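
For illustration only (nothing below is added by this series): a consumer
of the new get_avenrun_ve() would mirror what fs/proc/loadavg.c already
does with get_avenrun(); the handler name here is made up for the example.

/* Hypothetical consumer, modeled on fs/proc/loadavg.c; not in this patch. */
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>

static int loadavg_ve_show(struct seq_file *m, void *v)
{
        unsigned long avnrun[3];

        /* same rounding as the global reader: offset FIXED_1/200, shift 0 */
        get_avenrun_ve(avnrun, FIXED_1 / 200, 0);

        seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu\n",
                   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
                   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
                   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
        return 0;
}

Since get_avenrun_ve() resolves the task group from current, such a handler
would transparently report per-container load when read from inside a
container.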