[Devel] [PATCH RH9 17/33] vzstat: Update sched lat in vzmon
Andrey Zhadchenko
andrey.zhadchenko at virtuozzo.com
Thu Sep 23 22:08:20 MSK 2021
From: Kirill Tkhai <ktkhai at virtuozzo.com>
FIXME: This patch must be rewritten via sched_entity::statistics::wait_max.
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
(cherry picked from vz8 commit 85d40e62bfe81cbaba338b7ecb5fc1377a3f2d7b)
Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
---
kernel/ve/vzstat.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/kernel/ve/vzstat.c b/kernel/ve/vzstat.c
index 9c1287a..c278b62 100644
--- a/kernel/ve/vzstat.c
+++ b/kernel/ve/vzstat.c
@@ -100,6 +100,56 @@ void kernel_text_csum_check(void)
* Latency update and show functions
* ------------------------------------------------------------------------
*/
+/*
+ * get_task_lat - how long @t has been waiting for a CPU, in nanoseconds.
+ * @t:   task to inspect (caller must hold a reference / tasklist_lock)
+ * @now: current monotonic time in ns, sampled once by the caller
+ *
+ * Returns @now - se.statistics.wait_start, or 0 when the stamp is unset
+ * or implausible.
+ */
+static inline u64 get_task_lat(struct task_struct *t, u64 now)
+{
+ u64 wstamp;
+
+ wstamp = t->se.statistics.wait_start;
+ /*
+ * wstamp == 0 means the task is not currently waiting. The other two
+ * checks reject stamps from the future and deltas with the top bit
+ * set (i.e. "negative" if interpreted as s64), which can appear when
+ * the stamp is read racily against the scheduler updating it.
+ */
+ if (wstamp && now > wstamp && now - wstamp < (1ULL << 63))
+ return now - wstamp;
+ return 0;
+}
+
+/*
+ * update_max_sched_latency_snap - refresh max scheduling-latency snapshots.
+ *
+ * Walks every thread in the system under tasklist_lock and, for each
+ * runnable (TASK_RUNNING) task, computes how long it has been waiting
+ * for a CPU. The per-VE maximum is stored in ve->sched_lat_ve.max_snap
+ * and the global maximum in kstat_glob.sched_lat.max_snap; both are
+ * consumed later by KSTAT_LAT_PCPU_UPDATE (see update_schedule_latency
+ * and update_venum).
+ */
+static void update_max_sched_latency_snap(void)
+{
+ struct task_struct *t, *g;
+ u64 now, max, tmp;
+ struct kstat_lat_pcpu_struct *st;
+
+ max = 0;
+ read_lock(&tasklist_lock);
+ /* Sample the clock once so all tasks are measured against one instant. */
+ now = ktime_to_ns(ktime_get());
+ for_each_process_thread(g, t) {
+ /* Most threads sleep, hence likely() on the skip branch. */
+ if (likely(t->__state != TASK_RUNNING))
+ continue;
+
+ tmp = get_task_lat(t, now);
+ if (max < tmp)
+ max = tmp;
+ /*
+ * NOTE(review): max_snap is written without kstat_glb_lock here;
+ * presumably racing with the reader is acceptable for these
+ * approximate stats — confirm against KSTAT_LAT_PCPU_UPDATE users.
+ */
+ st = &t->task_ve->sched_lat_ve;
+ if (st->max_snap < tmp)
+ st->max_snap = tmp;
+ }
+ read_unlock(&tasklist_lock);
+ kstat_glob.sched_lat.max_snap = max;
+}
+
+/*
+ * update_schedule_latency - fold the latest latency snapshot into the
+ * global scheduling-latency statistics.
+ */
+static void update_schedule_latency(void)
+{
+ /*
+ * global scheduling latency is updated in schedule() and
+ * update_max_sched_latency_snap(). The latter function guarantees
+ * that tasks which do not receive CPU time are still accounted in
+ * scheduling latency
+ */
+ update_max_sched_latency_snap();
+
+ /* Fold max_snap into the running stats under the global stats lock. */
+ spin_lock_irq(&kstat_glb_lock);
+ KSTAT_LAT_PCPU_UPDATE(&kstat_glob.sched_lat);
+ spin_unlock_irq(&kstat_glb_lock);
+ /* Note: per-VE latency is updated in update_venum() */
+}
+
static void update_alloc_latency(void)
{
int i;
@@ -258,6 +308,19 @@ static void mem_avg_show(struct seq_file *m, void *v)
}
}
+/*
+ * update_venum - fold per-VE scheduling-latency snapshots into each VE's
+ * running statistics. Walks the VE list under ve_list_lock so VEs cannot
+ * come or go mid-update.
+ */
+static void update_venum(void)
+{
+ struct ve_struct *ve;
+
+ mutex_lock(&ve_list_lock);
+ spin_lock_irq(&kstat_glb_lock);
+ for_each_ve(ve)
+ /*
+ * max_snap was already filled in by
+ * update_max_sched_latency_snap() (called from
+ * update_schedule_latency); here we only fold it in.
+ */
+ KSTAT_LAT_PCPU_UPDATE(&ve->sched_lat_ve);
+ spin_unlock_irq(&kstat_glb_lock);
+ mutex_unlock(&ve_list_lock);
+}
+
static void task_counts_seq_show(struct seq_file *m, void *v)
{
unsigned long _nr_running, _nr_sleeping, _nr_unint,
@@ -519,7 +582,9 @@ static int vzstat_mon_loop(void* data)
kernel_text_csum_check();
#endif
update_alloc_latency();
+ update_schedule_latency();
update_memory();
+ update_venum();
update_mmperf();
set_current_state(TASK_INTERRUPTIBLE);
--
1.8.3.1
More information about the Devel
mailing list