[Devel] [PATCH RHEL COMMIT] vzstat: Add base kstat structures and variables
Konstantin Khorenko
khorenko at virtuozzo.com
Fri Sep 24 15:15:53 MSK 2021
The commit is pushed to "branch-rh9-5.14.vz9.1.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after ark-5.14
------>
commit 70d21e9a67cff81fa0888ef63751b79c2e769d92
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date: Fri Sep 24 15:15:53 2021 +0300
vzstat: Add base kstat structures and variables
Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
+++
ve/vzstat.h: move some kstat definitions into new header
Move some definitions into kstat.h, so we can use them later
in other headers (sched.h)
https://jira.sw.ru/browse/PSBM-81395
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
(cherry-picked from vz7 commit c98a0fae0c9a ("ve/vzstat.h: move some kstat
definitions into new header"))
Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
+++
kstat: fix percpu early linking
* init kstat early as possible, before page_alloc_init() and kmem_cache_init()
https://jira.sw.ru/browse/PSBM-36448
Signed-off-by: Konstantin Khlebnikov <khlebnikov at openvz.org>
Without this patch kstat_init() is useless and never called.
https://jira.sw.ru/browse/PSBM-127780
Splited-from: 02bc7ba77b34b ("core: Add glob_kstat, percpu kstat and
account mm stat")
Fixes: 32a31d2ad5e7f ("vzstat: Add base kstat structures and variables")
Signed-off-by: Pavel Tikhomirov <ptikhomirov at virtuozzo.com>
(cherry picked from vz8 commit 48043eee32dddb7e9d07f39c145d868d0d70a5ae)
Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
---
include/linux/kstat.h | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/vzstat.h | 51 ++++++++++++++++++++++++++++++++++++++++++++
init/main.c | 2 ++
kernel/sched/core.c | 40 +++++++++++++++++++++++++++++++++++
4 files changed, 150 insertions(+)
diff --git a/include/linux/kstat.h b/include/linux/kstat.h
new file mode 100644
index 000000000000..fcf6f0fc4b6f
--- /dev/null
+++ b/include/linux/kstat.h
@@ -0,0 +1,57 @@
+/*
+ * include/linux/kstat.h
+ *
+ * Copyright (c) 2000-2008 SWsoft
+ * Copyright (c) 2009-2015 Parallels IP Holdings GmbH
+ * Copyright (c) 2017-2021 Virtuozzo International GmbH. All rights reserved.
+ *
+ */
+
+#ifndef __LINUX_KSTAT_H
+#define __LINUX_KSTAT_H
+
+/*
+ * Allocation classes indexing kstat_glob.alloc_lat[] and alloc_fails[][].
+ * NOTE(review): names suggest GFP context (atomic vs. low/high zone,
+ * "_MP" presumably "may pause"/blocking) — confirm against users.
+ */
+enum {
+ KSTAT_ALLOCSTAT_ATOMIC,
+ KSTAT_ALLOCSTAT_LOW,
+ KSTAT_ALLOCSTAT_HIGH,
+ KSTAT_ALLOCSTAT_LOW_MP,
+ KSTAT_ALLOCSTAT_HIGH_MP,
+ KSTAT_ALLOCSTAT_NR,	/* number of classes, used as array size */
+};
+
+/*
+ * Aggregated perf snapshot: total and maximum durations of an operation,
+ * in wall-clock and CPU time, plus the invocation count.
+ */
+struct kstat_perf_snap_struct {
+ u64 wall_tottime, cpu_tottime;
+ u64 wall_maxdur, cpu_maxdur;
+ unsigned long count;
+};
+
+/*
+ * Per-cpu variant of kstat_perf_snap_struct; the seqcount lets a reader
+ * obtain a consistent view of the fields without blocking the writer.
+ */
+struct kstat_perf_pcpu_snap_struct {
+ u64 wall_tottime, cpu_tottime;
+ u64 wall_maxdur, cpu_maxdur;
+ unsigned long count;
+ seqcount_t lock;
+};
+
+/*
+ * Perf counter: 'cur' points at per-cpu snapshot storage (wired up in
+ * kstat_init()), 'last' holds the most recent aggregated snapshot.
+ */
+struct kstat_perf_pcpu_struct {
+ struct kstat_perf_pcpu_snap_struct *cur;
+ struct kstat_perf_snap_struct last;
+};
+
+/* Aggregated latency snapshot: max and total latency over 'count' events. */
+struct kstat_lat_snap_struct {
+ u64 maxlat, totlat;
+ unsigned long count;
+};
+
+/*
+ * Per-cpu latency snapshot; cacheline-aligned so each CPU's counters
+ * live on their own line (avoids false sharing between CPUs).
+ */
+struct kstat_lat_pcpu_snap_struct {
+ u64 maxlat, totlat;
+ unsigned long count;
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Latency counter: 'cur' points at per-cpu snapshot storage (wired up in
+ * kstat_init()), 'last' is the latest aggregated snapshot, 'max_snap'
+ * the maximum seen, and avg[3] presumably 1/5/15-style moving averages
+ * (matching the other [3] average arrays here) — confirm against users.
+ */
+struct kstat_lat_pcpu_struct {
+ struct kstat_lat_pcpu_snap_struct *cur;
+ u64 max_snap;
+ struct kstat_lat_snap_struct last;
+ u64 avg[3];
+};
+
+#endif
diff --git a/include/linux/vzstat.h b/include/linux/vzstat.h
new file mode 100644
index 000000000000..32f1132404c1
--- /dev/null
+++ b/include/linux/vzstat.h
@@ -0,0 +1,51 @@
+/*
+ * include/linux/vzstat.h
+ *
+ * Copyright (c) 2005-2008 SWsoft
+ * Copyright (c) 2009-2015 Parallels IP Holdings GmbH
+ * Copyright (c) 2017-2021 Virtuozzo International GmbH. All rights reserved.
+ *
+ */
+
+#ifndef __VZSTAT_H__
+#define __VZSTAT_H__
+
+#include <linux/mmzone.h>
+#include <linux/kstat.h>
+
+/* Swap-cache add/delete/lookup counters (success vs. total lookups). */
+struct swap_cache_info_struct {
+ unsigned long add_total;
+ unsigned long del_total;
+ unsigned long find_success;
+ unsigned long find_total;
+};
+
+/*
+ * Per-zone page-count averages; the [3] arrays mirror the other moving
+ * averages in this header (kernel_stat_glob.nr_unint_avg[3]).
+ */
+struct kstat_zone_avg {
+ unsigned long free_pages_avg[3],
+ nr_active_avg[3],
+ nr_inactive_avg[3];
+};
+
+/*
+ * Global kernel statistics: uninterruptible-task averages, allocation
+ * failure counters and latencies, scheduling/page-in/swap-in latencies,
+ * perf counters for several reclaim paths, and per-zone averages.
+ * The single instance is kstat_glob, defined in kernel/sched/core.c.
+ */
+struct kernel_stat_glob {
+ unsigned long nr_unint_avg[3];
+ /* protects consistent reads of nr_unint_avg[] */
+ seqcount_t nr_unint_avg_seq;
+
+ /* per-cpu failure counts and latencies, indexed by KSTAT_ALLOCSTAT_* */
+ unsigned long alloc_fails[NR_CPUS][KSTAT_ALLOCSTAT_NR];
+ struct kstat_lat_pcpu_struct alloc_lat[KSTAT_ALLOCSTAT_NR];
+ struct kstat_lat_pcpu_struct sched_lat;
+ struct kstat_lat_pcpu_struct page_in;
+ struct kstat_lat_pcpu_struct swap_in;
+
+ /* perf counters for reclaim/shrinker paths ("ttfp": time-to-free-page?) */
+ struct kstat_perf_pcpu_struct ttfp, cache_reap,
+ refill_inact, shrink_icache, shrink_dcache;
+
+ struct kstat_zone_avg zone_avg[MAX_NR_ZONES];
+} ____cacheline_aligned;
+
+DECLARE_PER_CPU(seqcount_t, kstat_pcpu_seq);
+
+extern struct kernel_stat_glob kstat_glob ____cacheline_aligned;
+extern spinlock_t kstat_glb_lock;
+
+extern void kstat_init(void);
+#endif /* __VZSTAT_H__ */
diff --git a/init/main.c b/init/main.c
index 6a32ca904e42..58053a9295b8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -101,6 +101,7 @@
#include <linux/init_syscalls.h>
#include <linux/stackdepot.h>
#include <linux/veowner.h>
+#include <linux/vzstat.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -940,6 +941,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
+ kstat_init();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
boot_cpu_hotplug_init();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 291f293fb00a..a530d3cba26d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -17,6 +17,7 @@
#include <linux/kcov.h>
#include <linux/scs.h>
#include <linux/ve.h>
+#include <linux/vzstat.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -371,6 +372,45 @@ static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { }
*/
int sysctl_sched_rt_runtime = 950000;
+/* The single global stats instance and its lock; exported for modules. */
+struct kernel_stat_glob kstat_glob;
+DEFINE_SPINLOCK(kstat_glb_lock);
+EXPORT_SYMBOL(kstat_glob);
+EXPORT_SYMBOL(kstat_glb_lock);
+
+/* Per-cpu seqcount guarding per-cpu snapshot updates (declared in vzstat.h). */
+DEFINE_PER_CPU(seqcount_t, kstat_pcpu_seq);
+EXPORT_SYMBOL(kstat_pcpu_seq);
+
+/*
+ * Static per-cpu backing storage for the 'cur' pointers in kstat_glob;
+ * wired up by kstat_init() below. Static allocation lets kstat_init()
+ * run before the allocators are up (see commit message: must precede
+ * page_alloc_init()/kmem_cache_init()).
+ */
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_lat);
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_page_in);
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, glob_kstat_swap_in);
+static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, alloc_kstat_lat[KSTAT_ALLOCSTAT_NR]);
+
+static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_ttfp);
+static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_cache_reap);
+static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_shrink_icache);
+static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_shrink_dcache);
+static DEFINE_PER_CPU(struct kstat_perf_pcpu_snap_struct, kstat_pcpu_refill_inact);
+
+/*
+ * One-time early init of the global kstat machinery: initialize the
+ * seqcounts and point every kstat_glob 'cur' member at its static
+ * per-cpu snapshot storage. Called from start_kernel() right after
+ * setup_per_cpu_areas() (see the init/main.c hunk above), i.e. before
+ * page_alloc_init()/kmem_cache_init(), which is why the storage is
+ * statically allocated rather than obtained from an allocator.
+ */
+void __init kstat_init(void)
+{
+ int i;
+
+ seqcount_init(&kstat_glob.nr_unint_avg_seq);
+ for_each_possible_cpu(i)
+ seqcount_init(per_cpu_ptr(&kstat_pcpu_seq, i));
+ /*
+ * 'cur' holds the raw per-cpu variable address; readers/writers are
+ * expected to resolve it with per_cpu_ptr() — TODO confirm at users.
+ */
+ kstat_glob.sched_lat.cur = &glob_kstat_lat;
+ kstat_glob.page_in.cur = &glob_kstat_page_in;
+ kstat_glob.swap_in.cur = &glob_kstat_swap_in;
+ for ( i = 0 ; i < KSTAT_ALLOCSTAT_NR ; i++)
+ kstat_glob.alloc_lat[i].cur = &alloc_kstat_lat[i];
+
+ kstat_glob.ttfp.cur = &kstat_pcpu_ttfp;
+ kstat_glob.cache_reap.cur = &kstat_pcpu_cache_reap;
+ kstat_glob.shrink_icache.cur = &kstat_pcpu_shrink_icache;
+ kstat_glob.shrink_dcache.cur = &kstat_pcpu_shrink_dcache;
+ kstat_glob.refill_inact.cur = &kstat_pcpu_refill_inact;
+}
+
#ifdef CONFIG_CFS_CPULIMIT
unsigned int task_nr_cpus(struct task_struct *p)
{
More information about the Devel
mailing list