[Devel] [PATCH rh8 3/3] vecalls: Introduce VZCTL_GET_CPU_STAT ioctl
Konstantin Khorenko
khorenko at virtuozzo.com
Tue Nov 10 12:44:55 MSK 2020
This vzctl ioctl is still used by the vzstat utility and by
dispatcher/libvirt statistics reporting.

From one point of view, almost all of this data could be obtained from
the Container's cpu cgroup (and the missing bits could be exported there
as well), but the statistics are gathered frequently, and the ioctl path
is faster and needs less cpu power, so keep it for now.
This patch is based on the following vz7 commits:
ecdce58b214c ("sched: Export per task_group statistics_work")
a58fb58bff1c ("Use ve init task's css instead of opening cgroup via vfs")
75fc174adc36 ("sched: Port cpustat related patches")
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
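For reference, below is a rough user-space sketch of how a consumer such
as vzstat might call this ioctl. It is an illustration only: it assumes
the usual /dev/vzctl device node, and it assumes VZCTL_GET_CPU_STAT,
struct vzctl_cpustatctl and struct vz_cpu_stat are visible via
<linux/vzcalluser.h>; use whatever UAPI header the installed kernel
actually ships.

/*
 * Hypothetical sketch: query per-Container CPU statistics via the
 * VZCTL_GET_CPU_STAT ioctl and print a couple of the returned fields.
 * The header and device node below are assumptions, not guaranteed by
 * this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vzcalluser.h>	/* assumed location of the vzctl ioctl ABI */

int main(int argc, char **argv)
{
	struct vz_cpu_stat stat;
	struct vzctl_cpustatctl ctl;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <CTID>\n", argv[0]);
		return 1;
	}

	ctl.veid = atoi(argv[1]);	/* Container to query */
	ctl.cpustat = &stat;		/* filled in by the kernel */

	fd = open("/dev/vzctl", O_RDONLY);
	if (fd < 0 || ioctl(fd, VZCTL_GET_CPU_STAT, &ctl) < 0) {
		perror("VZCTL_GET_CPU_STAT");
		return 1;
	}
	close(fd);

	printf("uptime %lu jif, loadavg %lu.%02lu\n",
	       (unsigned long)stat.uptime_jif,
	       (unsigned long)stat.avenrun[0].val_int,
	       (unsigned long)stat.avenrun[0].val_frac);
	return 0;
}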
include/linux/ve.h | 2 ++
kernel/time/time.c | 1 +
kernel/ve/ve.c | 18 +++++++++++++
kernel/ve/vecalls.c | 66 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 87 insertions(+)
diff --git a/include/linux/ve.h b/include/linux/ve.h
index 656ee43e383e..7cb416f342e7 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -201,10 +201,12 @@ struct seq_file;
#if defined(CONFIG_VE) && defined(CONFIG_CGROUP_SCHED)
int ve_show_cpu_stat(struct ve_struct *ve, struct seq_file *p);
int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p);
+int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avenrun);
int ve_get_cpu_stat(struct ve_struct *ve, struct kernel_cpustat *kstat);
#else
static inline int ve_show_cpu_stat(struct ve_struct *ve, struct seq_file *p) { return -ENOSYS; }
static inline int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p) { return -ENOSYS; }
+static inline int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avenrun) { return -ENOSYS; }
static inline int ve_get_cpu_stat(struct ve_struct *ve, struct kernel_cpustat *kstat) { return -ENOSYS; }
#endif
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2b41e8e2d31d..ff1db0ba0c39 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -770,6 +770,7 @@ u64 nsec_to_clock_t(u64 x)
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
+EXPORT_SYMBOL(nsec_to_clock_t);
u64 jiffies64_to_nsecs(u64 j)
{
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index a9afefc5b9de..29e98e6396dc 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -1430,6 +1430,24 @@ int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p)
return err;
}
+inline struct task_group *css_tg(struct cgroup_subsys_state *css);
+int get_avenrun_tg(struct task_group *tg, unsigned long *loads,
+ unsigned long offset, int shift);
+
+int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avnrun)
+{
+ struct cgroup_subsys_state *css;
+ struct task_group *tg;
+ int err;
+
+ css = ve_get_init_css(ve, cpu_cgrp_id);
+ tg = css_tg(css);
+ err = get_avenrun_tg(tg, avnrun, 0, 0);
+ css_put(css);
+ return err;
+}
+EXPORT_SYMBOL(ve_get_cpu_avenrun);
+
int cpu_cgroup_get_stat(struct cgroup_subsys_state *cpu_css,
struct cgroup_subsys_state *cpuacct_css,
struct kernel_cpustat *kstat);
diff --git a/kernel/ve/vecalls.c b/kernel/ve/vecalls.c
index 3258b49b15b2..786a743faa1a 100644
--- a/kernel/ve/vecalls.c
+++ b/kernel/ve/vecalls.c
@@ -22,6 +22,8 @@
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
+#include <linux/jiffies.h>
+#include <linux/sched/loadavg.h>
#include <generated/utsrelease.h>
#include <linux/ve.h>
@@ -35,6 +37,62 @@ static u64 ve_get_uptime(struct ve_struct *ve)
return ktime_get_boot_ns() - ve->real_start_time;
}
+static int fill_cpu_stat(envid_t veid, struct vz_cpu_stat __user *buf)
+{
+ struct ve_struct *ve;
+ struct vz_cpu_stat *vstat;
+ int retval;
+ int i;
+ unsigned long tmp;
+ unsigned long avnrun[3];
+ struct kernel_cpustat kstat;
+
+ if (!ve_is_super(get_exec_env()) && (veid != get_exec_env()->veid))
+ return -EPERM;
+ ve = get_ve_by_id(veid);
+ if (!ve)
+ return -ESRCH;
+
+ retval = -ENOMEM;
+ vstat = kzalloc(sizeof(*vstat), GFP_KERNEL);
+ if (!vstat)
+ goto out_put_ve;
+
+ retval = ve_get_cpu_stat(ve, &kstat);
+ if (retval)
+ goto out_free;
+
+ retval = ve_get_cpu_avenrun(ve, avnrun);
+ if (retval)
+ goto out_free;
+
+ vstat->user_jif = (unsigned long)nsec_to_clock_t(
+ kstat.cpustat[CPUTIME_USER]);
+ vstat->nice_jif = (unsigned long)nsec_to_clock_t(
+ kstat.cpustat[CPUTIME_NICE]);
+ vstat->system_jif = (unsigned long)nsec_to_clock_t(
+ kstat.cpustat[CPUTIME_SYSTEM]);
+ vstat->idle_clk = kstat.cpustat[CPUTIME_IDLE];
+ vstat->uptime_clk = ve_get_uptime(ve);
+
+ vstat->uptime_jif = (unsigned long)jiffies_64_to_clock_t(
+ get_jiffies_64() - ve->start_jiffies);
+ for (i = 0; i < 3; i++) {
+ tmp = avnrun[i] + (FIXED_1/200);
+ vstat->avenrun[i].val_int = LOAD_INT(tmp);
+ vstat->avenrun[i].val_frac = LOAD_FRAC(tmp);
+ }
+
+ retval = 0;
+ if (copy_to_user(buf, vstat, sizeof(*vstat)))
+ retval = -EFAULT;
+out_free:
+ kfree(vstat);
+out_put_ve:
+ put_ve(ve);
+ return retval;
+}
+
/**********************************************************************
**********************************************************************
*
@@ -365,6 +423,14 @@ int vzcalls_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = -ENOTTY;
switch(cmd) {
+ case VZCTL_GET_CPU_STAT: {
+ struct vzctl_cpustatctl s;
+ err = -EFAULT;
+ if (copy_from_user(&s, (void __user *)arg, sizeof(s)))
+ break;
+ err = fill_cpu_stat(s.veid, s.cpustat);
+ }
+ break;
case VZCTL_VE_CONFIGURE:
err = ve_configure_ioctl((struct vzctl_ve_configure *)arg);
break;
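A side note on the avenrun conversion in fill_cpu_stat() above: the
per-task_group load averages returned by get_avenrun_tg() are fixed-point
values scaled by FIXED_1 (1 << FSHIFT, with FSHIFT == 11), and the
LOAD_INT()/LOAD_FRAC() macros from <linux/sched/loadavg.h> split such a
value into an integer part and a two-digit fraction; adding FIXED_1/200
beforehand rounds to the nearest 0.01, the same way /proc/loadavg does it.
A minimal stand-alone illustration (the macros are copied here for
clarity, the sample value is made up):

#include <stdio.h>

#define FSHIFT		11			/* bits of fractional precision */
#define FIXED_1		(1 << FSHIFT)		/* 1.0 in fixed point (2048) */
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	unsigned long avnrun = 2560;			/* 1.25 in fixed point */
	unsigned long tmp = avnrun + FIXED_1 / 200;	/* round to nearest 0.01 */

	printf("load: %lu.%02lu\n", LOAD_INT(tmp), LOAD_FRAC(tmp));	/* prints 1.25 */
	return 0;
}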
--
2.28.0