[Devel] [PATCH RHEL7 COMMIT] vznetstat: Port diff-vznetstat-support-of-resetting-of-traffic-statistics-on-running-CTs

Konstantin Khorenko khorenko at virtuozzo.com
Wed Jun 24 03:41:57 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-123.1.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-123.1.2.vz7.5.17
------>
commit 12fa3fb3c38d280b669e024d48ab11267640fb8c
Author: Vladimir Davydov <vdavydov at parallels.com>
Date:   Wed Jun 24 14:41:57 2015 +0400

    vznetstat: Port diff-vznetstat-support-of-resetting-of-traffic-statistics-on-running-CTs
    
    Author: Kirill Tkhai
    Email: ktkhai at parallels.com
    Subject: vznetstat: support of resetting of traffic statistics on running CTs
    Date: Thu, 13 Mar 2014 12:47:57 +0400
    
    This adds the ability to clear a VE's venet_stat while the container
    is running. Currently, the only way to do that is to reboot the CT,
    which completely destroys the statistics.
    
    The patch implements two new ioctls: VZCTL_TC_CLEAR_STAT, which
    clears a single VE's statistics, and VZCTL_TC_CLEAR_ALL_STAT, which
    clears all available statistics.
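    
    As an illustration, a minimal userspace sketch of the new ioctls
    (a sketch only: it assumes the ioctls are issued on the /dev/vzctl
    control device, and CT id 101 is hypothetical; the VE id is passed
    directly as the ioctl argument, as venet_acct_ioctl() below
    suggests):
    
        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/vzctl_netstat.h>
    
        int main(void)
        {
            unsigned int veid = 101;    /* hypothetical CT id */
            int fd, ret;
    
            fd = open("/dev/vzctl", O_RDWR);    /* assumed control device */
            if (fd < 0) {
                perror("open");
                return 1;
            }
    
            /* Clear the traffic statistics of one running CT... */
            ret = ioctl(fd, VZCTL_TC_CLEAR_STAT, veid);
            if (ret)
                perror("VZCTL_TC_CLEAR_STAT");
    
            /* ...or clear the statistics of all CTs at once. */
            ret = ioctl(fd, VZCTL_TC_CLEAR_ALL_STAT, 0);
            if (ret)
                perror("VZCTL_TC_CLEAR_ALL_STAT");
    
            close(fd);
            return 0;
        }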
    
    We schedule the clearing work using schedule_on_each_cpu(). This
    function is not exported, so the patch exports it. The scheduled
    work runs with SMP hotplug disabled, so we do not need an
    additional export of {get,put}_online_cpus(), which are GPL-only,
    to clear statistics on temporarily offline CPUs. Otherwise we
    would have to export them (or at least something like them,
    because the kernel does not allow non-GPL modules to control
    hotplug).
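    
    For reference, a simplified sketch of what schedule_on_each_cpu()
    does in this kernel (kernel/workqueue.c, error handling trimmed):
    hotplug stays disabled until every queued work has finished, which
    is why the work functions added below may safely inspect
    cpu_online_mask:
    
        int schedule_on_each_cpu(work_func_t func)
        {
            int cpu;
            struct work_struct __percpu *works;
    
            works = alloc_percpu(struct work_struct);
            if (!works)
                return -ENOMEM;
    
            get_online_cpus();    /* block CPU hotplug... */
    
            for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
    
                INIT_WORK(work, func);
                schedule_work_on(cpu, work);
            }
    
            for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
    
            put_online_cpus();    /* ...only after all works completed */
            free_percpu(works);
            return 0;
        }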
    
    Also add the __percpu annotation to the statistics pointers in
    struct venet_stat, because they point to per-CPU data.
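    
    The __percpu annotation is for static checking: sparse warns when
    such a pointer is dereferenced without going through the per-CPU
    accessors. A minimal sketch of the pattern, with hypothetical
    names (it mirrors how zero_venet_stat() below uses per_cpu_ptr()):
    
        #include <linux/percpu.h>
        #include <linux/string.h>
    
        struct acct_counters {        /* hypothetical structure */
            unsigned long bytes;
            unsigned long packets;
        };
    
        static struct acct_counters __percpu *counters;
    
        static int counters_init(void)
        {
            /* Allocates one instance of the structure per CPU. */
            counters = alloc_percpu(struct acct_counters);
            return counters ? 0 : -ENOMEM;
        }
    
        static void counters_reset_cpu(unsigned int cpu)
        {
            /*
             * per_cpu_ptr() converts the __percpu cookie into an
             * ordinary pointer to the given CPU's copy; a direct
             * dereference of "counters" would get a sparse warning.
             */
            memset(per_cpu_ptr(counters, cpu), 0,
                   sizeof(struct acct_counters));
        }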
    
    More info in the JIRA request https://jira.sw.ru/browse/PSBM-21243
    
    Signed-off-by: Kirill Tkhai <ktkhai at parallels.com>
    
    Acked-by: Vladimir Davydov <vdavydov at parallels.com>
    =============================================================================
    
    Author: Kirill Tkhai
    Email: ktkhai at parallels.com
    Subject: workqueue: export schedule_on_each_cpu() for vznetstat
    Date: Thu, 13 Mar 2014 12:47:57 +0400
    
    This is a subsidiary patch that just exports a function for the
    patch above:
    diff-vznetstat-support-of-resetting-of-traffic-statistics-on-running-CTs
    
    Signed-off-by: Kirill Tkhai <ktkhai at parallels.com>
    
    Acked-by: Vladimir Davydov <vdavydov at parallels.com>
    =============================================================================
    
    Related to https://jira.sw.ru/browse/PSBM-33650
    
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 include/linux/vznetstat.h          |   4 +-
 include/uapi/linux/vzctl_netstat.h |   3 ++
 kernel/ve/vznetstat/vznetstat.c    | 107 +++++++++++++++++++++++++++++++++++++
 kernel/workqueue.c                 |   1 +
 4 files changed, 113 insertions(+), 2 deletions(-)

diff --git a/include/linux/vznetstat.h b/include/linux/vznetstat.h
index b6627cc..11b8419 100644
--- a/include/linux/vznetstat.h
+++ b/include/linux/vznetstat.h
@@ -38,8 +38,8 @@ struct venet_stat {
 	unsigned long flags;
 	atomic_t users;
 
-	struct acct_stat *ipv4_stat;
-	struct acct_stat *ipv6_stat;
+	struct acct_stat __percpu *ipv4_stat;
+	struct acct_stat __percpu *ipv6_stat;
 };
 
 static inline int venet_acct_skb_size(struct sk_buff *skb)
diff --git a/include/uapi/linux/vzctl_netstat.h b/include/uapi/linux/vzctl_netstat.h
index 124c338..b81435e 100644
--- a/include/uapi/linux/vzctl_netstat.h
+++ b/include/uapi/linux/vzctl_netstat.h
@@ -83,6 +83,9 @@ struct vzctl_tc_set_base {
 
 #define VZCTL_TC_CLASS_NUM_V6		_IO(VZTCCTLTYPE, 16)
 
+#define VZCTL_TC_CLEAR_STAT		_IO(VZTCCTLTYPE, 17)
+#define VZCTL_TC_CLEAR_ALL_STAT		_IO(VZTCCTLTYPE, 18)
+
 #ifdef __KERNEL__
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
diff --git a/kernel/ve/vznetstat/vznetstat.c b/kernel/ve/vznetstat/vznetstat.c
index dcdd85e..2f97b2c 100644
--- a/kernel/ve/vznetstat/vznetstat.c
+++ b/kernel/ve/vznetstat/vznetstat.c
@@ -398,6 +398,106 @@ static void venet_acct_destroy_all_stat(void)
 	write_unlock_irq(&tc_lock);
 }
 
+static DEFINE_MUTEX(req_mutex);
+static struct venet_stat *req_stat;
+
+static void zero_venet_stat(struct venet_stat *stat, unsigned cpu)
+{
+	struct acct_stat *acct;
+
+	acct = per_cpu_ptr(stat->ipv4_stat, cpu);
+	memset(acct, 0, sizeof(*acct));
+	acct = per_cpu_ptr(stat->ipv6_stat, cpu);
+	memset(acct, 0, sizeof(*acct));
+}
+
+static void clear_one_percpu_statistics(struct work_struct *dummy)
+{
+	unsigned cpu, this_cpu = get_cpu();
+
+	zero_venet_stat(req_stat, this_cpu);
+
+	if (cpumask_first(cpu_online_mask) != this_cpu)
+		goto out;
+
+	/* First cpu clears statistics on all offline cpus */
+	for_each_possible_cpu(cpu)
+		if (!cpu_online(cpu))
+			zero_venet_stat(req_stat, cpu);
+out:
+	put_cpu();
+}
+
+/* Clear VE's statistics */
+static int venet_acct_clear_stat(envid_t veid)
+{
+	int ret = -EINTR;
+
+	if (mutex_lock_interruptible(&req_mutex))
+		goto out;
+
+	req_stat = venet_acct_find_stat(veid);
+	if (!req_stat) {
+		ret = -ESRCH;
+		goto unlock;
+	}
+
+	ret = schedule_on_each_cpu(clear_one_percpu_statistics);
+
+	venet_acct_put_stat(req_stat);
+unlock:
+	mutex_unlock(&req_mutex);
+out:
+	return ret;
+}
+
+static void clear_all_percpu_statistics(struct work_struct *dummy)
+{
+	unsigned cpu, this_cpu = smp_processor_id();
+	struct venet_stat *stat = NULL;
+	int other = 0, hash = 0;
+
+	/*
+	 * Some cpus may be offline, and schedule_on_each_cpu()
+	 * does not schedule a work item on them, so the work
+	 * running on the first online cpu clears their statistics.
+	 * Hotplug is disabled by schedule_on_each_cpu().
+	 */
+	if (cpumask_first(cpu_online_mask) == this_cpu)
+		other = 1;
+
+	read_lock(&tc_lock);
+
+	while ((stat = next_stat(&hash, stat)) != NULL) {
+		zero_venet_stat(stat, this_cpu);
+
+		if (!other)
+			continue;
+
+		/* Clear statistics on offline cpus */
+		for_each_possible_cpu(cpu)
+			if (!cpu_online(cpu))
+				zero_venet_stat(stat, cpu);
+	}
+
+	read_unlock(&tc_lock);
+}
+
+/* Clear all present statistics */
+static int venet_acct_clear_all_stat(void)
+{
+	int ret = -EINTR;
+
+	if (mutex_lock_interruptible(&req_mutex))
+		goto out;
+
+	ret = schedule_on_each_cpu(clear_all_percpu_statistics);
+
+	mutex_unlock(&req_mutex);
+out:
+	return ret;
+}
+
 static int venet_acct_get_stat_list(envid_t *__list, int length)
 {
 	int hash;
@@ -748,6 +848,13 @@ static int venet_acct_ioctl(struct file *file, unsigned int cmd,
 			err = 0;
 			venet_acct_destroy_all_stat();
 			break;
+		case VZCTL_TC_CLEAR_STAT:
+			err = venet_acct_clear_stat(arg);
+			break;
+		case VZCTL_TC_CLEAR_ALL_STAT:
+			err = venet_acct_clear_all_stat();
+			break;
+
 		case VZCTL_TC_GET_BASE:
 			err = venet_acct_get_base(arg);
 			break;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 68086a3..92bc5fb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2996,6 +2996,7 @@ int schedule_on_each_cpu(work_func_t func)
 	free_percpu(works);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(schedule_on_each_cpu);
 
 /**
  * flush_scheduled_work - ensure that any scheduled work has run to completion.


