[Devel] [PATCH rh7 2/3] net: Allow passing a cpu mask into snmp_fold_field{,64}()

Kirill Tkhai ktkhai at virtuozzo.com
Thu Aug 25 06:37:19 PDT 2016


This allows callers to pass cpu_online_mask instead of cpu_possible_mask.

Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 include/net/ip.h   |   17 +++++++++++++++--
 net/ipv4/af_inet.c |   12 +++++++-----
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/include/net/ip.h b/include/net/ip.h
index ac68d69..d6676e2 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -200,14 +200,27 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long __snmp_fold_field(void __percpu *mib[], int offt, const struct cpumask *mask);
+static inline unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+{
+	return __snmp_fold_field(mib, offt, cpu_possible_mask);
+}
 #if BITS_PER_LONG==32
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off,
+			const struct cpumask *mask);
+static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
+{
+	return __snmp_fold_field64(mib, offt, syncp_off, cpu_possible_mask);
+}
 #else
 static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
 }
+static inline u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off, const struct cpumask *mask)
+{
+	return __snmp_fold_field(mib, offt, mask);
+}
 #endif
 int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 69f4b98..d009c3f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1447,27 +1447,29 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+unsigned long __snmp_fold_field(void __percpu *mib[], int offt,
+				const struct cpumask *mask)
 {
 	unsigned long res = 0;
 	int i, j;
 
-	for_each_possible_cpu(i) {
+	for_each_cpu(i, mask) {
 		for (j = 0; j < SNMP_ARRAY_SZ; j++)
 			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
 	}
 	return res;
 }
-EXPORT_SYMBOL_GPL(snmp_fold_field);
+EXPORT_SYMBOL_GPL(__snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset,
+			const struct cpumask *mask)
 {
 	u64 res = 0;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, mask) {
 		void *bhptr;
 		struct u64_stats_sync *syncp;
 		u64 v;



More information about the Devel mailing list