[Devel] [PATCH RHEL7 COMMIT] Partially revert "tcp: Charge socket buffers into cg memory"

Konstantin Khorenko khorenko at virtuozzo.com
Mon May 2 07:05:59 PDT 2016


The commit is pushed to "branch-rh7-3.10.0-327.10.1.vz7.12.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.10.1.vz7.12.16
------>
commit 3d0f15be055d31aea23d70379efbe17df6ee8cc9
Author: Vladimir Davydov <vdavydov at virtuozzo.com>
Date:   Mon May 2 18:05:59 2016 +0400

    Partially revert "tcp: Charge socket buffers into cg memory"
    
    This partially reverts commit 4be12b4b184cd000b76883950267ab560ef4935d.
    
    As we are going to switch to the whitelist kmem accounting policy, there
    will be no need to pass __GFP_NOACCOUNT to avoid double accounting of
    tcp socket buffers.
    
    Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
 net/ipv4/tcp.c        | 5 -----
 net/ipv4/tcp_input.c  | 2 +-
 net/ipv4/tcp_output.c | 6 +++---
 3 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9e43c8b..2b54ad8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -414,11 +414,6 @@ void tcp_init_sock(struct sock *sk)
 
 	sk->sk_write_space = sk_stream_write_space;
 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
-	/*
-	 * TCP memory is accounted via cg_proto and there's
-	 * no need in additional kmem charging via slub
-	 */
-	sk->sk_allocation |= __GFP_NOACCOUNT;
 
 	icsk->icsk_sync_mss = tcp_sync_mss;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fd3704e..d987f31 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4587,7 +4587,7 @@ restart:
 		int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
 		struct sk_buff *nskb;
 
-		nskb = alloc_skb(copy, GFP_ATOMIC|__GFP_NOACCOUNT);
+		nskb = alloc_skb(copy, GFP_ATOMIC);
 		if (!nskb)
 			return;
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7bf7e1a..95c0b50 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1124,7 +1124,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
-	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC|__GFP_NOACCOUNT);
+	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
 	if (buff == NULL)
 		return -ENOMEM; /* We'll just try again later. */
 
@@ -1675,7 +1675,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	if (skb->len != skb->data_len)
 		return tcp_fragment(sk, skb, len, mss_now);
 
-	buff = sk_stream_alloc_skb(sk, 0, gfp|__GFP_NOACCOUNT);
+	buff = sk_stream_alloc_skb(sk, 0, gfp);
 	if (unlikely(buff == NULL))
 		return -ENOMEM;
 
@@ -1848,7 +1848,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	}
 
 	/* We're allowed to probe.  Build it now. */
-	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC|__GFP_NOACCOUNT)) == NULL)
+	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
 		return -1;
 	sk->sk_wmem_queued += nskb->truesize;
 	sk_mem_charge(sk, nskb->truesize);
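
For context, a minimal userspace sketch of the policy difference the changelog
refers to: under the old opt-out policy every kmem allocation was charged to
the cgroup unless the caller passed __GFP_NOACCOUNT, while under a whitelist
policy only allocations explicitly marked for accounting (upstream this is
__GFP_ACCOUNT) are charged, so a plain GFP_ATOMIC skb allocation is no longer
double-charged and the flag can be dropped. The flag values and helper below
are hypothetical stand-ins for illustration, not kernel code.

/*
 * Simulation of opt-out vs. whitelist kmem accounting decisions.
 * SIM_* flags and charged_to_memcg() are made up for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define SIM_GFP_ATOMIC    0x1  /* stand-in for GFP_ATOMIC */
#define SIM_GFP_NOACCOUNT 0x2  /* stand-in for __GFP_NOACCOUNT (opt-out policy) */
#define SIM_GFP_ACCOUNT   0x4  /* stand-in for __GFP_ACCOUNT (whitelist policy) */

enum policy { OPT_OUT, WHITELIST };

/* Decide whether an allocation would be charged to the memory cgroup. */
static bool charged_to_memcg(enum policy p, unsigned int gfp)
{
	if (p == OPT_OUT)
		return !(gfp & SIM_GFP_NOACCOUNT); /* charged unless opted out */
	return gfp & SIM_GFP_ACCOUNT;              /* charged only if opted in */
}

int main(void)
{
	/* TCP skb allocations in the hunks above end up as plain GFP_ATOMIC. */
	unsigned int tcp_skb_gfp = SIM_GFP_ATOMIC;

	printf("opt-out,   GFP_ATOMIC            -> charged: %d\n",
	       charged_to_memcg(OPT_OUT, tcp_skb_gfp));
	printf("opt-out,   GFP_ATOMIC|NOACCOUNT  -> charged: %d\n",
	       charged_to_memcg(OPT_OUT, tcp_skb_gfp | SIM_GFP_NOACCOUNT));
	printf("whitelist, GFP_ATOMIC            -> charged: %d\n",
	       charged_to_memcg(WHITELIST, tcp_skb_gfp));
	return 0;
}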

