[Devel] [PATCH RHEL7 COMMIT] mm: memcontrol: add stats for reclaimable and unreclaimable slab pages

Konstantin Khorenko khorenko at virtuozzo.com
Thu May 19 04:23:41 PDT 2016


The commit is pushed to "branch-rh7-3.10.0-327.18.2.vz7.14.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.18.2.vz7.14.4
------>
commit a41d64689af7e527e6502d80a35e9cdd0d588f57
Author: Vladimir Davydov <vdavydov at virtuozzo.com>
Date:   Thu May 19 15:23:41 2016 +0400

    mm: memcontrol: add stats for reclaimable and unreclaimable slab pages
    
    Required by vcmmd for estimating the size of available memory, which in
    turn is used for calculating the lower boundary for the memory limit.
    
    Also, these counters will be used by the following patches.
    
    Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
 mm/memcontrol.c | 22 ++++++++++++++++++++--
 1 file changed, 20 insertions(+), 2 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b577055..f42f770 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -99,6 +99,8 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
 	MEM_CGROUP_STAT_SHMEM,		/* # of charged shmem pages */
+	MEM_CGROUP_STAT_SLAB_RECLAIMABLE, /* # of reclaimable slab pages */
+	MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE, /* # of unreclaimable slab pages */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 };
@@ -109,6 +111,8 @@ static const char * const mem_cgroup_stat_names[] = {
 	"rss_huge",
 	"mapped_file",
 	"shmem",
+	"slab_reclaimable",
+	"slab_unreclaimable",
 	"swap",
 };
 
@@ -3222,8 +3226,10 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 
 int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size)
 {
+	int nr_pages = size >> PAGE_SHIFT;
 	struct mem_cgroup *memcg;
 	struct res_counter *fail_res;
+	int idx;
 	int ret;
 
 	VM_BUG_ON(is_root_cache(s));
@@ -3232,21 +3238,33 @@ int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size)
 	ret = memcg_charge_kmem(memcg, gfp, size);
 	if (ret)
 		return ret;
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+	if (s->flags & SLAB_RECLAIM_ACCOUNT) {
 		res_counter_charge_nofail(&memcg->dcache, size, &fail_res);
+		idx = MEM_CGROUP_STAT_SLAB_RECLAIMABLE;
+	} else
+		idx = MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE;
+
+	this_cpu_add(memcg->stat->count[idx], nr_pages);
 	return 0;
 }
 
 void __memcg_uncharge_slab(struct kmem_cache *s, unsigned size)
 {
+	int nr_pages = size >> PAGE_SHIFT;
 	struct mem_cgroup *memcg;
+	int idx;
 
 	VM_BUG_ON(is_root_cache(s));
 	memcg = s->memcg_params.memcg;
 
 	memcg_uncharge_kmem(memcg, size);
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+	if (s->flags & SLAB_RECLAIM_ACCOUNT) {
 		res_counter_uncharge(&memcg->dcache, size);
+		idx = MEM_CGROUP_STAT_SLAB_RECLAIMABLE;
+	} else
+		idx = MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE;
+
+	this_cpu_sub(memcg->stat->count[idx], nr_pages);
 }
 
 /*


More information about the Devel mailing list