[Devel] [PATCH rh7 1/3] mm: memcontrol: add stats for reclaimable and unreclaimable slab pages

Vladimir Davydov <vdavydov@virtuozzo.com>
Thu May 12 07:11:04 PDT 2016


Add two new memory cgroup statistics, slab_reclaimable and
slab_unreclaimable, which track the number of slab pages charged to a
cgroup and are reported in memory.stat.

These counters are required by vcmmd for estimating the amount of
available memory, which in turn is used for calculating the lower
boundary for the memory limit. They will also be used by the following
patches in this series.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
---
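A note for reviewers (not intended for the changelog): below is a minimal
userspace sketch of how a management daemon such as vcmmd might consume
the new counters. It is illustrative only, not actual vcmmd code; the
cgroup mount point and group name are assumptions. memory.stat reports
these counters in bytes (the per-page values are scaled by PAGE_SIZE on
output).

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed cgroup-v1 mount point and group; adjust as needed. */
	const char *path = "/sys/fs/cgroup/memory/test/memory.stat";
	char key[64];
	unsigned long long val;
	unsigned long long reclaimable = 0, unreclaimable = 0;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* memory.stat is a sequence of "<name> <value>" lines. */
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "slab_reclaimable"))
			reclaimable = val;
		else if (!strcmp(key, "slab_unreclaimable"))
			unreclaimable = val;
	}
	fclose(f);

	/*
	 * Reclaimable slab (e.g. dentries and inodes) can be shrunk
	 * under pressure, so an "available memory" estimate may count
	 * it; unreclaimable slab has to be treated as pinned.
	 */
	printf("slab_reclaimable:   %llu bytes\n", reclaimable);
	printf("slab_unreclaimable: %llu bytes\n", unreclaimable);
	return 0;
}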
 mm/memcontrol.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b57705523be1..f42f7708d19d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -99,6 +99,8 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
 	MEM_CGROUP_STAT_SHMEM,		/* # of charged shmem pages */
+	MEM_CGROUP_STAT_SLAB_RECLAIMABLE, /* # of reclaimable slab pages */
+	MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE, /* # of unreclaimable slab pages */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
 };
@@ -109,6 +111,8 @@ static const char * const mem_cgroup_stat_names[] = {
 	"rss_huge",
 	"mapped_file",
 	"shmem",
+	"slab_reclaimable",
+	"slab_unreclaimable",
 	"swap",
 };
 
@@ -3222,8 +3226,10 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
 
 int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size)
 {
+	int nr_pages = size >> PAGE_SHIFT;
 	struct mem_cgroup *memcg;
 	struct res_counter *fail_res;
+	int idx;
 	int ret;
 
 	VM_BUG_ON(is_root_cache(s));
@@ -3232,21 +3238,35 @@ int __memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, unsigned size)
 	ret = memcg_charge_kmem(memcg, gfp, size);
 	if (ret)
 		return ret;
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+	if (s->flags & SLAB_RECLAIM_ACCOUNT) {
 		res_counter_charge_nofail(&memcg->dcache, size, &fail_res);
+		idx = MEM_CGROUP_STAT_SLAB_RECLAIMABLE;
+	} else {
+		idx = MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE;
+	}
+
+	this_cpu_add(memcg->stat->count[idx], nr_pages);
 	return 0;
 }
 
 void __memcg_uncharge_slab(struct kmem_cache *s, unsigned size)
 {
+	int nr_pages = size >> PAGE_SHIFT;
 	struct mem_cgroup *memcg;
+	int idx;
 
 	VM_BUG_ON(is_root_cache(s));
 	memcg = s->memcg_params.memcg;
 
 	memcg_uncharge_kmem(memcg, size);
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+	if (s->flags & SLAB_RECLAIM_ACCOUNT) {
 		res_counter_uncharge(&memcg->dcache, size);
+		idx = MEM_CGROUP_STAT_SLAB_RECLAIMABLE;
+	} else {
+		idx = MEM_CGROUP_STAT_SLAB_UNRECLAIMABLE;
+	}
+
+	this_cpu_sub(memcg->stat->count[idx], nr_pages);
 }
 
 /*
-- 
2.1.4


