[Devel] [PATCH RHEL7 COMMIT] ub: make cached and shmem meminfo output more accurate
Konstantin Khorenko
khorenko at virtuozzo.com
Fri May 13 08:13:55 PDT 2016
The commit is pushed to "branch-rh7-3.10.0-327.10.1.vz7.12.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.10.1.vz7.12.19
------>
commit 414a4cffc1a6d976d25a2da7f07004ea624f3466
Author: Vladimir Davydov <vdavydov at virtuozzo.com>
Date: Fri May 13 19:13:54 2016 +0400
ub: make cached and shmem meminfo output more accurate
Currently, these fields are faked using the file LRU size and UB_SHMPAGES.
The result leaves a lot to be desired, because UB_SHMPAGES accounts
every shared page as many times as it is mapped. Let's instead take
these values directly from the memory cgroup, where they are already
properly accounted.
Along the way, zap ub_page_stat and use mem_cgroup_get_nr_pages
directly.
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
Reviewed-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
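
Illustration (not part of the patch): a toy userspace model of the
accounting gap described in the commit message. A shmem page mapped by
N tasks is charged N times by a per-mapping counter such as UB_SHMPAGES,
but only once by a per-page statistic such as the memcg shmem counter.
All variable names below are invented for the example.

#include <stdio.h>

int main(void)
{
	unsigned long shmem_pages = 1024;	/* pages in one shared segment */
	int nr_mappings = 3;			/* tasks that mmap() the segment */

	/* UB_SHMPAGES-style accounting: charged once per mapping. */
	unsigned long per_mapping = shmem_pages * nr_mappings;

	/* memcg-style accounting: charged once per resident page. */
	unsigned long per_page = shmem_pages;

	printf("per-mapping (old): %lu pages\n", per_mapping);
	printf("per-page    (new): %lu pages\n", per_page);
	return 0;
}

With three mappings the old scheme reports 3072 pages for 1024 actual
pages, which is the overstatement the patch removes.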
 include/bc/beancounter.h |  3 ---
 kernel/bc/beancounter.c  | 15 ---------------
 kernel/bc/proc.c         | 15 ++++++++++-----
 kernel/bc/vm_pages.c     | 24 +++++++-----------------
 mm/memcontrol.c          | 19 +++++++++++++++++++
 5 files changed, 36 insertions(+), 40 deletions(-)
diff --git a/include/bc/beancounter.h b/include/bc/beancounter.h
index 8316cc4..8f3119d 100644
--- a/include/bc/beancounter.h
+++ b/include/bc/beancounter.h
@@ -288,9 +288,6 @@ extern void uncharge_warn(struct user_beancounter *ub, const char *resource,
 extern int ub_update_memcg(struct user_beancounter *ub);
 extern void ub_sync_memcg(struct user_beancounter *ub);
 
-extern void ub_page_stat(struct user_beancounter *ub,
-			 const nodemask_t *nodemask,
-			 unsigned long *pages);
 extern unsigned long ub_total_pages(struct user_beancounter *ub, bool swap);
 
 extern const char *ub_rnames[];
diff --git a/kernel/bc/beancounter.c b/kernel/bc/beancounter.c
index 90fc1dd..5023bd2 100644
--- a/kernel/bc/beancounter.c
+++ b/kernel/bc/beancounter.c
@@ -231,21 +231,6 @@ void ub_sync_memcg(struct user_beancounter *ub)
 	css_put(css);
 }
 
-void ub_page_stat(struct user_beancounter *ub, const nodemask_t *nodemask,
-		  unsigned long *pages)
-{
-	int nid;
-	struct cgroup_subsys_state *css;
-
-	memset(pages, 0, sizeof(unsigned long) * NR_LRU_LISTS);
-
-	css = ub_get_mem_css(ub);
-	for_each_node_mask(nid, *nodemask)
-		mem_cgroup_get_nr_pages(mem_cgroup_from_cont(css->cgroup),
-					nid, pages);
-	css_put(css);
-}
-
 unsigned long ub_total_pages(struct user_beancounter *ub, bool swap)
 {
 	struct cgroup_subsys_state *css;
diff --git a/kernel/bc/proc.c b/kernel/bc/proc.c
index 9e9fde4..4c79550 100644
--- a/kernel/bc/proc.c
+++ b/kernel/bc/proc.c
@@ -17,6 +17,7 @@
 #include <linux/mnt_namespace.h>
 #include <linux/lglock.h>
 #include <linux/ve.h>
+#include <linux/memcontrol.h>
 
 #include <bc/beancounter.h>
 #include <bc/proc.h>
@@ -129,18 +130,21 @@ static struct bc_proc_entry bc_meminfo_entry = {
 	.u.show = bc_proc_meminfo_show,
 };
 
+extern void mem_cgroup_get_nr_pages(struct mem_cgroup *memcg, int nid,
+				     unsigned long *pages);
+
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 static int bc_proc_nodeinfo_show(struct seq_file *f, void *v)
 {
 	int nid;
-	struct user_beancounter *ub;
+	struct cgroup_subsys_state *css;
 	unsigned long pages[NR_LRU_LISTS];
 
-	ub = seq_beancounter(f);
+	css = ub_get_mem_css(seq_beancounter(f));
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		nodemask_t nodemask = nodemask_of_node(nid);
-
-		ub_page_stat(ub, &nodemask, pages);
+		memset(pages, 0, sizeof(pages));
+		mem_cgroup_get_nr_pages(mem_cgroup_from_cont(css->cgroup),
+					nid, pages);
 		seq_printf(f,
 			   "Node %d Active: %8lu kB\n"
 			   "Node %d Inactive: %8lu kB\n"
@@ -159,6 +163,7 @@ static int bc_proc_nodeinfo_show(struct seq_file *f, void *v)
 			   nid, K(pages[LRU_INACTIVE_FILE]),
 			   nid, K(pages[LRU_UNEVICTABLE]));
 	}
+	css_put(css);
 	return 0;
 }
 #undef K
diff --git a/kernel/bc/vm_pages.c b/kernel/bc/vm_pages.c
index b46da98..5e588d1 100644
--- a/kernel/bc/vm_pages.c
+++ b/kernel/bc/vm_pages.c
@@ -159,22 +159,23 @@ static int bc_fill_sysinfo(struct user_beancounter *ub,
 	return NOTIFY_OK;
 }
 
+extern void mem_cgroup_fill_meminfo(struct mem_cgroup *memcg, struct meminfo *mi);
+
 static int bc_fill_meminfo(struct user_beancounter *ub,
 			   unsigned long meminfo_val, struct meminfo *mi)
 {
+	struct cgroup_subsys_state *css;
 	int cpu, ret;
-	long dcache;
 
 	ret = bc_fill_sysinfo(ub, meminfo_val, mi->si);
 	if (ret & NOTIFY_STOP_MASK)
 		goto out;
 
-	ub_sync_memcg(ub);
-	ub_page_stat(ub, &node_online_map, mi->pages);
+	css = ub_get_mem_css(ub);
+	mem_cgroup_fill_meminfo(mem_cgroup_from_cont(css->cgroup), mi);
+	css_put(css);
 
 	mi->locked = ub->ub_parms[UB_LOCKEDPAGES].held;
-	mi->shmem = ub->ub_parms[UB_SHMPAGES].held;
-	dcache = ub->ub_parms[UB_DCACHESIZE].held;
 
 	mi->dirty_pages = __ub_stat_get(ub, dirty_pages);
 	mi->writeback_pages = __ub_stat_get(ub, writeback_pages);
@@ -182,22 +183,11 @@ static int bc_fill_meminfo(struct user_beancounter *ub,
 		struct ub_percpu_struct *pcpu = ub_percpu(ub, cpu);
 
 		mi->dirty_pages += pcpu->dirty_pages;
-		mi->writeback_pages += pcpu->writeback_pages;
+		mi->writeback_pages += pcpu->writeback_pages;
 	}
 
 	mi->dirty_pages = max_t(long, 0, mi->dirty_pages);
 	mi->writeback_pages = max_t(long, 0, mi->writeback_pages);
-
-	mi->slab_reclaimable = DIV_ROUND_UP(max(0L, dcache), PAGE_SIZE);
-	mi->slab_unreclaimable =
-		DIV_ROUND_UP(max(0L, (long)ub->ub_parms[UB_KMEMSIZE].held -
-				 dcache), PAGE_SIZE);
-
-	mi->cached = min(mi->si->totalram - mi->si->freeram -
-			 mi->slab_reclaimable - mi->slab_unreclaimable,
-			 mi->pages[LRU_INACTIVE_FILE] +
-			 mi->pages[LRU_ACTIVE_FILE] +
-			 ub->ub_parms[UB_SHMPAGES].held);
 out:
 	return ret;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 034bc45..f603758 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -53,6 +53,7 @@
 #include <linux/page_cgroup.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
+#include <linux/virtinfo.h>
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
@@ -5002,6 +5003,24 @@ static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
 	return val;
 }
 
+void mem_cgroup_fill_meminfo(struct mem_cgroup *memcg, struct meminfo *mi)
+{
+	int nid;
+	unsigned long slab;
+
+	memset(&mi->pages, 0, sizeof(mi->pages));
+	for_each_online_node(nid)
+		mem_cgroup_get_nr_pages(memcg, nid, mi->pages);
+
+	slab = res_counter_read_u64(&memcg->kmem, RES_USAGE) >> PAGE_SHIFT;
+	mi->slab_reclaimable = res_counter_read_u64(&memcg->dcache, RES_USAGE)
+		>> PAGE_SHIFT;
+	mi->slab_unreclaimable = max_t(long, slab - mi->slab_reclaimable, 0);
+
+	mi->cached = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
+	mi->shmem = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SHMEM);
+}
+
 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 {
 	u64 val;
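
Usage note (illustrative, not from the commit): inside a container the
corrected Cached and Shmem values surface through the virtualized
/proc/meminfo that bc_fill_meminfo populates. A minimal reader,
assuming only the standard /proc/meminfo field names:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Print only the two fields this patch makes accurate. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "Cached:", 7) ||
		    !strncmp(line, "Shmem:", 6))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}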