[Devel] [PATCH RHEL7 COMMIT] ve/memcg: add function to get max mem+swap

Konstantin Khorenko khorenko at virtuozzo.com
Mon Jun 8 09:09:36 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-123.1.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-123.1.2.vz7.5.11
------>
commit 8545a296f7fc23e47802c6f52a480ec0726bd445
Author: Vladimir Davydov <vdavydov at parallels.com>
Date:   Mon Jun 8 20:09:35 2015 +0400

    ve/memcg: add function to get max mem+swap
    
    Series description:
    
    This patch set fixes /proc/PID/{oom_score_adj,oom_adj,oom_score} behavior
    inside a CT, resurrecting /proc/vz/oom_score_adj along the way. For more
    details, see the individual patches.
    
    https://jira.sw.ru/browse/PSBM-33849
    ====================================================================
    This patch description:
    
    This patch renames mem_cgroup_ram_pages to mem_cgroup_total_pages and
    adds a bool argument to it, swap, which, when set, makes the function
    return the mem+swap limit instead of the mem limit.
    
    This is required for showing a correct /proc/PID/oom_score inside a CT,
    which is the job of the following patch.
    
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
    Acked-by: Andrew Vagin <avagin at odin.com>
---
 include/linux/memcontrol.h | 4 ++--
 kernel/bc/io_acct.c        | 2 +-
 mm/memcontrol.c            | 8 ++++++--
 mm/shmem.c                 | 2 +-
 4 files changed, 10 insertions(+), 6 deletions(-)
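
For reference (not part of the patch): the bool argument added below selects
between the memcg res and memsw resource counters, which cgroup v1 exposes to
userspace as memory.limit_in_bytes and memory.memsw.limit_in_bytes. A minimal
userspace sketch of the same distinction, assuming a standard cgroup v1 memory
controller mount and swap accounting enabled (otherwise the memsw file is
absent):

/*
 * Illustration only: print roughly what mem_cgroup_total_pages(false)
 * and mem_cgroup_total_pages(true) report for the top-level memory
 * cgroup, by reading the cgroup v1 limit files and converting to pages.
 */
#include <stdio.h>
#include <unistd.h>

static unsigned long long read_bytes(const char *path)
{
	unsigned long long val = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	unsigned long long page = sysconf(_SC_PAGESIZE);

	/* mem limit: RES_LIMIT of memcg->res */
	printf("mem limit:      %llu pages\n",
	       read_bytes("/sys/fs/cgroup/memory/memory.limit_in_bytes") / page);
	/* mem+swap limit: RES_LIMIT of memcg->memsw */
	printf("mem+swap limit: %llu pages\n",
	       read_bytes("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") / page);
	return 0;
}

With no limit set, both files report the resource counter maximum (a very
large value) rather than a meaningful page count.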

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2e2bb2a..ae28833 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -128,7 +128,7 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 					struct task_struct *p);
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
 					struct page *newpage);
-unsigned long mem_cgroup_ram_pages(void);
+unsigned long mem_cgroup_total_pages(bool swap);
 
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
@@ -403,7 +403,7 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
 {
 }
 
-static inline unsigned long mem_cgroup_ram_pages(void)
+static inline unsigned long mem_cgroup_total_pages(bool swap)
 {
 	return ULONG_MAX;
 }
diff --git a/kernel/bc/io_acct.c b/kernel/bc/io_acct.c
index ee30fea..50f8fb7 100644
--- a/kernel/bc/io_acct.c
+++ b/kernel/bc/io_acct.c
@@ -135,7 +135,7 @@ int ub_dirty_limits(unsigned long *pbackground,
 	if (!dirty_ratio)
 		return 0;
 
-	available_memory = mem_cgroup_ram_pages();
+	available_memory = mem_cgroup_total_pages(false);
 	if (available_memory == ULONG_MAX || available_memory == 0)
 		return 0;
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f570e7c..845e48d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1663,8 +1663,11 @@ void mem_cgroup_note_oom_kill(struct mem_cgroup *root_memcg,
  * cgroup. If it is changed, this function must be reworked. E.g. we could
  * assign a memory cgroup to each ve or beancounter cgroup and get the memory
  * cgroup of a container from get_exec_env() or get_exec_ub().
+ *
+ * If @swap is true, this function returns the total number of memory + swap
+ * pages available.
  */
-unsigned long mem_cgroup_ram_pages(void)
+unsigned long mem_cgroup_total_pages(bool swap)
 {
 	struct mem_cgroup *memcg_to_put, *memcg, *parent;
 	unsigned long long limit = RESOURCE_MAX;
@@ -1677,7 +1680,8 @@ unsigned long mem_cgroup_ram_pages(void)
 	       parent != root_mem_cgroup)
 		memcg = parent;
 
-	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+	limit = swap ? res_counter_read_u64(&memcg->memsw, RES_LIMIT) :
+			res_counter_read_u64(&memcg->res, RES_LIMIT);
 out:
 	if (memcg_to_put)
 		css_put(&memcg_to_put->css);
diff --git a/mm/shmem.c b/mm/shmem.c
index 88e070a..ca7f12a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -111,7 +111,7 @@ static unsigned long tmpfs_ram_pages(void)
 	if (ve_is_super(get_exec_env()))
 		return totalram_pages;
 
-	memcg_rampages = mem_cgroup_ram_pages();
+	memcg_rampages = mem_cgroup_total_pages(false);
 	return min(totalram_pages, memcg_rampages);
 }
 


