[Devel] [PATCH rh7 2/7] mm: memcontrol: drop memcg_kmem_commit_charge
From: Vladimir Davydov <vdavydov at virtuozzo.com>
Date: Mon May 30 07:23:18 PDT 2016
To account a kmem page to memcg, we currently first try to charge it
using memcg_kmem_newpage_charge, which returns the current memcg. If the
charge succeeds, we allocate the page, and finally we associate the page
with the memcg using memcg_kmem_commit_charge. There is not much point
in such a design: nothing would go wrong if we allocated the page first
and only then charged it, without the separate try-charge and commit
steps.
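
For illustration, the caller-side flow changes roughly as follows. This
is a sketch only: the caller updates are not part of this diff, and
alloc_pages/__free_pages stand in for whatever allocation path is being
accounted.

Before:

	struct mem_cgroup *memcg;
	struct page *page;

	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;
	page = alloc_pages(gfp, order);
	/* commits the charge, or reverts it if page is NULL */
	memcg_kmem_commit_charge(page, memcg, order);

After:

	struct page *page;

	page = alloc_pages(gfp, order);
	if (page && !memcg_kmem_newpage_charge(page, gfp, order)) {
		__free_pages(page, order);
		page = NULL;
	}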
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
arch/x86/mm/pgtable.c | 1 -
include/linux/memcontrol.h | 36 +++++-------------------------------
mm/memcontrol.c | 26 ++++++--------------------
3 files changed, 11 insertions(+), 52 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ba13ef8e651a..f86258ae9c97 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,6 +1,5 @@
#include <linux/mm.h>
#include <linux/gfp.h>
-#include <linux/memcontrol.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 37ce43221f3c..d26adf10eaa7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -553,10 +553,7 @@ bool memcg_kmem_is_active(struct mem_cgroup *memcg);
* conditions, but because they are pretty simple, they are expected to be
* fast.
*/
-bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
- int order);
-void __memcg_kmem_commit_charge(struct page *page,
- struct mem_cgroup *memcg, int order);
+bool __memcg_kmem_newpage_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
@@ -573,8 +570,8 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
/**
* memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
+ * @page: page to charge.
* @gfp: the gfp allocation flags.
- * @memcg: a pointer to the memcg this was charged against.
* @order: allocation order.
*
* returns true if the memcg where the current task belongs can hold this
@@ -584,7 +581,7 @@ void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
* any memcg.
*/
static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+memcg_kmem_newpage_charge(struct page *page, gfp_t gfp, int order)
{
if (!memcg_kmem_enabled())
return true;
@@ -607,7 +604,7 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
if (unlikely(fatal_signal_pending(current)))
return true;
- return __memcg_kmem_newpage_charge(gfp, memcg, order);
+ return __memcg_kmem_newpage_charge(page, gfp, order);
}
/**
@@ -625,24 +622,6 @@ memcg_kmem_uncharge_pages(struct page *page, int order)
}
/**
- * memcg_kmem_commit_charge: embeds correct memcg in a page
- * @page: pointer to struct page recently allocated
- * @memcg: the memcg structure we charged against
- * @order: allocation order.
- *
- * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
- * failure of the allocation. if @page is NULL, this function will revert the
- * charges. Otherwise, it will commit the memcg given by @memcg to the
- * corresponding page_cgroup.
- */
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
-{
- if (memcg_kmem_enabled() && memcg)
- __memcg_kmem_commit_charge(page, memcg, order);
-}
-
-/**
* memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
* @cachep: the original global kmem cache
* @gfp: allocation flags.
@@ -691,7 +670,7 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
}
static inline bool
-memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+memcg_kmem_newpage_charge(struct page *page, gfp_t gfp, int order)
{
return true;
}
@@ -700,11 +679,6 @@ static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}
-static inline void
-memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
-{
-}
-
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
return -1;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 50154da90750..8eb48071ea22 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3494,13 +3494,12 @@ void __memcg_kmem_put_cache(struct kmem_cache *cachep)
* Returning true means the allocation is possible.
*/
bool
-__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
+__memcg_kmem_newpage_charge(struct page *page, gfp_t gfp, int order)
{
+ struct page_cgroup *pc;
struct mem_cgroup *memcg;
int ret;
- *_memcg = NULL;
-
/*
* Disabling accounting is only relevant for some specific memcg
* internal allocations. Therefore we would initially not have such
@@ -3545,31 +3544,18 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
}
ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
- if (!ret)
- *_memcg = memcg;
-
css_put(&memcg->css);
- return (ret == 0);
-}
-
-void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
- int order)
-{
- struct page_cgroup *pc;
- VM_BUG_ON(mem_cgroup_is_root(memcg));
-
- /* The page allocation failed. Revert */
- if (!page) {
- memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
- return;
- }
+ if (ret)
+ return false;
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
pc->mem_cgroup = memcg;
SetPageCgroupUsed(pc);
unlock_page_cgroup(pc);
+
+ return true;
}
void __memcg_kmem_uncharge_pages(struct page *page, int order)
--
2.1.4
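
For readability, here is what __memcg_kmem_newpage_charge in
mm/memcontrol.c looks like after the patch, reconstructed from the hunks
above; the memcg lookup in the middle of the function is elided, as the
hunk context above also omits it.

	bool
	__memcg_kmem_newpage_charge(struct page *page, gfp_t gfp, int order)
	{
		struct page_cgroup *pc;
		struct mem_cgroup *memcg;
		int ret;

		/* ... memcg lookup elided (see the full function) ... */

		ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
		css_put(&memcg->css);
		if (ret)
			return false;

		/* bind the charged page to the memcg via its page_cgroup */
		pc = lookup_page_cgroup(page);
		lock_page_cgroup(pc);
		pc->mem_cgroup = memcg;
		SetPageCgroupUsed(pc);
		unlock_page_cgroup(pc);

		return true;
	}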