[Devel] [PATCH rh7 1/2] mm/memcg: Don't charge anon pages as cache
Andrey Ryabinin
aryabinin at virtuozzo.com
Mon Feb 26 20:44:11 MSK 2018
We used to rely on PageAnon(page) in mem_cgroup_try_charge() to determine
whether the page is anon or file. However, this doesn't work because
mem_cgroup_try_charge() is called too early, before page->mapping is set,
so PageAnon() always returns false. As a result, we may wrongly charge
anon pages as cache.
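For reference, PageAnon() is essentially a test of the low bit of
page->mapping (PAGE_MAPPING_ANON), which is only set once the anon rmap
is established, e.g. by page_add_new_anon_rmap(). A minimal sketch of
that check:

	static inline int PageAnon(struct page *page)
	{
		/* page->mapping is still NULL at try_charge time, so this is 0 */
		return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
	}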
Introduce mem_cgroup_try_charge_cache()/mem_cgroup_cancel_cache_charge()
and use them to charge cache pages.
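The caller-side transaction for cache pages then looks roughly like the
sketch below (insert_into_page_cache() is just a placeholder for whatever
puts the page on the radix tree, cf. __add_to_page_cache_locked() in the
diff):

	struct mem_cgroup *memcg;
	int error;

	error = mem_cgroup_try_charge_cache(page, current->mm, gfp_mask, &memcg);
	if (error)
		return error;

	error = insert_into_page_cache(page);	/* placeholder insertion step */
	if (error) {
		mem_cgroup_cancel_cache_charge(page, memcg);
		return error;
	}

	/* page->mapping is set by now, so the charge can be finalized */
	mem_cgroup_commit_charge(page, memcg, false);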
https://jira.sw.ru/browse/PSBM-81750
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/linux/memcontrol.h | 16 +++++++++
 mm/filemap.c               |  4 +--
 mm/memcontrol.c            | 90 ++++++++++++++++++++++++++++------------------
 mm/memory.c                |  4 +--
4 files changed, 76 insertions(+), 38 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1d6bc80c4c90..995b9166c904 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -58,9 +58,12 @@ struct mem_cgroup_reclaim_cookie {
#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp);
+int mem_cgroup_try_charge_cache(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
+void mem_cgroup_cancel_cache_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -234,6 +237,14 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
return 0;
}
+static inline int mem_cgroup_try_charge_cache(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask,
+ struct mem_cgroup **memcgp)
+{
+ *memcgp = NULL;
+ return 0;
+}
+
static inline void mem_cgroup_commit_charge(struct page *page,
struct mem_cgroup *memcg,
bool lrucare)
@@ -245,6 +256,11 @@ static inline void mem_cgroup_cancel_charge(struct page *page,
{
}
+static inline void mem_cgroup_cancel_cache_charge(struct page *page,
+ struct mem_cgroup *memcg)
+{
+}
+
static inline void mem_cgroup_uncharge(struct page *page)
{
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 31494ef85bcb..4dde6f691677 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -627,7 +627,7 @@ static int __add_to_page_cache_locked(struct page *page,
VM_BUG_ON(PageSwapBacked(page));
if (!huge) {
- error = mem_cgroup_try_charge(page, current->mm, gfp_mask,
+ error = mem_cgroup_try_charge_cache(page, current->mm, gfp_mask,
&memcg);
if (error)
return error;
@@ -657,7 +657,7 @@ static int __add_to_page_cache_locked(struct page *page,
}
radix_tree_preload_end();
} else if (!huge)
- mem_cgroup_cancel_charge(page, memcg);
+ mem_cgroup_cancel_cache_charge(page, memcg);
return error;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a3f118cba1b3..50cbcdc38f60 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6937,30 +6937,13 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
}
#endif
-/**
- * mem_cgroup_try_charge - try charging a page
- * @page: page to charge
- * @mm: mm context of the victim
- * @gfp_mask: reclaim mode
- * @memcgp: charged memcg return
- *
- * Try to charge @page to the memcg that @mm belongs to, reclaiming
- * pages according to @gfp_mask if necessary.
- *
- * Returns 0 on success, with *@memcgp pointing to the charged memcg.
- * Otherwise, an error code is returned.
- *
- * After page->mapping has been set up, the caller must finalize the
- * charge with mem_cgroup_commit_charge(). Or abort the transaction
- * with mem_cgroup_cancel_charge() in case page instantiation fails.
- */
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, struct mem_cgroup **memcgp)
+static int __mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp,
+ bool cache_charge)
{
struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
int ret = 0;
- bool cache_charge;
if (mem_cgroup_disabled())
goto out;
@@ -6980,11 +6963,11 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
- VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+ if (!PageAnon(page)) {
+ pr_warn_once("page->flags %lx page %p head %p\n", page->flags, page, compound_head(page));
+ }
}
- cache_charge = !PageAnon(page) && !PageSwapBacked(page);
-
if (do_swap_account && PageSwapCache(page))
memcg = try_get_mem_cgroup_from_page(page);
if (!memcg)
@@ -7003,6 +6986,37 @@ out:
return ret;
}
+/**
+ * mem_cgroup_try_charge - try charging a page
+ * @page: page to charge
+ * @mm: mm context of the victim
+ * @gfp_mask: reclaim mode
+ * @memcgp: charged memcg return
+ *
+ * Try to charge @page to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp_mask if necessary.
+ *
+ * Returns 0 on success, with *@memcgp pointing to the charged memcg.
+ * Otherwise, an error code is returned.
+ *
+ * After page->mapping has been set up, the caller must finalize the
+ * charge with mem_cgroup_commit_charge(). Or abort the transaction
+ * with mem_cgroup_cancel_charge() in case page instantiation fails.
+ */
+int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp)
+{
+
+ return __mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, false);
+}
+
+int mem_cgroup_try_charge_cache(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp)
+{
+
+ return __mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, true);
+}
+
/**
* mem_cgroup_commit_charge - commit a page charge
* @page: page to charge
@@ -7055,17 +7069,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
}
}
-/**
- * mem_cgroup_cancel_charge - cancel a page charge
- * @page: page to charge
- * @memcg: memcg to charge the page to
- *
- * Cancel a charge transaction started by mem_cgroup_try_charge().
- */
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
+static void __mem_cgroup_cancel_charge(struct page *page,
+ struct mem_cgroup *memcg, bool cache_charge)
{
unsigned int nr_pages = 1;
- bool cache_charge;
if (mem_cgroup_disabled())
return;
@@ -7082,11 +7089,26 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
- cache_charge = !PageKmemcg(page) && !PageAnon(page)
- && !PageSwapBacked(page);
cancel_charge(memcg, nr_pages, cache_charge);
}
+/**
+ * mem_cgroup_cancel_charge - cancel a page charge
+ * @page: page to charge
+ * @memcg: memcg to charge the page to
+ *
+ * Cancel a charge transaction started by mem_cgroup_try_charge().
+ */
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
+{
+ __mem_cgroup_cancel_charge(page, memcg, false);
+}
+
+void mem_cgroup_cancel_cache_charge(struct page *page, struct mem_cgroup *memcg)
+{
+ __mem_cgroup_cancel_charge(page, memcg, true);
+}
+
static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
unsigned long nr_mem, unsigned long nr_memsw,
unsigned long nr_anon, unsigned long nr_file,
diff --git a/mm/memory.c b/mm/memory.c
index 54c447ee7648..8ffe65e989ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2891,7 +2891,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (!new_page)
return VM_FAULT_OOM;
- if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) {
+ if (mem_cgroup_try_charge_cache(new_page, mm, GFP_KERNEL, &memcg)) {
page_cache_release(new_page);
return VM_FAULT_OOM;
}
@@ -2929,7 +2929,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
return ret;
uncharge_out:
- mem_cgroup_cancel_charge(new_page, memcg);
+ mem_cgroup_cancel_cache_charge(new_page, memcg);
page_cache_release(new_page);
return ret;
}
--
2.16.1