[Devel] [PATCH 11/13] mm/vmscan.c: generalize shrink_slab() calls in shrink_node()
Kirill Tkhai
ktkhai at virtuozzo.com
Tue Aug 28 14:32:56 MSK 2018
From: Vladimir Davydov <vdavydov.dev at gmail.com>
Mainstream (ms) commit aeed1d325d42
The patch causes shrink_slab() to be called for root_mem_cgroup in the same
way as it is called for the rest of the cgroups. This simplifies the logic
and improves readability.
[ktkhai at virtuozzo.com: wrote changelog]
Link: http://lkml.kernel.org/r/153063068338.1818.11496084754797453962.stgit@localhost.localdomain
Signed-off-by: Vladimir Davydov <vdavydov.dev at gmail.com>
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
Tested-by: Shakeel Butt <shakeelb at google.com>
Cc: Al Viro <viro at zeniv.linux.org.uk>
Cc: Andrey Ryabinin <aryabinin at virtuozzo.com>
Cc: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
Cc: Guenter Roeck <linux at roeck-us.net>
Cc: "Huang, Ying" <ying.huang at intel.com>
Cc: Johannes Weiner <hannes at cmpxchg.org>
Cc: Josef Bacik <jbacik at fb.com>
Cc: Li RongQing <lirongqing at baidu.com>
Cc: Matthew Wilcox <willy at infradead.org>
Cc: Matthias Kaehlcke <mka at chromium.org>
Cc: Mel Gorman <mgorman at techsingularity.net>
Cc: Michal Hocko <mhocko at kernel.org>
Cc: Minchan Kim <minchan at kernel.org>
Cc: Philippe Ombredanne <pombredanne at nexb.com>
Cc: Roman Gushchin <guro at fb.com>
Cc: Sahitya Tummala <stummala at codeaurora.org>
Cc: Stephen Rothwell <sfr at canb.auug.org.au>
Cc: Tetsuo Handa <penguin-kernel at I-love.SAKURA.ne.jp>
Cc: Thomas Gleixner <tglx at linutronix.de>
Cc: Waiman Long <longman at redhat.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
mm/vmscan.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index df792f5444f7..bd2d62dabdd9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -517,7 +517,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (unlikely(test_tsk_thread_flag(current, TIF_MEMDIE)))
return 0;
- if (memcg && !mem_cgroup_is_root(memcg))
+ if (!mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
if (!down_read_trylock(&shrinker_rwsem)) {
@@ -539,9 +539,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
.for_drop_caches = for_drop_caches,
};
- if (!!memcg != !!(shrinker->flags & SHRINKER_MEMCG_AWARE))
- continue;
-
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
sc.nid = 0;
@@ -559,9 +556,10 @@ void drop_slab_node(int nid)
unsigned long freed;
do {
- struct mem_cgroup *memcg = NULL;
+ struct mem_cgroup *memcg;
freed = 0;
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
freed += shrink_slab(GFP_KERNEL, nid, memcg,
0, true);
@@ -2517,7 +2515,7 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
zone_lru_pages += lru_pages;
}
- if (memcg && is_classzone) {
+ if (is_classzone) {
shrink_slab(slab_gfp, zone_to_nid(zone),
memcg, sc->priority, false);
if (reclaim_state) {
@@ -2545,10 +2543,6 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc,
}
} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
- if (global_reclaim(sc) && is_classzone)
- shrink_slab(slab_gfp, zone_to_nid(zone), NULL,
- sc->priority, false);
-
if (global_reclaim(sc)) {
/*
* If reclaim is isolating dirty pages under writeback, it implies
More information about the Devel
mailing list