[Devel] [PATCH rh7 v3] mm/vmscan: shrink tcache/tswap before everything else
Andrey Ryabinin
aryabinin at virtuozzo.com
Thu Oct 18 19:13:49 MSK 2018
We don't want to evict page cache or push anonymous pages to swap while
there are still plenty of reclaimable pages in tcache/tswap.
Reclaim those first, and only after that fall back to the traditional
reclaim path.

https://jira.sw.ru/browse/PSBM-89403
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
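For review purposes, a minimal sketch of the intended ordering (illustrative
only, not part of the patch: direct_reclaim_sketch() and shrink_lru_lists()
are made-up stand-ins for do_try_to_free_pages() and its existing
shrink_zones()/priority loop; the real hooks are in the diff below):

/*
 * Illustrative sketch only. Global direct reclaim drains the
 * "transcendent" caches first and touches the LRU lists only if that
 * was not enough to meet the reclaim target.
 */
static unsigned long direct_reclaim_sketch(struct scan_control *sc)
{
	if (global_reclaim(sc)) {
		/* Drop clean tcache pages, push tswap pages to real swap. */
		shrink_tcrutches(sc);
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			return sc->nr_reclaimed;	/* target met, LRUs untouched */
	}

	/* Only then evict page cache and swap out anon pages as usual. */
	return shrink_lru_lists(sc);
}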
 mm/internal.h | 32 ++++++++++++++++++++++++++++++++
 mm/tcache.c   |  4 ++--
 mm/tswap.c    |  2 +-
 mm/vmscan.c   | 38 +++++++++++++++++++++++++++++++++++++-
 4 files changed, 72 insertions(+), 4 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 2072b9b04b6b..c7265beced97 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -384,6 +384,38 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
enum ttu_flags;
struct tlbflush_unmap_batch;
+#ifdef CONFIG_TSWAP
+unsigned long tswap_shrink_scan(struct shrinker *shrinker,
+				struct shrink_control *sc);
+
+static inline unsigned long tswap_shrink(struct shrink_control *sc)
+{
+	unsigned long ret = tswap_shrink_scan(NULL, sc);
+	if (ret == SHRINK_STOP)
+		ret = 0;
+	return ret;
+}
+#else
+static inline unsigned long tswap_shrink(struct shrink_control *sc)
+{ return 0; }
+#endif
+
+#ifdef CONFIG_TCACHE
+unsigned long tcache_shrink_scan(struct shrinker *shrinker,
+				 struct shrink_control *sc);
+
+static inline unsigned long tcache_shrink(struct shrink_control *sc)
+{
+	unsigned long ret = tcache_shrink_scan(NULL, sc);
+	if (ret == SHRINK_STOP)
+		ret = 0;
+	return ret;
+}
+#else
+static inline unsigned long tcache_shrink(struct shrink_control *sc)
+{ return 0; }
+#endif
+
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
diff --git a/mm/tcache.c b/mm/tcache.c
index eb9c9dea4e51..61f4a6ea26b3 100644
--- a/mm/tcache.c
+++ b/mm/tcache.c
@@ -1210,7 +1210,7 @@ static unsigned long tcache_shrink_count(struct shrinker *shrink,
#define TCACHE_SCAN_BATCH 128UL
static DEFINE_PER_CPU(struct page * [TCACHE_SCAN_BATCH], tcache_page_vec);
-static unsigned long tcache_shrink_scan(struct shrinker *shrink,
+unsigned long tcache_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
long nr_isolated, nr_reclaimed;
@@ -1218,7 +1218,7 @@ static unsigned long tcache_shrink_scan(struct shrinker *shrink,
pages = get_cpu_var(tcache_page_vec); /* Implies rcu_read_lock_sched() */
- if (WARN_ON(sc->nr_to_scan > TCACHE_SCAN_BATCH))
+ if (sc->nr_to_scan > TCACHE_SCAN_BATCH)
sc->nr_to_scan = TCACHE_SCAN_BATCH;
nr_isolated = tcache_lru_isolate(sc->nid, pages, sc->nr_to_scan);
diff --git a/mm/tswap.c b/mm/tswap.c
index e6804dcba6e2..73b1f85d5279 100644
--- a/mm/tswap.c
+++ b/mm/tswap.c
@@ -236,7 +236,7 @@ static int tswap_evict_page(struct page *page)
return err;
}
-static unsigned long tswap_shrink_scan(struct shrinker *shrink,
+unsigned long tswap_shrink_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
struct tswap_lru *lru = &tswap_lru_node[sc->nid];
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6be538ce81b6..4b9dfe00fb64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2793,6 +2793,32 @@ static bool all_unreclaimable(struct zonelist *zonelist,
return true;
}
+/*
+ * Reclaim from tcache and tswap on all allowed nodes before touching
+ * page cache or anon LRUs.  Loop until the reclaim target is met or a
+ * pass stops making progress.
+ */
+static void shrink_tcrutches(struct scan_control *scan_ctrl)
+{
+	int nid;
+	unsigned long shrunk;
+	nodemask_t *nodemask = scan_ctrl->nodemask ? : &node_online_map;
+
+	do {
+		shrunk = 0;
+
+		for_each_node_mask(nid, *nodemask) {
+			struct shrink_control sc = {
+				.gfp_mask = scan_ctrl->gfp_mask,
+				.nid = nid,
+				.memcg = NULL,
+				.nr_to_scan = scan_ctrl->nr_to_reclaim - scan_ctrl->nr_reclaimed,
+			};
+			shrunk = tcache_shrink(&sc);
+			if (!shrunk)
+				shrunk += tswap_shrink(&sc);
+			scan_ctrl->nr_reclaimed += shrunk;
+			if (scan_ctrl->nr_reclaimed >= scan_ctrl->nr_to_reclaim)
+				break;
+		}
+	} while (shrunk && scan_ctrl->nr_reclaimed < scan_ctrl->nr_to_reclaim);
+}
+
/*
* This is the main entry point to direct page reclaim.
*
@@ -2823,8 +2849,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 {
 	KSTAT_PERF_ENTER(ttfp);
 	delayacct_freepages_start();
-	if (global_reclaim(sc))
+	if (global_reclaim(sc)) {
 		count_vm_event(ALLOCSTALL);
+		shrink_tcrutches(sc);
+		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
+			goto out;
+	}
do {
vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
@@ -3466,6 +3496,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (sc.priority < DEF_PRIORITY - 2)
sc.may_writepage = 1;
+		shrink_tcrutches(&sc);
+		if (sc.nr_reclaimed >= sc.nr_to_reclaim &&
+		    pgdat_balanced(pgdat, order, *classzone_idx))
+			goto out;
+
/*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
--
2.18.1