[Devel] [PATCH rh8] mm/swap: activate swapped in pages on fault
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Oct 19 19:32:53 MSK 2020
From: Andrey Ryabinin <aryabinin at virtuozzo.com>
Move swapped-in anon pages directly to the active list. This should
help us prevent anon thrashing: recently swapped-in pages then have
a better chance of staying in memory.
https://pmc.acronis.com/browse/VSTOR-20859
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
[VvS RHEL7.8 rebase] context changes
(cherry picked from vz7 commit 134cd9b20a914080539e6310f76fe3f7b32bc710)
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
 include/linux/swap.h |  4 ++--
 mm/madvise.c         |  4 ++--
 mm/swap_state.c      | 21 +++++++++++++--------
 mm/swapfile.c        |  3 ++-
 mm/zswap.c           |  2 +-
 5 files changed, 20 insertions(+), 14 deletions(-)
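
A note for reviewers skimming the diff: the behavioural change is
confined to the LRU insertion in __read_swap_cache_async(); the rest
of the patch merely threads the new "activate" flag through the
callers. A condensed sketch of the new insertion logic (the helper
name swapin_add_to_lru() is invented here for illustration and does
not exist in the tree):

static void swapin_add_to_lru(struct page *page, bool activate)
{
	SetPageWorkingset(page);
	if (activate)
		/* faulting page: place straight on the active LRU */
		SetPageActive(page);
	/* lru_cache_add() honours PG_active; lru_cache_add_anon() cleared it */
	lru_cache_add(page);
}

Only the page the task actually faulted on passes activate == true
(offset == entry_offset and i == ra_info.offset in the readahead
loops, plus the skip: fallbacks); speculative readahead,
MADV_WILLNEED, swapoff and zswap writeback pass false, so
mispredicted prefetches stay on the inactive list and remain cheap
to reclaim.
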
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ee2145ab2ca1..c87b4f3b7acc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -424,10 +424,10 @@ extern struct page *lookup_swap_cache(swp_entry_t entry,
 				      unsigned long addr);
 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool do_poll);
+			bool do_poll, bool activate);
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated);
+			bool *new_page_allocated, bool activate);
 extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 				struct vm_fault *vmf);
 extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
diff --git a/mm/madvise.c b/mm/madvise.c
index 4f76df2dbfb5..953d4238b31c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -217,7 +217,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 			continue;
 
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-							vma, index, false);
+							vma, index, false, false);
 		if (page)
 			put_page(page);
 	}
@@ -258,7 +258,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		}
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-							NULL, 0, false);
+							NULL, 0, false, false);
 		if (page)
 			put_page(page);
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c64f5f088d01..e64dcca405f7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -377,7 +377,7 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
 
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated)
+			bool *new_page_allocated, bool activate)
 {
 	struct page *found_page = NULL, *new_page = NULL;
 	struct swap_info_struct *si;
@@ -455,7 +455,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * Initiate read into locked page and return.
 		 */
 		SetPageWorkingset(new_page);
-		lru_cache_add_anon(new_page);
+		if (activate)
+			SetPageActive(new_page);
+		lru_cache_add(new_page);
 		*new_page_allocated = true;
 		return new_page;
 	}
@@ -480,11 +482,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
+		struct vm_area_struct *vma, unsigned long addr, bool do_poll,
+		bool activate)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
-			vma, addr, &page_was_allocated);
+			vma, addr, &page_was_allocated, activate);
 
 	if (page_was_allocated)
 		swap_readpage(retpage, do_poll);
@@ -602,7 +605,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		/* Ok, do the async read-ahead now */
 		page = __read_swap_cache_async(
 			swp_entry(swp_type(entry), offset),
-			gfp_mask, vma, addr, &page_allocated);
+			gfp_mask, vma, addr, &page_allocated, offset == entry_offset);
 		if (!page)
 			continue;
 		if (page_allocated) {
@@ -618,7 +621,8 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
-	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll,
+				     true);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -764,7 +768,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 		if (unlikely(non_swap_entry(entry)))
 			continue;
 		page = __read_swap_cache_async(entry, gfp_mask, vma,
-					       vmf->address, &page_allocated);
+					       vmf->address, &page_allocated,
+					       i == ra_info.offset);
 		if (!page)
 			continue;
 		if (page_allocated) {
@@ -780,7 +785,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	lru_add_drain();
 skip:
 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
-				     ra_info.win == 1);
+				     ra_info.win == 1, true);
 }
 
 /**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index ebb524dcf2e4..2f77cf93df68 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2162,7 +2162,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
 		page = read_swap_cache_async(entry,
-					GFP_HIGHUSER_MOVABLE, NULL, 0, false);
+					GFP_HIGHUSER_MOVABLE, NULL, 0, false,
+					false);
 		if (!page) {
 			/*
 			 * Either swap_duplicate() failed because entry
diff --git a/mm/zswap.c b/mm/zswap.c
index cd91fd9d96b8..aa243b84fbca 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -826,7 +826,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
 	bool page_was_allocated;
 
 	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-			NULL, 0, &page_was_allocated);
+			NULL, 0, &page_was_allocated, false);
 	if (page_was_allocated)
 		return ZSWAP_SWAPCACHE_NEW;
 	if (!*retpage)
--
2.28.0