[Devel] [PATCH RHEL COMMIT] mm/swap: activate swapped in pages on fault
Konstantin Khorenko
khorenko at virtuozzo.com
Thu Sep 30 18:08:33 MSK 2021
The commit is pushed to "branch-rh9-5.14.vz9.1.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after ark-5.14
------>
commit 31af2f6493bdadb4f8bdda92b77377d7b810a07e
Author: Andrey Ryabinin <ryabinin.a.a at gmail.com>
Date: Thu Sep 30 18:08:33 2021 +0300
mm/swap: activate swapped in pages on fault
    Move swapped-in anon pages directly to the active list. This should
    help prevent anon thrashing: recently swapped-in pages then have a
    better chance of staying in memory.
https://pmc.acronis.com/browse/VSTOR-20859
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
[VvS RHEL7.8 rebase] context changes
(cherry picked from vz7 commit 134cd9b20a914080539e6310f76fe3f7b32bc710)
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
Reviewed-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
(cherry-picked from vz8 commit 529114b7d628 ("mm/swap: activate
swapped in pages on fault"))
Signed-off-by: Nikita Yushchenko <nikita.yushchenko at virtuozzo.com>
---
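Not part of the patch: below is a minimal user-space sketch of the
activate-on-fault idea, for reviewers who want to see the flag's effect in
isolation. Everything in it is invented for illustration; only the names
loosely mirror SetPageActive()/lru_cache_add(). The real logic is in the
diff that follows.

/*
 * Toy model, not kernel code: a single "activate" flag decides which
 * LRU list a freshly swapped-in page lands on.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long pfn;
	bool active;			/* models PG_active */
	struct page *next;
};

static struct page *active_lru;		/* models the active anon list */
static struct page *inactive_lru;	/* models the inactive anon list */

/* models lru_cache_add(): queue the page on the list PG_active selects */
static void lru_cache_add(struct page *page)
{
	struct page **list = page->active ? &active_lru : &inactive_lru;

	page->next = *list;
	*list = page;
}

/*
 * Models __read_swap_cache_async(): when the caller says this page is
 * the fault target (activate == true), mark it active *before* it hits
 * the LRU, so it starts on the active list and is not an immediate
 * reclaim candidate.
 */
static struct page *swapin_page(unsigned long pfn, bool activate)
{
	struct page *page = calloc(1, sizeof(*page));

	if (!page)
		return NULL;
	page->pfn = pfn;
	if (activate)
		page->active = true;	/* models SetPageActive() */
	lru_cache_add(page);
	return page;
}

int main(void)
{
	/* fault target gets activated; readahead neighbours stay inactive */
	swapin_page(100, true);
	swapin_page(101, false);
	swapin_page(102, false);

	for (struct page *p = active_lru; p; p = p->next)
		printf("active:   pfn %lu\n", p->pfn);
	for (struct page *p = inactive_lru; p; p = p->next)
		printf("inactive: pfn %lu\n", p->pfn);
	return 0;
}

The patch applies the same rule at its call sites: the readahead loops
pass "offset == entry_offset" and "i == ra_info.offset" so that only the
faulting entry is activated, while the madvise prefetch and zswap paths
pass false.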
 include/linux/swap.h |  4 ++--
 mm/madvise.c         |  4 ++--
 mm/swap_state.c      | 19 ++++++++++++-------
 mm/zswap.c           |  2 +-
 4 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9cf14021d01a..68c186361397 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -460,10 +460,10 @@ extern struct page *lookup_swap_cache(swp_entry_t entry,
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
- bool do_poll);
+ bool do_poll, bool activate);
extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated);
+ bool *new_page_allocated, bool activate);
extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
diff --git a/mm/madvise.c b/mm/madvise.c
index 5c065bc8b5f6..ac42b1d12f7f 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -211,7 +211,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
continue;
page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
- vma, index, false);
+ vma, index, false, false);
if (page)
put_page(page);
}
@@ -242,7 +242,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
swap = radix_to_swp_entry(page);
page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
- NULL, 0, false);
+ NULL, 0, false, false);
if (page)
put_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 504fed0107da..7e3104d27d4f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -412,7 +412,7 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated)
+ bool *new_page_allocated, bool activate)
{
struct swap_info_struct *si;
struct page *page;
@@ -497,6 +497,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
workingset_refault(page, shadow);
/* Caller will initiate read into locked page */
+ if (activate)
+ SetPageActive(page);
lru_cache_add(page);
*new_page_allocated = true;
return page;
@@ -515,11 +517,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* the swap entry is no longer in use.
*/
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
- struct vm_area_struct *vma, unsigned long addr, bool do_poll)
+ struct vm_area_struct *vma, unsigned long addr, bool do_poll,
+ bool activate)
{
bool page_was_allocated;
struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
- vma, addr, &page_was_allocated);
+ vma, addr, &page_was_allocated, activate);
if (page_was_allocated)
swap_readpage(retpage, do_poll);
@@ -638,7 +641,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
/* Ok, do the async read-ahead now */
page = __read_swap_cache_async(
swp_entry(swp_type(entry), offset),
- gfp_mask, vma, addr, &page_allocated);
+ gfp_mask, vma, addr, &page_allocated, offset == entry_offset);
if (!page)
continue;
if (page_allocated) {
@@ -654,7 +657,8 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
- return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
+ return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll,
+ true);
}
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -811,7 +815,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
if (unlikely(non_swap_entry(entry)))
continue;
page = __read_swap_cache_async(entry, gfp_mask, vma,
- vmf->address, &page_allocated);
+ vmf->address, &page_allocated,
+ i == ra_info.offset);
if (!page)
continue;
if (page_allocated) {
@@ -827,7 +832,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
lru_add_drain();
skip:
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
- ra_info.win == 1);
+ ra_info.win == 1, true);
}
/**
diff --git a/mm/zswap.c b/mm/zswap.c
index 7944e3e57e78..564ff0d6ed3f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -904,7 +904,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
bool page_was_allocated;
*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
- NULL, 0, &page_was_allocated);
+ NULL, 0, &page_was_allocated, false);
if (page_was_allocated)
return ZSWAP_SWAPCACHE_NEW;
if (!*retpage)