[Devel] [PATCH RHEL7 COMMIT] mm/swap: activate swapped in pages on fault

Konstantin Khorenko khorenko at virtuozzo.com
Wed Mar 6 14:08:31 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.5.1.vz7.84.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.5.1.vz7.84.4
------>
commit b8cd82d1380904efee41c39ab384c331295bdfa2
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Wed Mar 6 14:08:28 2019 +0300

    mm/swap: activate swapped in pages on fault
    
    Move swapped-in anon pages directly to the active list. This
    should help prevent anon thrashing: recently swapped-in pages
    have a better chance of staying in memory.
    
    https://pmc.acronis.com/browse/VSTOR-20859
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/linux/swap.h |  5 +++--
 mm/madvise.c         |  4 ++--
 mm/swap_state.c      | 16 ++++++++++------
 mm/swapfile.c        |  2 +-
 mm/zswap.c           |  2 +-
 5 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7797cb88870b..51d71b089f26 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -463,10 +463,11 @@ extern void free_page_and_swap_cache(struct page *);
 extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t);
 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
-			struct vm_area_struct *vma, unsigned long addr);
+			struct vm_area_struct *vma, unsigned long addr,
+			bool activate);
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated);
+			bool *new_page_allocated, bool activate);
 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
 
diff --git a/mm/madvise.c b/mm/madvise.c
index 586705116e6f..08afdc72f07a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -212,7 +212,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 			continue;
 
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-								vma, index);
+							vma, index, false);
 		if (page)
 			page_cache_release(page);
 	}
@@ -253,7 +253,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		}
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-								NULL, 0);
+								NULL, 0, false);
 		if (page)
 			page_cache_release(page);
 	}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 83e48a7edb28..d05043084a38 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -319,7 +319,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			struct vm_area_struct *vma, unsigned long addr,
-			bool *new_page_allocated)
+			bool *new_page_allocated, bool activate)
 {
 	struct page *found_page, *new_page = NULL;
 	struct address_space *swapper_space = swap_address_space(entry);
@@ -401,7 +401,9 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			/*
 			 * Initiate read into locked page and return.
 			 */
-			lru_cache_add_anon(new_page);
+			if (activate)
+				SetPageActive(new_page);
+			lru_cache_add(new_page);
 			*new_page_allocated = true;
 			return new_page;
 		}
@@ -427,11 +429,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+			struct vm_area_struct *vma, unsigned long addr,
+			bool activate)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
-			vma, addr, &page_was_allocated);
+			vma, addr, &page_was_allocated, activate);
 
 	if (page_was_allocated)
 		swap_readpage(retpage);
@@ -463,6 +466,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct page *page;
 	unsigned long offset = swp_offset(entry);
+	unsigned long orig_offset = offset;
 	unsigned long start_offset, end_offset;
 	unsigned long mask = (1UL << page_cluster) - 1;
 	struct swap_info_struct *si = swp_swap_info(entry);
@@ -480,7 +484,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
 		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
-						gfp_mask, vma, addr);
+				gfp_mask, vma, addr, orig_offset == offset);
 		if (!page)
 			continue;
 		page_cache_release(page);
@@ -488,7 +492,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_finish_plug(&plug);
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
-	return read_swap_cache_async(entry, gfp_mask, vma, addr);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, true);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 14043e6bf776..e3923544a31f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1705,7 +1705,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 		swap_map = &si->swap_map[i];
 		entry = swp_entry(type, i);
 		page = read_swap_cache_async(entry,
-					GFP_HIGHUSER_MOVABLE, NULL, 0);
+					GFP_HIGHUSER_MOVABLE, NULL, 0, false);
 		if (!page) {
 			/*
 			 * Either swap_duplicate() failed because entry
diff --git a/mm/zswap.c b/mm/zswap.c
index 79cdd12ac453..b10275dbedb5 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -449,7 +449,7 @@ static int zswap_get_swap_cache_page(swp_entry_t entry,
 	bool page_was_allocated;
 
 	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-			NULL, 0, &page_was_allocated);
+			NULL, 0, &page_was_allocated, false);
 	if (page_was_allocated)
 		return ZSWAP_SWAPCACHE_NEW;
 	if (!*retpage)

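The behavioural core of the change, condensed into one place (a sketch
assembled from the hunks above, not a literal excerpt):
__read_swap_cache_async() gains an "activate" flag and marks the freshly
allocated page active before placing it on the LRU, while
swapin_readahead() sets the flag only for the faulting offset, so
speculative readahead pages still start on the inactive list:

	/* __read_swap_cache_async(): optionally pre-activate the new page
	 * so reclaim does not evict it before the faulting task uses it */
	if (activate)
		SetPageActive(new_page);
	lru_cache_add(new_page);

	/* swapin_readahead(): only the page at the faulting offset is
	 * activated; neighbouring readahead pages must earn activation */
	page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, offset == orig_offset);

All other callers (madvise, try_to_unuse, zswap) pass false and keep the
previous inactive-list placement.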

