[Devel] [PATCH RHEL7 COMMIT] ms/mm, migration: add destination page freeing callback
Konstantin Khorenko
khorenko at virtuozzo.com
Wed Jan 31 18:21:59 MSK 2018
The commit is pushed to "branch-rh7-3.10.0-693.11.6.vz7.42.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-693.11.6.vz7.42.4
------>
commit c4241eaf49fe7fed6f69777a960ff1a28c3b5748
Author: David Rientjes <rientjes at google.com>
Date: Wed Jan 31 18:21:59 2018 +0300
ms/mm, migration: add destination page freeing callback
Memory migration uses a callback defined by the caller to determine how to
allocate destination pages. When migration fails for a source page,
however, it frees the destination page back to the system.
This patch adds a memory migration callback defined by the caller to
determine how to free destination pages. If a caller, such as memory
compaction, builds its own freelist for migration targets, this can reuse
already freed memory instead of scanning additional memory.
If the caller provides a function to handle freeing of destination pages,
it is called when page migration fails. If the caller passes NULL then
freeing back to the system will be handled as usual. This patch
introduces no functional change.
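
For readers less familiar with the migration callbacks: free_page_t mirrors new_page_t and receives the unused destination page together with the same "private" cookie that was handed to migrate_pages(). A minimal, purely illustrative sketch of such a callback for a caller that keeps its own freelist of migration targets (the my_migrate_ctx structure, its fields and the function name are hypothetical and not part of this patch) could look like this:

struct my_migrate_ctx {			/* hypothetical caller state */
	struct list_head freepages;	/* pages reserved as migration targets */
	unsigned long nr_freepages;
};

/*
 * Illustrative only: when migration of a source page fails, hand the
 * unused destination page back to the caller's freelist instead of
 * freeing it to the buddy allocator.
 */
static void my_free_target_page(struct page *page, unsigned long private)
{
	struct my_migrate_ctx *ctx = (struct my_migrate_ctx *)private;

	list_add(&page->lru, &ctx->freepages);
	ctx->nr_freepages++;
}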
Signed-off-by: David Rientjes <rientjes at google.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi at ah.jp.nec.com>
Acked-by: Mel Gorman <mgorman at suse.de>
Acked-by: Vlastimil Babka <vbabka at suse.cz>
Cc: Greg Thelen <gthelen at google.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
(cherry picked from commit 68711a746345c44ae00c64d8dbac6a9ce13ac54a)
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/linux/migrate.h | 11 ++++++----
 mm/compaction.c         |  2 +-
 mm/memcontrol.c         |  2 +-
 mm/memory-failure.c     |  4 ++--
 mm/memory_hotplug.c     |  2 +-
 mm/mempolicy.c          |  4 ++--
 mm/migrate.c            | 56 +++++++++++++++++++++++++++++++++----------------
 mm/page_alloc.c         |  2 +-
 8 files changed, 53 insertions(+), 30 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ba9b278d8f63..453f40ce636d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -5,7 +5,9 @@
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+typedef struct page *new_page_t(struct page *page, unsigned long private,
+ int **reason);
+typedef void free_page_t(struct page *page, unsigned long private);
/*
* Return values from addresss_space_operations.migratepage():
@@ -30,7 +32,7 @@ extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
-extern int migrate_pages(struct list_head *l, new_page_t x,
+extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
extern int fail_migrate_page(struct address_space *,
@@ -53,8 +55,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private, enum migrate_mode mode, int reason)
+static inline int migrate_pages(struct list_head *l, new_page_t new,
+ free_page_t free, unsigned long private, enum migrate_mode mode,
+ int reason)
{ return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
diff --git a/mm/compaction.c b/mm/compaction.c
index 0a2d4eded2e0..7e74add6b9c2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1020,7 +1020,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
}
nr_migrate = cc->nr_migratepages;
- err = migrate_pages(&cc->migratepages, compaction_alloc,
+ err = migrate_pages(&cc->migratepages, compaction_alloc, NULL,
(unsigned long)cc,
cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
MR_COMPACTION);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 116b303319af..f50377729d10 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5087,7 +5087,7 @@ static long __memcg_numa_migrate_pages(struct lruvec *lruvec, enum lru_list lru,
if (!scanned)
break;
- ret = migrate_pages(&pages, memcg_numa_migrate_new_page,
+ ret = migrate_pages(&pages, memcg_numa_migrate_new_page, NULL,
(unsigned long)&ms, MIGRATE_ASYNC,
MR_SYSCALL);
putback_lru_pages(&pages);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e1c12352c7fd..f5fdd96740f0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1564,7 +1564,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
return -EBUSY;
}
- ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+ ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
@@ -1719,7 +1719,7 @@ static int __soft_offline_page(struct page *page, int flags)
inc_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
- ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+ ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
MIGRATE_SYNC, MR_MEMORY_FAILURE);
if (ret) {
putback_lru_pages(&pagelist);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 01a98180ebf8..cb840d2b66c7 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1528,7 +1528,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
* alloc_migrate_target should be improooooved!!
* migrate_pages returns # of failed pages.
*/
- ret = migrate_pages(&source, alloc_migrate_target, 0,
+ ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
if (ret)
putback_movable_pages(&source);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7bf644c82837..5e3418030899 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1046,7 +1046,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
- err = migrate_pages(&pagelist, new_node_page, dest,
+ err = migrate_pages(&pagelist, new_node_page, NULL, dest,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(&pagelist);
@@ -1292,7 +1292,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
- nr_failed = migrate_pages(&pagelist, new_page,
+ nr_failed = migrate_pages(&pagelist, new_page, NULL,
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
if (nr_failed)
putback_movable_pages(&pagelist);
diff --git a/mm/migrate.c b/mm/migrate.c
index c629762fbfd5..3369475fe15f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -968,9 +968,9 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* Obtain the lock on page, remove all ptes and migrate the page
* to the newly allocated page in newpage.
*/
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force, enum migrate_mode mode,
- enum migrate_reason reason)
+static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
+ unsigned long private, struct page *page, int force,
+ enum migrate_mode mode, enum migrate_reason reason)
{
int rc = 0;
int *result = NULL;
@@ -1004,16 +1004,19 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (reason != MR_MEMORY_FAILURE)
putback_lru_page(page);
}
+
/*
- * Move the new page to the LRU. If migration was not successful
- * then this will free the page.
+ * If migration was not successful and there's a freeing callback, use
+ * it. Otherwise, putback_lru_page() will drop the reference grabbed
+ * during isolation.
*/
- if (unlikely(__is_movable_balloon_page(newpage))) {
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ put_new_page(newpage, private);
+ else if (unlikely(__is_movable_balloon_page(newpage)))
/* drop our reference, page already in the balloon */
put_page(newpage);
- } else {
+ else
putback_lru_page(newpage);
- }
if (result) {
if (rc)
@@ -1043,8 +1046,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* will wait in the page fault for migration to complete.
*/
static int unmap_and_move_huge_page(new_page_t get_new_page,
- unsigned long private, struct page *hpage,
- int force, enum migrate_mode mode)
+ free_page_t put_new_page, unsigned long private,
+ struct page *hpage, int force,
+ enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
@@ -1101,14 +1105,24 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
- if (!rc)
+ if (rc == MIGRATEPAGE_SUCCESS)
hugetlb_cgroup_migrate(hpage, new_hpage);
unlock_page(hpage);
out:
if (rc != -EAGAIN)
putback_active_hugepage(hpage);
- putback_active_hugepage(new_hpage);
+
+ /*
+ * If migration was not successful and there's a freeing callback, use
+ * it. Otherwise, put_page() will drop the reference grabbed during
+ * isolation.
+ */
+ if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ put_new_page(new_hpage, private);
+ else
+ putback_active_hugepage(new_hpage);
+
if (result) {
if (rc)
*result = rc;
@@ -1125,6 +1139,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* @from: The list of pages to be migrated.
* @get_new_page: The function used to allocate free pages to be used
* as the target of the page migration.
+ * @put_new_page: The function used to free target pages if migration
+ * fails, or NULL if no special handling is necessary.
* @private: Private data to be passed on to get_new_page()
* @mode: The migration mode that specifies the constraints for
* page migration, if any.
@@ -1138,7 +1154,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* Returns the number of pages that were not migrated, or an error code.
*/
int migrate_pages(struct list_head *from, new_page_t get_new_page,
- unsigned long private, enum migrate_mode mode, int reason)
+ free_page_t put_new_page, unsigned long private,
+ enum migrate_mode mode, int reason)
{
int retry = 1;
int nr_failed = 0;
@@ -1160,10 +1177,12 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
if (PageHuge(page))
rc = unmap_and_move_huge_page(get_new_page,
- private, page, pass > 2, mode);
+ put_new_page, private, page,
+ pass > 2, mode);
else
- rc = unmap_and_move(get_new_page, private,
- page, pass > 2, mode, reason);
+ rc = unmap_and_move(get_new_page, put_new_page,
+ private, page, pass > 2, mode,
+ reason);
switch(rc) {
case -ENOMEM:
@@ -1308,7 +1327,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
err = 0;
if (!list_empty(&pagelist)) {
- err = migrate_pages(&pagelist, new_page_node,
+ err = migrate_pages(&pagelist, new_page_node, NULL,
(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(&pagelist);
@@ -1760,7 +1779,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
list_add(&page->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
- node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
+ NULL, node, MIGRATE_ASYNC,
+ MR_NUMA_MISPLACED);
if (nr_remaining) {
putback_lru_pages(&migratepages);
isolated = 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 691c9bdbede3..558d82c958f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6913,7 +6913,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
cc->nr_migratepages -= nr_reclaimed;
ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
- 0, MIGRATE_SYNC, MR_CMA);
+ NULL, 0, MIGRATE_SYNC, MR_CMA);
}
if (ret < 0) {
putback_movable_pages(&cc->migratepages);
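
For completeness, a hedged sketch of how a call site reads with the widened migrate_pages() signature; my_alloc_target_page(), my_free_target_page() and ctx are hypothetical, and with this patch every in-tree caller simply passes NULL for the new argument:

/*
 * Illustrative call site only.  Passing NULL as the third argument keeps
 * the old behaviour (failed destination pages are freed back to the
 * system); passing a free_page_t lets the caller reclaim them itself.
 */
err = migrate_pages(&pagelist, my_alloc_target_page, my_free_target_page,
		    (unsigned long)&ctx, MIGRATE_SYNC, MR_SYSCALL);
if (err)
	putback_movable_pages(&pagelist);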