[Devel] [PATCH RHEL7 COMMIT] ms/mm/rmap: make rmap_walk to get the rmap_walk_control argument
Konstantin Khorenko
khorenko at virtuozzo.com
Tue Dec 8 06:15:55 PST 2015
The commit is pushed to "branch-rh7-3.10.0-229.7.2.vz7.9.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.9.15
------>
commit 83bf1997dc0d0556638be25f2570f7d543edb1e6
Author: Vladimir Davydov <vdavydov at virtuozzo.com>
Date: Tue Dec 8 18:15:55 2015 +0400
ms/mm/rmap: make rmap_walk to get the rmap_walk_control argument
Patchset description:
rmap_walk() present in RH7 requires the caller to either hold mmap_sem
or pin the page's anon_vma. page_idle_clear_pte_refs does neither. As a
result, it might end up trying to lock/unlock anon_vma which has already
been freed and possibly reallocated. This won't do any good.
Let's pull the new version of rmap_walk() from upstream, which allows us to
specify a custom anon_vma lock function, and use it in page_idle code to
avoid this issue. This patch puts page_idle in sync with upstream.
I hope this will fix:
https://jira.sw.ru/browse/PSBM-42015
Joonsoo Kim (3):
mm/rmap: factor lock function out of rmap_walk_anon()
mm/rmap: make rmap_walk to get the rmap_walk_control argument
mm/rmap: extend rmap_walk_xxx() to cope with different cases
Vladimir Davydov (1):
mm: page_idle: look up page anon_vma carefully when checking references
============================
This patch description:
From: Joonsoo Kim <iamjoonsoo.kim at lge.com>
In each rmap traversal case there are some differences, so we need
function pointers and arguments to them in order to handle these differences.
For this purpose, struct rmap_walk_control is introduced in this patch,
and will be extended in a following patch. Introducing and extending are
kept separate, because doing so clarifies the changes.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim at lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi at ah.jp.nec.com>
Cc: Mel Gorman <mgorman at suse.de>
Cc: Hugh Dickins <hughd at google.com>
Cc: Rik van Riel <riel at redhat.com>
Cc: Ingo Molnar <mingo at kernel.org>
Cc: Hillf Danton <dhillf at gmail.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
(cherry picked from commit 051ac83adf69eea4f57a97356e4282e395a5fa6d)
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
include/linux/ksm.h | 7 +++----
include/linux/rmap.h | 9 +++++++--
mm/ksm.c | 6 +++---
mm/migrate.c | 7 ++++++-
mm/rmap.c | 19 ++++++++-----------
5 files changed, 27 insertions(+), 21 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 45c9b6a..0eef8cb 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -76,8 +76,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
int page_referenced_ksm(struct page *page,
struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
#else /* !CONFIG_KSM */
@@ -120,8 +119,8 @@ static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
return 0;
}
-static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+static inline int rmap_walk_ksm(struct page *page,
+ struct rmap_walk_control *rwc)
{
return 0;
}
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 9775b7b..083b1df 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -237,11 +237,16 @@ extern struct anon_vma *page_lock_anon_vma_read(struct page *page);
extern void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+struct rmap_walk_control {
+ void *arg;
+ int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg);
+};
+
/*
* Called by migrate.c to remove migration ptes, but might be used more later.
*/
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg);
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
diff --git a/mm/ksm.c b/mm/ksm.c
index 74e4c08..ff2d5a3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2003,8 +2003,7 @@ out:
}
#ifdef CONFIG_MIGRATION
-int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
@@ -2039,7 +2038,8 @@ again:
if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
continue;
- ret = rmap_one(page, vma, rmap_item->address, arg);
+ ret = rwc->rmap_one(page, vma,
+ rmap_item->address, rwc->arg);
if (ret != SWAP_AGAIN) {
anon_vma_unlock_read(anon_vma);
goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
index c608326..ce9a97c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -196,7 +196,12 @@ out:
*/
static void remove_migration_ptes(struct page *old, struct page *new)
{
- rmap_walk(new, remove_migration_pte, old);
+ struct rmap_walk_control rwc = {
+ .rmap_one = remove_migration_pte,
+ .arg = old,
+ };
+
+ rmap_walk(new, &rwc);
}
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index df4b9d1..362c5e8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1771,8 +1771,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page)
* rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
* Called by migrate.c to remove migration ptes, but might be used more later.
*/
-static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1786,7 +1785,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
- ret = rmap_one(page, vma, address, arg);
+ ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
break;
}
@@ -1794,8 +1793,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
return ret;
}
-static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1807,7 +1805,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
mutex_lock(&mapping->i_mmap_mutex);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
- ret = rmap_one(page, vma, address, arg);
+ ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
break;
}
@@ -1820,17 +1818,16 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
return ret;
}
-int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
- struct vm_area_struct *, unsigned long, void *), void *arg)
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
VM_BUG_ON(!PageLocked(page));
if (unlikely(PageKsm(page)))
- return rmap_walk_ksm(page, rmap_one, arg);
+ return rmap_walk_ksm(page, rwc);
else if (PageAnon(page))
- return rmap_walk_anon(page, rmap_one, arg);
+ return rmap_walk_anon(page, rwc);
else
- return rmap_walk_file(page, rmap_one, arg);
+ return rmap_walk_file(page, rwc);
}
#endif /* CONFIG_MIGRATION */
More information about the Devel
mailing list