[Devel] [PATCH RHEL8 COMMIT] ms/mm: speed up mremap by 20x on large regions

Konstantin Khorenko khorenko at virtuozzo.com
Fri Jun 11 15:08:01 MSK 2021


The commit is pushed to "branch-rh8-4.18.0-240.1.1.vz8.5.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-240.1.1.vz8.5.40
------>
commit 2455a115c40f99f9e450f2af4ade6619dbeb51a4
Author: Joel Fernandes (Google) <joel at joelfernandes.org>
Date:   Fri Jun 11 15:08:01 2021 +0300

    ms/mm: speed up mremap by 20x on large regions
    
    Android needs to mremap large regions of memory during memory management
    related operations.  The mremap system call can be really slow if THP is
    not enabled.  The bottleneck is move_page_tables, which is copying each
    pte at a time, and can be really slow across a large map.  Turning on
    THP may not be a viable option, and is not for us.  This patch speeds up
    the performance for non-THP systems by copying at the PMD level when
    possible.
    
    The speedup is an order of magnitude on x86 (~20x).  On a 1GB mremap,
    the mremap completion time drops from 3.4-3.6 milliseconds to 144-160
    microseconds.
    
    Before:
    Total mremap time for 1GB data: 3521942 nanoseconds.
    Total mremap time for 1GB data: 3449229 nanoseconds.
    Total mremap time for 1GB data: 3488230 nanoseconds.
    
    After:
    Total mremap time for 1GB data: 150279 nanoseconds.
    Total mremap time for 1GB data: 144665 nanoseconds.
    Total mremap time for 1GB data: 158708 nanoseconds.
    
    If THP is enabled, the optimization is mostly skipped except in certain
    situations.
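
    A minimal userspace sketch of how such a timing can be reproduced; the
    exact benchmark program is not part of this patch, so the 2MB PMD size,
    the reserve_aligned() helper and the use of MREMAP_MAYMOVE|MREMAP_FIXED
    below are assumptions chosen so that both addresses and the size meet
    the PMD-alignment requirement of the new path:

        #define _GNU_SOURCE
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <time.h>
        #include <sys/mman.h>

        #define SIZE (1UL << 30)   /* 1GB of data, as in the numbers above */
        #define PMD  (1UL << 21)   /* assumed PMD_SIZE: 2MB on x86_64 */

        /* Reserve len + PMD bytes and return a PMD-aligned pointer inside. */
        static void *reserve_aligned(size_t len, int prot)
        {
                char *p = mmap(NULL, len + PMD, prot,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (p == MAP_FAILED)
                        return NULL;
                return (void *)(((uintptr_t)p + PMD - 1) & ~(PMD - 1));
        }

        int main(void)
        {
                struct timespec a, b;
                void *src = reserve_aligned(SIZE, PROT_READ | PROT_WRITE);
                void *dst = reserve_aligned(SIZE, PROT_NONE);

                if (!src || !dst)
                        return 1;
                /* Touch every page so there really are PTEs to move. */
                memset(src, 1, SIZE);

                clock_gettime(CLOCK_MONOTONIC, &a);
                /* Addresses and size are PMD-aligned, so the kernel may move
                 * whole page tables instead of copying individual PTEs. */
                if (mremap(src, SIZE, SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
                           dst) == MAP_FAILED)
                        return 1;
                clock_gettime(CLOCK_MONOTONIC, &b);

                printf("Total mremap time for 1GB data: %ld nanoseconds.\n",
                       (b.tv_sec - a.tv_sec) * 1000000000L +
                       (b.tv_nsec - a.tv_nsec));
                return 0;
        }

    Note that the PMD-level move is only compiled in when the architecture
    selects HAVE_MOVE_PMD, and it is skipped for THP-mapped PMDs; in all
    other cases move_ptes() keeps copying individual PTEs as before.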
    
    [joel at joelfernandes.org: fix 'move_normal_pmd' unused function warning]
      Link: http://lkml.kernel.org/r/20181108224457.GB209347@google.com
    Link: http://lkml.kernel.org/r/20181108181201.88826-3-joelaf@google.com
    Signed-off-by: Joel Fernandes (Google) <joel at joelfernandes.org>
    Acked-by: Kirill A. Shutemov <kirill at shutemov.name>
    Reviewed-by: William Kucharski <william.kucharski at oracle.com>
    Cc: Julia Lawall <Julia.Lawall at lip6.fr>
    Cc: Michal Hocko <mhocko at kernel.org>
    Cc: Will Deacon <will.deacon at arm.com>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    
    (cherry picked from commit 2c91bd4a4e2e530582d6fd643ea7b86b27907151)
    
    Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    
    Patchset description:
    
    Patch "mm: speed up mremap by 20x on large regions" introduces
    optimization: when a moved region has source and destination addresses
    and size equal to multiple of PMD_SIZE; PTEs are not really copyed.
    Instead of this, new PMD pointer is changed to point to old PTEs, while
    old PMD is cleared.
    
    This may be useful when CRIU remaps large memory areas on restore
    (although such PMD_SIZE alignment is not very common in practice, it is
    possible).
    
    (cherry-picked from ms commit 2c91bd4a4e2e530582d6fd643ea7b86b27907151)
    Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
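
The alignment requirement described in the patchset description above can
be summarized as follows; a minimal sketch (not part of the patch) that
mirrors the entry check of move_normal_pmd() in the diff below, with
PMD_SIZE assumed to be 2MB as on x86_64:

	#include <stdbool.h>

	#define PMD_SIZE	(1UL << 21)	/* assumed: 2MB on x86_64 */
	#define PMD_MASK	(~(PMD_SIZE - 1))

	/*
	 * A chunk of a remapped region can be moved one whole page table at
	 * a time only if both the old and the new address are PMD-aligned
	 * and at least PMD_SIZE of the region is left; otherwise move_ptes()
	 * copies the PTEs one by one as before.
	 */
	bool pmd_move_eligible(unsigned long old_addr, unsigned long new_addr,
			       unsigned long len)
	{
		return !(old_addr & ~PMD_MASK) && !(new_addr & ~PMD_MASK) &&
		       len >= PMD_SIZE;
	}

For a CRIU restore this means both the old and the new mapping have to
start on a 2MB boundary and the moved size has to be a multiple of 2MB for
every PMD of the region to take the fast path.
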
---
 arch/Kconfig |  5 +++++
 mm/mremap.c  | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)

diff --git a/arch/Kconfig b/arch/Kconfig
index c2142afcf45f..4d7f52a9d739 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -669,6 +669,11 @@ config HAVE_IRQ_TIME_ACCOUNTING
 	  Archs need to ensure they use a high enough resolution clock to
 	  support irq time accounting and then call enable_sched_clock_irqtime().
 
+config HAVE_MOVE_PMD
+	bool
+	help
+	  Archs that select this are able to move page tables at the PMD level.
+
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	bool
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 8cc4fc1962b1..e482d8aa0b75 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -191,6 +191,52 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		drop_rmap_locks(vma);
 }
 
+#ifdef CONFIG_HAVE_MOVE_PMD
+static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+		  unsigned long new_addr, unsigned long old_end,
+		  pmd_t *old_pmd, pmd_t *new_pmd)
+{
+	spinlock_t *old_ptl, *new_ptl;
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t pmd;
+
+	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
+	    || old_end - old_addr < PMD_SIZE)
+		return false;
+
+	/*
+	 * The destination pmd shouldn't be established, free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON(!pmd_none(*new_pmd)))
+		return false;
+
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_sem prevents deadlock.
+	 */
+	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
+	new_ptl = pmd_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Clear the pmd */
+	pmd = *old_pmd;
+	pmd_clear(old_pmd);
+
+	VM_BUG_ON(!pmd_none(*new_pmd));
+
+	/* Set the new pmd */
+	set_pmd_at(mm, new_addr, new_pmd, pmd);
+	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	spin_unlock(old_ptl);
+
+	return true;
+}
+#endif
+
 unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
 		unsigned long new_addr, unsigned long len,
@@ -237,6 +283,23 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			split_huge_pmd(vma, old_pmd, old_addr);
 			if (pmd_trans_unstable(old_pmd))
 				continue;
+		} else if (extent == PMD_SIZE) {
+#ifdef CONFIG_HAVE_MOVE_PMD
+			/*
+			 * If the extent is PMD-sized, try to speed the move by
+			 * moving at the PMD level if possible.
+			 */
+			bool moved;
+
+			if (need_rmap_locks)
+				take_rmap_locks(vma);
+			moved = move_normal_pmd(vma, old_addr, new_addr,
+					old_end, old_pmd, new_pmd);
+			if (need_rmap_locks)
+				drop_rmap_locks(vma);
+			if (moved)
+				continue;
+#endif
 		}
 		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
 			break;

