[Devel] [PATCH RHEL8 COMMIT] userfaultfd: wp: support write protection for userfault vma range

Konstantin Khorenko khorenko at virtuozzo.com
Mon Apr 20 10:34:44 MSK 2020


The commit is pushed to "branch-rh8-4.18.0-80.1.2.vz8.3.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-80.1.2.vz8.3.6
------>
commit c75cf243c0bc4a654255b067637013f386983778
Author: Shaohua Li <shli at fb.com>
Date:   Mon Apr 20 10:34:44 2020 +0300

    userfaultfd: wp: support write protection for userfault vma range
    
    Add API to enable/disable writeprotect a vma range.  Unlike mprotect, this
    doesn't split/merge vmas.
    
    [peterx at redhat.com:
     - use the helper to find VMA;
     - return -ENOENT if not found to match mcopy case;
     - use the new MM_CP_UFFD_WP* flags for change_protection
     - check against mmap_changing for failures
     - replace find_dst_vma with vma_find_uffd]
    Signed-off-by: Shaohua Li <shli at fb.com>
    Signed-off-by: Andrea Arcangeli <aarcange at redhat.com>
    Signed-off-by: Peter Xu <peterx at redhat.com>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Reviewed-by: Jerome Glisse <jglisse at redhat.com>
    Reviewed-by: Mike Rapoport <rppt at linux.vnet.ibm.com>
    Cc: Andrea Arcangeli <aarcange at redhat.com>
    Cc: Rik van Riel <riel at redhat.com>
    Cc: Kirill A. Shutemov <kirill at shutemov.name>
    Cc: Mel Gorman <mgorman at suse.de>
    Cc: Hugh Dickins <hughd at google.com>
    Cc: Johannes Weiner <hannes at cmpxchg.org>
    Cc: Bobby Powers <bobbypowers at gmail.com>
    Cc: Brian Geffon <bgeffon at google.com>
    Cc: David Hildenbrand <david at redhat.com>
    Cc: Denis Plotnikov <dplotnikov at virtuozzo.com>
    Cc: "Dr . David Alan Gilbert" <dgilbert at redhat.com>
    Cc: Martin Cracauer <cracauer at cons.org>
    Cc: Marty McFadden <mcfadden8 at llnl.gov>
    Cc: Maya Gokhale <gokhale2 at llnl.gov>
    Cc: Mike Kravetz <mike.kravetz at oracle.com>
    Cc: Pavel Emelyanov <xemul at openvz.org>
    Link: http://lkml.kernel.org/r/20200220163112.11409-13-peterx@redhat.com
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    
    https://jira.sw.ru/browse/PSBM-102938
    (cherry picked from commit ffd05793963a44bd119311df3c02b191982574ee)
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 include/linux/userfaultfd_k.h |  3 +++
 mm/userfaultfd.c              | 54 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 8776b913d2e9..d2aa1199d741 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -39,6 +39,9 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
 			      unsigned long dst_start,
 			      unsigned long len,
 			      bool *mmap_changing);
+extern int mwriteprotect_range(struct mm_struct *dst_mm,
+			       unsigned long start, unsigned long len,
+			       bool enable_wp, bool *mmap_changing);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 2db2b80b3c72..046486b2a98a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -641,3 +641,57 @@ ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
 {
 	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
 }
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+			unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+	struct vm_area_struct *dst_vma;
+	pgprot_t newprot;
+	int err;
+
+	/*
+	 * Sanitize the command parameters: start and len are page-aligned.
+	 */
+	BUG_ON(start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(start + len <= start);
+
+	down_read(&dst_mm->mmap_sem);
+
+	/*
+	 * If memory mappings are changing because of a non-cooperative
+	 * operation (e.g. mremap) running in parallel, bail out and
+	 * ask the caller to retry later.
+	 */
+	err = -EAGAIN;
+	if (mmap_changing && READ_ONCE(*mmap_changing))
+		goto out_unlock;
+
+	err = -ENOENT;
+	dst_vma = find_dst_vma(dst_mm, start, len);
+	/*
+	 * Make sure the vma is not shared, is registered for uffd-wp and
+	 * anonymous, and that [start, start + len) lies within that vma.
+	 */
+	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+		goto out_unlock;
+	if (!userfaultfd_wp(dst_vma))
+		goto out_unlock;
+	if (!vma_is_anonymous(dst_vma))
+		goto out_unlock;
+
+	if (enable_wp)
+		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+	else
+		newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+	change_protection(dst_vma, start, start + len, newprot,
+			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+	err = 0;
+out_unlock:
+	up_read(&dst_mm->mmap_sem);
+	return err;
+}


More information about the Devel mailing list