[Devel] [PATCH RHEL7 COMMIT] ms/x86/mm: Introduce mmap_compat_base() for 32-bit mmap()

Konstantin Khorenko khorenko at virtuozzo.com
Wed May 31 05:00:00 PDT 2017


The commit is pushed to "branch-rh7-3.10.0-514.16.1.vz7.32.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.16.1.vz7.32.5
------>
commit 8d86b59435a85166c13b149f27113610242fa564
Author: Dmitry Safonov <dsafonov at virtuozzo.com>
Date:   Wed May 31 16:00:00 2017 +0400

    ms/x86/mm: Introduce mmap_compat_base() for 32-bit mmap()
    
    mmap() uses a base address, from which it starts to look for free space
    for allocation.
    
    The base address is stored in mm->mmap_base, which is calculated during
    exec(). It depends on the task size, the stack rlimit and ASLR
    randomization; both the task size and the number of random bits differ
    between 64-bit and 32-bit applications.
    
    Because the base address is fixed, an mmap() from a compat (32-bit)
    syscall issued by a 64-bit task will return an address which is based
    on the 64-bit base address and does not fit into the 32-bit address
    space (4GB). The returned pointer is truncated to 32 bits, which
    results in an invalid address.
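
    The failure mode can be reproduced with a small userspace sketch (not
    part of this patch, illustrative only): a 64-bit binary issues the ia32
    old_mmap() syscall (nr 90) through int $0x80, which requires
    CONFIG_IA32_EMULATION. The argument block has to live below 4GB, since
    the compat syscall only sees a 32-bit pointer:

	#define _GNU_SOURCE		/* for MAP_32BIT */
	#include <stdio.h>
	#include <sys/mman.h>

	/* Argument block of the ia32 old_mmap() syscall. */
	struct mmap_arg_struct32 {
		unsigned int addr, len, prot, flags, fd, offset;
	};

	int main(void)
	{
		struct mmap_arg_struct32 *args;
		unsigned int ret;

		/* Place the argument block below 4GB so the compat
		 * syscall can dereference it. */
		args = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
		if (args == MAP_FAILED)
			return 1;

		args->addr   = 0;
		args->len    = 4096;
		args->prot   = PROT_READ | PROT_WRITE;
		args->flags  = MAP_PRIVATE | MAP_ANONYMOUS;
		args->fd     = -1;
		args->offset = 0;

		/* ia32 old_mmap (nr 90) via the legacy int $0x80 entry. */
		asm volatile ("int $0x80"
			      : "=a" (ret)
			      : "0" (90), "b" (args)
			      : "memory", "r8", "r9", "r10", "r11");

		/*
		 * Without separate compat bases the search starts from the
		 * 64-bit mmap_base and the 32-bit return value is a
		 * truncated, unusable address; with them, the mapping fits
		 * below 4GB.
		 */
		printf("compat mmap() returned 0x%x\n", ret);
		return 0;
	}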
    
    To solve this, store a separate compat address base plus a compat
    legacy address base in mm_struct. These bases are calculated at exec()
    time and can be used later to serve a 32-bit compat mmap() issued by
    64-bit applications.
    
    As a consequence of this change, 32-bit applications issuing a 64-bit
    syscall (after doing a long jump) will now get a 64-bit mapping. Before
    this change, 32-bit applications always got a 32-bit mapping.
    
    [ tglx: Massaged changelog and added a comment ]
    
    Signed-off-by: Dmitry Safonov <dsafonov at virtuozzo.com>
    
    Cc: 0x7f454c46 at gmail.com
    Cc: linux-mm at kvack.org
    Cc: Andy Lutomirski <luto at kernel.org>
    Cc: Cyrill Gorcunov <gorcunov at openvz.org>
    Cc: Borislav Petkov <bp at suse.de>
    Cc: "Kirill A. Shutemov" <kirill.shutemov at linux.intel.com>
    Link: http://lkml.kernel.org/r/20170306141721.9188-4-dsafonov@virtuozzo.com
    Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
    
    [picked from ms commit 1b028f784e8c]
    Signed-off-by: Dmitry Safonov <dsafonov at virtuozzo.com>
---
 arch/Kconfig                 |  8 ++++++++
 arch/x86/Kconfig             |  1 +
 arch/x86/include/asm/elf.h   |  3 +++
 arch/x86/kernel/sys_x86_64.c | 23 +++++++++++++++++++----
 arch/x86/mm/mmap.c           | 42 ++++++++++++++++++++++++++++++++----------
 include/linux/mm_types.h     |  5 +++++
 6 files changed, 68 insertions(+), 14 deletions(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 2b3b7d5..771b379 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -473,6 +473,14 @@ config ARCH_HAS_ELF_RANDOMIZE
 	  stack, mmap, brk, and ET_DYN. Defined functions:
 	  - arch_mmap_rnd()
 
+config HAVE_ARCH_COMPAT_MMAP_BASES
+	bool
+	help
+	  This allows 64bit applications to invoke 32-bit mmap() syscall
+	  and vice-versa 32-bit applications to call 64-bit mmap().
+	  Required for applications doing different bitness syscalls.
+
+
 #
 # ABI hall of shame
 #
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 29902c4..63e5dbe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -87,6 +87,7 @@ config X86
 	select HAVE_USER_RETURN_NOTIFIER
 	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select ARCH_HAS_ELF_RANDOMIZE
+	select HAVE_ARCH_COMPAT_MMAP_BASES	if MMU && COMPAT
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_TEXT_POKE_SMP
 	select HAVE_GENERIC_HARDIRQS
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index e705713..6949194 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -287,6 +287,9 @@ static inline int mmap_is_ia32(void)
 		test_thread_flag(TIF_ADDR32));
 }
 
+extern unsigned long tasksize_32bit(void);
+extern unsigned long tasksize_64bit(void);
+
 #ifdef CONFIG_X86_32
 
 #define __STACK_RND_MASK(is32bit) (0x7ff)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272..7c64989 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -16,6 +16,8 @@
 #include <linux/uaccess.h>
 #include <linux/elf.h>
 
+#include <asm/elf.h>
+#include <asm/compat.h>
 #include <asm/ia32.h>
 #include <asm/syscalls.h>
 
@@ -97,6 +99,18 @@ out:
 	return error;
 }
 
+static unsigned long get_mmap_base(int is_legacy)
+{
+	struct mm_struct *mm = current->mm;
+
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+	if (in_compat_syscall())
+		return is_legacy ? mm->mmap_compat_legacy_base
+				 : mm->mmap_compat_base;
+#endif
+	return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
+}
+
 static void find_start_end(unsigned long flags, unsigned long *begin,
 			   unsigned long *end)
 {
@@ -116,10 +130,11 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
 			if (new_begin)
 				*begin = new_begin;
 		}
-	} else {
-		*begin = current->mm->mmap_legacy_base;
-		*end = TASK_SIZE;
+		return;
 	}
+
+	*begin	= get_mmap_base(1);
+	*end	= in_compat_syscall() ? tasksize_32bit() : tasksize_64bit();
 }
 
 unsigned long
@@ -193,7 +208,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
-	info.high_limit = mm->mmap_base;
+	info.high_limit = get_mmap_base(0);
 	info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	if (filp) {
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index d0c222d..f97dd2d 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -35,11 +35,16 @@ struct __read_mostly va_alignment va_align = {
 	.flags = -1,
 };
 
-static inline unsigned long tasksize_32bit(void)
+unsigned long tasksize_32bit(void)
 {
 	return IA32_PAGE_OFFSET;
 }
 
+unsigned long tasksize_64bit(void)
+{
+	return TASK_SIZE_MAX;
+}
+
 static unsigned long stack_maxrandom_size(unsigned long task_size)
 {
 	unsigned long max = 0;
@@ -81,6 +86,8 @@ static unsigned long arch_rnd(unsigned int rndbits)
 
 unsigned long arch_mmap_rnd(void)
 {
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
 	return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
 }
 
@@ -114,22 +121,37 @@ static unsigned long mmap_legacy_base(unsigned long rnd,
  * This function, called very early during the creation of a new
  * process VM image, sets up which VM layout function to use:
  */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
+		unsigned long random_factor, unsigned long task_size)
 {
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	mm->mmap_legacy_base = mmap_legacy_base(random_factor, TASK_SIZE);
+	*legacy_base = mmap_legacy_base(random_factor, task_size);
+	if (mmap_is_legacy())
+		*base = *legacy_base;
+	else
+		*base = mmap_base(random_factor, task_size);
+}
 
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
 	if (mmap_is_legacy()) {
-		mm->mmap_base = mm->mmap_legacy_base;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
-		mm->mmap_base = mmap_base(random_factor, TASK_SIZE);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
+
+	arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
+			arch_rnd(mmap64_rnd_bits), tasksize_64bit());
+
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+	/*
+	 * The mmap syscall mapping base decision depends solely on the
+	 * syscall type (64-bit or compat). This applies for 64bit
+	 * applications and 32bit applications. The 64bit syscall uses
+	 * mmap_base, the compat syscall uses mmap_compat_base.
+	 */
+	arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
+			arch_rnd(mmap32_rnd_bits), tasksize_32bit());
+#endif
 }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5022363..ad703c4 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -413,6 +413,11 @@ struct mm_struct {
 #endif
 	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+	/* Base addresses for compatible mmap() */
+	unsigned long mmap_compat_base;
+	unsigned long mmap_compat_legacy_base;
+#endif
 	unsigned long task_size;		/* size of task vm space */
 	unsigned long cached_hole_size; 	/* if non-zero, the largest hole below free_area_cache */
 	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */

