[Devel] [PATCH RHEL7 COMMIT] ms/x86/kasan: instrument user memory access API

Konstantin Khorenko khorenko@virtuozzo.com
Fri May 27 02:00:08 PDT 2016


The commit is pushed to "branch-rh7-3.10.0-327.18.2.vz7.14.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.18.2.vz7.14.8
------>
commit d1a6b4f4de8a395c7db3bc63acde73a364e25caa
Author: Andrey Ryabinin <aryabinin@virtuozzo.com>
Date:   Fri May 27 13:00:08 2016 +0400

    ms/x86/kasan: instrument user memory access API
    
    ms commit 1771c6e1a567ea0ba2cccc0a4ffe68a1419fd8ef
    
    Exchanges between user and kernel memory are coded in assembly, which
    means such accesses won't be spotted by KASAN, since the compiler
    instruments only C code.
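
    With KASAN, the compiler rewrites every C-level memory access to go
    through a shadow check first; conceptually (the check call is real,
    the rewrite shown here is schematic):

        *p = 1;                            /* original C statement     */

        __asan_store4((unsigned long)p);   /* inserted by the compiler */
        *p = 1;                            /* ...then the actual store */

    Hand-written .S files and asm() copy loops never get these calls,
    which is why the usercopy primitives need explicit checks.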
    
    Add explicit KASAN checks to the user memory access API to ensure that
    these copies write to (or read from) valid kernel memory.
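
    The two helpers used below come from <linux/kasan-checks.h>; roughly:

        #ifdef CONFIG_KASAN
        void kasan_check_read(const void *p, unsigned int size);
        void kasan_check_write(const void *p, unsigned int size);
        #else
        static inline void kasan_check_read(const void *p,
                                            unsigned int size) { }
        static inline void kasan_check_write(const void *p,
                                             unsigned int size) { }
        #endif

    Each call verifies that [p, p + size) is addressable, unpoisoned
    kernel memory and emits a KASAN report otherwise; with CONFIG_KASAN=n
    both compile away to nothing.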
    
    Note: unlike the others, strncpy_from_user() is written mostly in C,
    so KASAN does see its memory accesses.  However, it makes sense to add
    an explicit check for all @count bytes that could *potentially* be
    written to the kernel.
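
    A hypothetical caller illustrates the point (sizes invented for the
    example):

        char *dst = kmalloc(16, GFP_KERNEL);        /* 16-byte buffer */
        long n = strncpy_from_user(dst, usrc, 64);  /* count == 64    */

    Even if the user string fits into 16 bytes, kasan_check_write(dst, 64)
    reports the potential out-of-bounds write up front.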
    
    [aryabinin@virtuozzo.com: move kasan check under the condition]
      Link: http://lkml.kernel.org/r/1462869209-21096-1-git-send-email-aryabinin@virtuozzo.com
    Link: http://lkml.kernel.org/r/1462538722-1574-4-git-send-email-aryabinin@virtuozzo.com
    Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
    
    Cc: Alexander Potapenko <glider@google.com>
    Cc: Dmitry Vyukov <dvyukov@google.com>
    Cc: Ingo Molnar <mingo@elte.hu>
    Cc: "H. Peter Anvin" <hpa@zytor.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 arch/x86/include/asm/uaccess.h    |  1 +
 arch/x86/include/asm/uaccess_64.h | 10 +++++++++-
 lib/strncpy_from_user.c           |  2 ++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ee2687..854b048 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -5,6 +5,7 @@
  */
 #include <linux/errno.h>
 #include <linux/compiler.h>
+#include <linux/kasan-checks.h>
 #include <linux/thread_info.h>
 #include <linux/string.h>
 #include <asm/asm.h>
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 4f7923d..6550c0a 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -7,6 +7,7 @@
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/lockdep.h>
+#include <linux/kasan-checks.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/page.h>
@@ -59,6 +60,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	int sz = __compiletime_object_size(to);
 
 	might_fault();
+	kasan_check_write(to, n);
 	if (likely(sz == -1 || sz >= n))
 		n = _copy_from_user(to, from, n);
 #ifdef CONFIG_DEBUG_VM
@@ -72,7 +74,7 @@ static __always_inline __must_check
 int copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	might_fault();
-
+	kasan_check_read(src, size);
 	return _copy_to_user(dst, src, size);
 }
 
@@ -81,6 +83,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
+	kasan_check_write(dst, size);
 	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
@@ -125,6 +128,7 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
+	kasan_check_read(src, size);
 	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
@@ -220,12 +224,14 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
+	kasan_check_write(dst, size);
 	return copy_user_generic(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
+	kasan_check_read(src, size);
 	return copy_user_generic((__force void *)dst, src, size);
 }
 
@@ -236,6 +242,7 @@ static inline int
 __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
 	might_fault();
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 1);
 }
 
@@ -243,6 +250,7 @@ static inline int
 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 				  unsigned size)
 {
+	kasan_check_write(dst, size);
 	return __copy_user_nocache(dst, src, size, 0);
 }
 
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index bb2b201..b5e2ad8 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/kasan-checks.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 
@@ -106,6 +107,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	src_addr = (unsigned long)src;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
+		kasan_check_write(dst, count);
 		return do_strncpy_from_user(dst, src, count, max);
 	}
 	return -EFAULT;
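
A quick way to watch the new checks fire is a throwaway test in the
spirit of the KASAN self-tests (lib/test_kasan.c); the function below is
an illustrative sketch, not part of this patch:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/mman.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    /* With this patch applied and CONFIG_KASAN=y, the copy_from_user()
     * below produces an out-of-bounds KASAN report even though the copy
     * itself runs in assembly. */
    static noinline void kasan_usercopy_demo(void)
    {
    	char *kmem;
    	char __user *usermem;
    	size_t size = 10;

    	kmem = kmalloc(size, GFP_KERNEL);
    	if (!kmem)
    		return;

    	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
    					 PROT_READ | PROT_WRITE,
    					 MAP_ANONYMOUS | MAP_PRIVATE, 0);
    	if (IS_ERR(usermem)) {
    		kfree(kmem);
    		return;
    	}

    	/* One byte past the 10-byte allocation: kasan_check_write()
    	 * in copy_from_user() flags it before the copy runs. */
    	if (copy_from_user(kmem, usermem, size + 1))
    		pr_err("copy_from_user faulted\n");

    	vm_munmap((unsigned long)usermem, PAGE_SIZE);
    	kfree(kmem);
    }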

