[Devel] [PATCH RHEL7 COMMIT] ms/x86_64: kasan: add interceptors for memset/memmove/memcpy functions

Konstantin Khorenko khorenko at virtuozzo.com
Thu Sep 3 08:27:42 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-229.7.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.6.6
------>
commit 100caa44f8eb00f138f374132ff5137d85dc21da
Author: Andrey Ryabinin <aryabinin at odin.com>
Date:   Thu Sep 3 19:27:42 2015 +0400

    ms/x86_64: kasan: add interceptors for memset/memmove/memcpy functions
    
    https://jira.sw.ru/browse/PSBM-26429
    
    From: Andrey Ryabinin <a.ryabinin at samsung.com>
    
    commit 393f203f5fd54421fddb1e2a263f64d3876eeadb upstream.
    
    Recently, instrumentation of builtin function calls was removed from GCC
    5.0.  To check the memory accessed by such functions, userspace asan
    always uses interceptors for them.
    
    So now we should do this as well.  This patch declares
    memset/memmove/memcpy as weak symbols.  In mm/kasan/kasan.c we have our
    own implementations of those functions, which check memory before
    accessing it.
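    
    Since the assembly implementations are marked weak, the strong
    definitions in mm/kasan/kasan.c override them at link time.  The
    interceptor pattern, condensed from the mm/kasan/kasan.c hunk below
    (__asan_storeN() checks the shadow memory for the written range and
    reports a bad access before the real operation runs):
    
    	#undef memset
    	void *memset(void *addr, int c, size_t len)
    	{
    		__asan_storeN((unsigned long)addr, len);
    		return __memset(addr, c, len);
    	}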
    
    The default memset/memmove/memcpy implementations now always have
    aliases with a '__' prefix.  For files built without kasan
    instrumentation (e.g. mm/slub.c), the original mem* calls are replaced
    (via #define) with the prefixed variants, because we don't want to check
    memory accesses there.
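    
    Concretely, with CONFIG_KASAN=y a file compiled without
    -fsanitize=kernel-address (so __SANITIZE_ADDRESS__ is not defined)
    picks up the #define from <asm/string_64.h>.  An illustrative call
    (the variable names here are made up for the example):
    
    	/* e.g. in mm/slub.c, which is built without instrumentation: */
    	memset(object, 0, size);	/* expands to __memset(object, 0, size) */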
    
    Signed-off-by: Andrey Ryabinin <a.ryabinin at samsung.com>
    Cc: Dmitry Vyukov <dvyukov at google.com>
    Cc: Konstantin Serebryany <kcc at google.com>
    Cc: Dmitry Chernenkov <dmitryc at google.com>
    Signed-off-by: Andrey Konovalov <adech.fo at gmail.com>
    Cc: Yuri Gribov <tetra2005 at gmail.com>
    Cc: Konstantin Khlebnikov <koct9i at gmail.com>
    Cc: Sasha Levin <sasha.levin at oracle.com>
    Cc: Christoph Lameter <cl at linux.com>
    Cc: Joonsoo Kim <iamjoonsoo.kim at lge.com>
    Cc: Dave Hansen <dave.hansen at intel.com>
    Cc: Andi Kleen <andi at firstfloor.org>
    Cc: Ingo Molnar <mingo at elte.hu>
    Cc: Thomas Gleixner <tglx at linutronix.de>
    Cc: "H. Peter Anvin" <hpa at zytor.com>
    Cc: Pekka Enberg <penberg at kernel.org>
    Cc: David Rientjes <rientjes at google.com>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    Signed-off-by: Andrey Ryabinin <aryabinin at odin.com>
---
 arch/x86/boot/compressed/eboot.c |  5 +++--
 arch/x86/boot/compressed/misc.h  |  1 +
 arch/x86/include/asm/string_64.h | 18 +++++++++++++++++-
 arch/x86/kernel/x8664_ksyms_64.c | 10 ++++++++--
 arch/x86/lib/memcpy_64.S         |  6 ++++--
 arch/x86/lib/memmove_64.S        |  4 ++++
 arch/x86/lib/memset_64.S         | 10 ++++++----
 mm/kasan/kasan.c                 | 29 +++++++++++++++++++++++++++++
 8 files changed, 72 insertions(+), 11 deletions(-)

diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index dd94e98..dc3694d 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -7,6 +7,9 @@
  *
  * ----------------------------------------------------------------------- */
 
+#include "misc.h"
+#include <linux/types.h>
+#include "../string.h"
 #include <linux/efi.h>
 #include <linux/pci.h>
 #include <asm/efi.h>
@@ -14,8 +17,6 @@
 #include <asm/desc.h>
 #include <asm/bootparam_utils.h>
 
-#undef memcpy			/* Use memcpy from misc.c */
-
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 674019d..768b889 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -7,6 +7,7 @@
  * we just keep it from happening
  */
 #undef CONFIG_PARAVIRT
+#undef CONFIG_KASAN
 #ifdef CONFIG_X86_32
 #define _ASM_X86_DESC_H 1
 #endif
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 19e2c46..e466119 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,11 +27,12 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
    function. */
 
 #define __HAVE_ARCH_MEMCPY 1
+extern void *__memcpy(void *to, const void *from, size_t len);
+
 #ifndef CONFIG_KMEMCHECK
 #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
 extern void *memcpy(void *to, const void *from, size_t len);
 #else
-extern void *__memcpy(void *to, const void *from, size_t len);
 #define memcpy(dst, src, len)					\
 ({								\
 	size_t __len = (len);					\
@@ -53,9 +54,11 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
+void *__memset(void *s, int c, size_t n);
 
 #define __HAVE_ARCH_MEMMOVE
 void *memmove(void *dest, const void *src, size_t count);
+void *__memmove(void *dest, const void *src, size_t count);
 
 int memcmp(const void *cs, const void *ct, size_t count);
 size_t strlen(const char *s);
@@ -63,6 +66,19 @@ char *strcpy(char *dest, const char *src);
 char *strcat(char *dest, const char *src);
 int strcmp(const char *cs, const char *ct);
 
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#undef memcpy
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_STRING_64_H */
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 72606f1..00dd2d8 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -50,13 +50,19 @@ EXPORT_SYMBOL(csum_partial);
 #undef memset
 #undef memmove
 
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
 extern void *memset(void *, int, __kernel_size_t);
 extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(memmove);
 
 #ifndef CONFIG_DEBUG_VIRTUAL
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 56313a3..89b53c9 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -53,6 +53,8 @@
 .Lmemcpy_e_e:
 	.previous
 
+.weak memcpy
+
 ENTRY(__memcpy)
 ENTRY(memcpy)
 	CFI_STARTPROC
@@ -199,8 +201,8 @@ ENDPROC(__memcpy)
 	 * only outcome...
 	 */
 	.section .altinstructions, "a"
-	altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
+	altinstruction_entry __memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
 			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
-	altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
+	altinstruction_entry __memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
 			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
 	.previous
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 65268a6..9c4b530 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -24,7 +24,10 @@
  * Output:
  * rax: dest
  */
+.weak memmove
+
 ENTRY(memmove)
+ENTRY(__memmove)
 	CFI_STARTPROC
 
 	/* Handle more 32 bytes in loop */
@@ -220,4 +223,5 @@ ENTRY(memmove)
 		.Lmemmove_end_forward-.Lmemmove_begin_forward,	\
 		.Lmemmove_end_forward_efs-.Lmemmove_begin_forward_efs
 	.previous
+ENDPROC(__memmove)
 ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2dcb380..6f44935 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -56,6 +56,8 @@
 .Lmemset_e_e:
 	.previous
 
+.weak memset
+
 ENTRY(memset)
 ENTRY(__memset)
 	CFI_STARTPROC
@@ -147,8 +149,8 @@ ENDPROC(__memset)
          * feature to implement the right patch order.
 	 */
 	.section .altinstructions,"a"
-	altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
-			     .Lfinal-memset,.Lmemset_e-.Lmemset_c
-	altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
-			     .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e
+	altinstruction_entry __memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
+			     .Lfinal-__memset,.Lmemset_e-.Lmemset_c
+	altinstruction_entry __memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
+			     .Lfinal-__memset,.Lmemset_e_e-.Lmemset_c_e
 	.previous
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index dc83f07..799c52b 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -255,6 +255,35 @@ static __always_inline void check_memory_region(unsigned long addr,
 	kasan_report(addr, size, write, _RET_IP_);
 }
 
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+
+#undef memset
+void *memset(void *addr, int c, size_t len)
+{
+	__asan_storeN((unsigned long)addr, len);
+
+	return __memset(addr, c, len);
+}
+
+#undef memmove
+void *memmove(void *dest, const void *src, size_t len)
+{
+	__asan_loadN((unsigned long)src, len);
+	__asan_storeN((unsigned long)dest, len);
+
+	return __memmove(dest, src, len);
+}
+
+#undef memcpy
+void *memcpy(void *dest, const void *src, size_t len)
+{
+	__asan_loadN((unsigned long)src, len);
+	__asan_storeN((unsigned long)dest, len);
+
+	return __memcpy(dest, src, len);
+}
+
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
 	if (likely(!PageHighMem(page)))


