linux-mm.kvack.org archive mirror
From: glider@google.com
To: Jens Axboe <axboe@kernel.dk>, Andy Lutomirski <luto@kernel.org>,
	Wolfram Sang <wsa@the-dreams.de>,  Christoph Hellwig <hch@lst.de>,
	Vegard Nossum <vegard.nossum@oracle.com>,
	 Dmitry Vyukov <dvyukov@google.com>,
	Marco Elver <elver@google.com>,
	 Andrey Konovalov <andreyknvl@google.com>,
	linux-mm@kvack.org
Cc: glider@google.com, viro@zeniv.linux.org.uk,
	adilger.kernel@dilger.ca,  akpm@linux-foundation.org,
	aryabinin@virtuozzo.com, ard.biesheuvel@linaro.org,
	 arnd@arndb.de, hch@infradead.org, darrick.wong@oracle.com,
	 davem@davemloft.net, dmitry.torokhov@gmail.com,
	ebiggers@google.com,  edumazet@google.com, ericvh@gmail.com,
	gregkh@linuxfoundation.org,  harry.wentland@amd.com,
	herbert@gondor.apana.org.au, iii@linux.ibm.com,  mingo@elte.hu,
	jasowang@redhat.com, m.szyprowski@samsung.com,
	 mark.rutland@arm.com, martin.petersen@oracle.com,
	schwidefsky@de.ibm.com,  willy@infradead.org, mst@redhat.com,
	mhocko@suse.com, monstr@monstr.eu,  pmladek@suse.com, cai@lca.pw,
	rdunlap@infradead.org, robin.murphy@arm.com,
	 sergey.senozhatsky@gmail.com, rostedt@goodmis.org,
	tiwai@suse.com,  tytso@mit.edu, tglx@linutronix.de,
	gor@linux.ibm.com
Subject: [PATCH RFC v4 11/42] kmsan: add KMSAN hooks for kernel subsystems
Date: Fri, 20 Dec 2019 19:49:24 +0100	[thread overview]
Message-ID: <20191220184955.223741-12-glider@google.com> (raw)
In-Reply-To: <20191220184955.223741-1-glider@google.com>

This patch provides hooks that subsystems use to notify KMSAN about
changes in the kernel state. Such changes include:
 - page operations (allocation, deletion, splitting, mapping);
 - memory allocation and deallocation;
 - entering and leaving IRQ/NMI/softirq contexts;
 - copying data between kernel, userspace and hardware.

This patch has been split away from the rest of the KMSAN runtime to
simplify the review process.
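
For illustration (not part of this patch), a subsystem or driver could
consume the annotation helpers added here roughly as follows. The function
names below are made up for the example; kmsan_unpoison_shadow() and
kmsan_check_memory() are defined in this patch and declared in
kmsan-checks.h elsewhere in this series:

  #include <linux/kmsan-checks.h>

  /* Data written by the device is initialized from KMSAN's point of view. */
  static void example_rx_complete(void *buf, size_t len)
  {
  	kmsan_unpoison_shadow(buf, len);
  }

  /* Report uses of uninitialized memory before the data leaves the kernel. */
  static void example_tx_submit(const void *buf, size_t len)
  {
  	kmsan_check_memory(buf, len);
  }

Most of the other hooks in this patch are called from core kernel code
(SLUB, vmalloc, USB, DMA, copy_to_user()) rather than directly by drivers;
that wiring is added by later patches in this series.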

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Wolfram Sang <wsa@the-dreams.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: linux-mm@kvack.org

---

v4:
 - address review comments from Marco Elver and Andrey Konovalov:
 - clean up headers and #defines, remove debugging code
 - simplify the KMSAN entry hooks
 - fix kmsan_check_skb()

Change-Id: I99d1f34f26bef122897cb840dac8d5b34d2b6a80
---
 arch/x86/include/asm/kmsan.h |  93 ++++++++
 mm/kmsan/kmsan_entry.c       |  38 ++++
 mm/kmsan/kmsan_hooks.c       | 416 +++++++++++++++++++++++++++++++++++
 3 files changed, 547 insertions(+)
 create mode 100644 arch/x86/include/asm/kmsan.h
 create mode 100644 mm/kmsan/kmsan_entry.c
 create mode 100644 mm/kmsan/kmsan_hooks.c

diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
new file mode 100644
index 000000000000..f924f29f90f9
--- /dev/null
+++ b/arch/x86/include/asm/kmsan.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Assembly bits to safely invoke KMSAN hooks from .S files.
+ *
+ * Copyright (C) 2017-2019 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _ASM_X86_KMSAN_H
+#define _ASM_X86_KMSAN_H
+
+#ifdef CONFIG_KMSAN
+
+#ifdef __ASSEMBLY__
+.macro KMSAN_PUSH_REGS
+	pushq	%rax
+	pushq	%rcx
+	pushq	%rdx
+	pushq	%rdi
+	pushq	%rsi
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+.endm
+
+.macro KMSAN_POP_REGS
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+	popq	%rsi
+	popq	%rdi
+	popq	%rdx
+	popq	%rcx
+	popq	%rax
+
+.endm
+
+.macro KMSAN_CALL_HOOK fname
+	KMSAN_PUSH_REGS
+	call \fname
+	KMSAN_POP_REGS
+.endm
+
+.macro KMSAN_CONTEXT_ENTER
+	KMSAN_CALL_HOOK kmsan_context_enter
+.endm
+
+.macro KMSAN_CONTEXT_EXIT
+	KMSAN_CALL_HOOK kmsan_context_exit
+.endm
+
+#define KMSAN_INTERRUPT_ENTER KMSAN_CONTEXT_ENTER
+#define KMSAN_INTERRUPT_EXIT KMSAN_CONTEXT_EXIT
+
+#define KMSAN_SOFTIRQ_ENTER KMSAN_CONTEXT_ENTER
+#define KMSAN_SOFTIRQ_EXIT KMSAN_CONTEXT_EXIT
+
+#define KMSAN_NMI_ENTER KMSAN_CONTEXT_ENTER
+#define KMSAN_NMI_EXIT KMSAN_CONTEXT_EXIT
+
+#define KMSAN_IST_ENTER(shift_ist) KMSAN_CONTEXT_ENTER
+#define KMSAN_IST_EXIT(shift_ist) KMSAN_CONTEXT_EXIT
+
+.macro KMSAN_UNPOISON_PT_REGS
+	KMSAN_CALL_HOOK kmsan_unpoison_pt_regs
+.endm
+
+#else
+#error this header must be included into an assembly file
+#endif
+
+#else /* ifdef CONFIG_KMSAN */
+
+#define KMSAN_INTERRUPT_ENTER
+#define KMSAN_INTERRUPT_EXIT
+#define KMSAN_SOFTIRQ_ENTER
+#define KMSAN_SOFTIRQ_EXIT
+#define KMSAN_NMI_ENTER
+#define KMSAN_NMI_EXIT
+#define KMSAN_SYSCALL_ENTER
+#define KMSAN_SYSCALL_EXIT
+#define KMSAN_IST_ENTER(shift_ist)
+#define KMSAN_IST_EXIT(shift_ist)
+#define KMSAN_UNPOISON_PT_REGS
+
+#endif /* ifdef CONFIG_KMSAN */
+#endif /* ifndef _ASM_X86_KMSAN_H */
diff --git a/mm/kmsan/kmsan_entry.c b/mm/kmsan/kmsan_entry.c
new file mode 100644
index 000000000000..7af31642cd45
--- /dev/null
+++ b/mm/kmsan/kmsan_entry.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN hooks for entry_64.S
+ *
+ * Copyright (C) 2018-2019 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "kmsan.h"
+
+void kmsan_context_enter(void)
+{
+	int level = this_cpu_inc_return(kmsan_context_level);
+
+	BUG_ON(level >= KMSAN_NESTED_CONTEXT_MAX);
+}
+EXPORT_SYMBOL(kmsan_context_enter);
+
+void kmsan_context_exit(void)
+{
+	int level = this_cpu_dec_return(kmsan_context_level);
+
+	BUG_ON(level < 0);
+}
+EXPORT_SYMBOL(kmsan_context_exit);
+
+void kmsan_unpoison_pt_regs(struct pt_regs *regs)
+{
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	kmsan_internal_unpoison_shadow(regs, sizeof(*regs), /*checked*/true);
+}
+EXPORT_SYMBOL(kmsan_unpoison_pt_regs);
diff --git a/mm/kmsan/kmsan_hooks.c b/mm/kmsan/kmsan_hooks.c
new file mode 100644
index 000000000000..8ddfd91b1d11
--- /dev/null
+++ b/mm/kmsan/kmsan_hooks.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN hooks for kernel subsystems.
+ *
+ * These functions handle creation of KMSAN metadata for memory allocations.
+ *
+ * Copyright (C) 2018-2019 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "../slab.h"
+#include "kmsan.h"
+
+/*
+ * The functions may call back to instrumented code, which, in turn, may call
+ * these hooks again. To avoid re-entrancy, we use __GFP_NO_KMSAN_SHADOW.
+ * Instrumented functions shouldn't be called under
+ * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
+ * skipping effects of functions like memset() inside instrumented code.
+ */
+
+/* Called from kernel/kthread.c, kernel/fork.c */
+void kmsan_task_create(struct task_struct *task)
+{
+	unsigned long irq_flags;
+
+	if (!task)
+		return;
+	irq_flags = kmsan_enter_runtime();
+	kmsan_internal_task_create(task);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_task_create);
+
+/* Called from kernel/exit.c */
+void kmsan_task_exit(struct task_struct *task)
+{
+	struct kmsan_task_state *state = &task->kmsan;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	state->allow_reporting = false;
+}
+EXPORT_SYMBOL(kmsan_task_exit);
+
+/* Called from mm/slub.c */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
+{
+	unsigned long irq_flags;
+
+	if (unlikely(object == NULL))
+		return;
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	/*
+	 * There's a ctor or this is an RCU cache - do nothing. The memory
+	 * status hasn't changed since last use.
+	 */
+	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
+		return;
+
+	irq_flags = kmsan_enter_runtime();
+	if (flags & __GFP_ZERO)
+		kmsan_internal_unpoison_shadow(object, s->object_size,
+					       KMSAN_POISON_CHECK);
+	else
+		kmsan_internal_poison_shadow(object, s->object_size, flags,
+					     KMSAN_POISON_CHECK);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_slab_alloc);
+
+/* Called from mm/slub.c */
+void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
+		return;
+	/*
+	 * If there's a constructor, freed memory must remain in the same state
+	 * till the next allocation. We cannot save its state to detect
+	 * use-after-free bugs, instead we just keep it unpoisoned.
+	 */
+	if (s->ctor)
+		return;
+	irq_flags = kmsan_enter_runtime();
+	kmsan_internal_poison_shadow(object, s->object_size,
+				     GFP_KERNEL,
+				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_slab_free);
+
+/* Called from mm/slub.c */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+	unsigned long irq_flags;
+
+	if (unlikely(ptr == NULL))
+		return;
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	irq_flags = kmsan_enter_runtime();
+	if (flags & __GFP_ZERO)
+		kmsan_internal_unpoison_shadow((void *)ptr, size,
+					       /*checked*/true);
+	else
+		kmsan_internal_poison_shadow((void *)ptr, size, flags,
+					     KMSAN_POISON_CHECK);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_kmalloc_large);
+
+/* Called from mm/slub.c */
+void kmsan_kfree_large(const void *ptr)
+{
+	struct page *page;
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	irq_flags = kmsan_enter_runtime();
+	page = virt_to_head_page((void *)ptr);
+	BUG_ON(ptr != page_address(page));
+	kmsan_internal_poison_shadow(
+		(void *)ptr, PAGE_SIZE << compound_order(page), GFP_KERNEL,
+		KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_kfree_large);
+
+static unsigned long vmalloc_shadow(unsigned long addr)
+{
+	return (unsigned long)kmsan_get_metadata((void *)addr, 1, META_SHADOW);
+}
+
+static unsigned long vmalloc_origin(unsigned long addr)
+{
+	return (unsigned long)kmsan_get_metadata((void *)addr, 1, META_ORIGIN);
+}
+
+/* Called from mm/vmalloc.c */
+void kmsan_vunmap_page_range(unsigned long start, unsigned long end)
+{
+	__vunmap_page_range(vmalloc_shadow(start), vmalloc_shadow(end));
+	__vunmap_page_range(vmalloc_origin(start), vmalloc_origin(end));
+}
+EXPORT_SYMBOL(kmsan_vunmap_page_range);
+
+/* Called from lib/ioremap.c */
+/*
+ * This function creates new shadow/origin pages for the physical pages mapped
+ * into the virtual memory. If those physical pages already had shadow/origin,
+ * those are ignored.
+ */
+void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+	phys_addr_t phys_addr, pgprot_t prot)
+{
+	unsigned long irq_flags;
+	struct page *shadow, *origin;
+	int i, nr;
+	unsigned long off = 0;
+	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO | __GFP_NO_KMSAN_SHADOW;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	nr = (end - start) / PAGE_SIZE;
+	irq_flags = kmsan_enter_runtime();
+	for (i = 0; i < nr; i++, off += PAGE_SIZE) {
+		shadow = alloc_pages(gfp_mask, 1);
+		origin = alloc_pages(gfp_mask, 1);
+		__vmap_page_range_noflush(vmalloc_shadow(start + off),
+				vmalloc_shadow(start + off + PAGE_SIZE),
+				prot, &shadow);
+		__vmap_page_range_noflush(vmalloc_origin(start + off),
+				vmalloc_origin(start + off + PAGE_SIZE),
+				prot, &origin);
+	}
+	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_ioremap_page_range);
+
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
+{
+	int i, nr;
+	struct page *shadow, *origin;
+	unsigned long v_shadow, v_origin;
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	nr = (end - start) / PAGE_SIZE;
+	irq_flags = kmsan_enter_runtime();
+	v_shadow = (unsigned long)vmalloc_shadow(start);
+	v_origin = (unsigned long)vmalloc_origin(start);
+	for (i = 0; i < nr; i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
+		shadow = vmalloc_to_page_or_null((void *)v_shadow);
+		origin = vmalloc_to_page_or_null((void *)v_origin);
+		__vunmap_page_range(v_shadow, v_shadow + PAGE_SIZE);
+		__vunmap_page_range(v_origin, v_origin + PAGE_SIZE);
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+	}
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_iounmap_page_range);
+
+/* Called from include/linux/uaccess.h */
+void kmsan_copy_to_user(const void *to, const void *from,
+			size_t to_copy, size_t left)
+{
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	/*
+	 * At this point we've copied the memory already. It's hard to check it
+	 * before copying, as the size of actually copied buffer is unknown.
+	 */
+
+	/* copy_to_user() may copy zero bytes. No need to check. */
+	if (!to_copy)
+		return;
+	/* Or maybe copy_to_user() failed to copy anything. */
+	if (to_copy == left)
+		return;
+	if ((u64)to < TASK_SIZE) {
+		/* This is a user memory access, check it. */
+		kmsan_internal_check_memory((void *)from, to_copy - left, to,
+						REASON_COPY_TO_USER);
+		return;
+	}
+	/* Otherwise this is a kernel memory access. This happens when a compat
+	 * syscall passes an argument allocated on the kernel stack to a real
+	 * syscall.
+	 * Don't check anything, just copy the shadow of the copied bytes.
+	 */
+	kmsan_memcpy_metadata((void *)to, (void *)from, to_copy - left);
+}
+EXPORT_SYMBOL(kmsan_copy_to_user);
+
+void kmsan_gup_pgd_range(struct page **pages, int nr)
+{
+	int i;
+	void *page_addr;
+
+	/*
+	 * gup_pgd_range() has just created a number of new pages that KMSAN
+	 * treats as uninitialized. In the case they belong to the userspace
+	 * memory, unpoison the corresponding kernel pages.
+	 */
+	for (i = 0; i < nr; i++) {
+		page_addr = page_address(pages[i]);
+		if (((u64)page_addr < TASK_SIZE) &&
+		    ((u64)page_addr + PAGE_SIZE < TASK_SIZE))
+			kmsan_unpoison_shadow(page_addr, PAGE_SIZE);
+	}
+
+}
+EXPORT_SYMBOL(kmsan_gup_pgd_range);
+
+/* Helper function to check an SKB. */
+void kmsan_check_skb(const struct sk_buff *skb)
+{
+	struct sk_buff *frag_iter;
+	int i;
+	skb_frag_t *f;
+	u32 p_off, p_len, copied;
+	struct page *p;
+	u8 *vaddr;
+
+	if (!skb || !skb->len)
+		return;
+
+	kmsan_internal_check_memory(skb->data, skb_headlen(skb), 0, REASON_ANY);
+	if (skb_is_nonlinear(skb)) {
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			f = &skb_shinfo(skb)->frags[i];
+
+			skb_frag_foreach_page(f, skb_frag_off(f),
+					      skb_frag_size(f),
+					      p, p_off, p_len, copied) {
+
+				vaddr = kmap_atomic(p);
+				kmsan_internal_check_memory(vaddr + p_off,
+						p_len, /*user_addr*/ 0,
+						REASON_ANY);
+				kunmap_atomic(vaddr);
+			}
+		}
+	}
+	skb_walk_frags(skb, frag_iter)
+		kmsan_check_skb(frag_iter);
+}
+EXPORT_SYMBOL(kmsan_check_skb);
+
+/* Helper function to check an URB. */
+void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+	if (!urb)
+		return;
+	if (is_out)
+		kmsan_internal_check_memory(urb->transfer_buffer,
+					    urb->transfer_buffer_length,
+					    /*user_addr*/ 0, REASON_SUBMIT_URB);
+	else
+		kmsan_internal_unpoison_shadow(urb->transfer_buffer,
+					       urb->transfer_buffer_length,
+					       /*checked*/false);
+}
+EXPORT_SYMBOL(kmsan_handle_urb);
+
+static void kmsan_handle_dma_page(const void *addr, size_t size,
+				  enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/0,
+					    REASON_ANY);
+		kmsan_internal_unpoison_shadow((void *)addr, size,
+					       /*checked*/false);
+		break;
+	case DMA_TO_DEVICE:
+		kmsan_internal_check_memory((void *)addr, size, /*user_addr*/0,
+					    REASON_ANY);
+		break;
+	case DMA_FROM_DEVICE:
+		kmsan_internal_unpoison_shadow((void *)addr, size,
+					       /*checked*/false);
+		break;
+	case DMA_NONE:
+		break;
+	}
+}
+
+/* Helper function to handle DMA data transfers. */
+void kmsan_handle_dma(const void *addr, size_t size,
+		      enum dma_data_direction dir)
+{
+	u64 page_offset, to_go, uaddr = (u64)addr;
+
+	/*
+	 * The kernel may occasionally give us adjacent DMA pages not belonging
+	 * to the same allocation. Process them separately to avoid triggering
+	 * internal KMSAN checks.
+	 */
+	while (size > 0) {
+		page_offset = uaddr % PAGE_SIZE;
+		to_go = min(PAGE_SIZE - page_offset, (u64)size);
+		kmsan_handle_dma_page((void *)uaddr, to_go, dir);
+		uaddr += to_go;
+		size -= to_go;
+	}
+}
+EXPORT_SYMBOL(kmsan_handle_dma);
+
+/* Functions from kmsan-checks.h follow. */
+void kmsan_poison_shadow(const void *address, size_t size, gfp_t flags)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+	irq_flags = kmsan_enter_runtime();
+	/* The users may want to poison/unpoison random memory. */
+	kmsan_internal_poison_shadow((void *)address, size, flags,
+				     KMSAN_POISON_NOCHECK);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_poison_shadow);
+
+void kmsan_unpoison_shadow(const void *address, size_t size)
+{
+	unsigned long irq_flags;
+
+	if (!kmsan_ready || kmsan_in_runtime())
+		return;
+
+	irq_flags = kmsan_enter_runtime();
+	/* The users may want to poison/unpoison random memory. */
+	kmsan_internal_unpoison_shadow((void *)address, size,
+				       KMSAN_POISON_NOCHECK);
+	kmsan_leave_runtime(irq_flags);
+}
+EXPORT_SYMBOL(kmsan_unpoison_shadow);
+
+void kmsan_check_memory(const void *addr, size_t size)
+{
+	return kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
+					   REASON_ANY);
+}
+EXPORT_SYMBOL(kmsan_check_memory);
-- 
2.24.1.735.g03f4e72817-goog



Thread overview: 54+ messages
2019-12-20 18:49 [PATCH RFC v4 00/42] Add KernelMemorySanitizer infrastructure glider
2019-12-20 18:49 ` [PATCH RFC v4 01/42] stackdepot: check depot_index before accessing the stack slab glider
2019-12-20 18:49 ` [PATCH RFC v4 02/42] stackdepot: build with -fno-builtin glider
2020-01-03 17:37   ` Steven Rostedt
2019-12-20 18:49 ` [PATCH RFC v4 03/42] kasan: stackdepot: move filter_irq_stacks() to stackdepot.c glider
2019-12-20 18:49 ` [PATCH RFC v4 04/42] stackdepot: reserve 5 extra bits in depot_stack_handle_t glider
2019-12-20 18:49 ` [PATCH RFC v4 05/42] kmsan: add ReST documentation glider
2019-12-20 18:49 ` [PATCH RFC v4 06/42] kmsan: gfp: introduce __GFP_NO_KMSAN_SHADOW glider
2019-12-20 18:49 ` [PATCH RFC v4 07/42] kmsan: introduce __no_sanitize_memory and __SANITIZE_MEMORY__ glider
2019-12-20 18:49 ` [PATCH RFC v4 08/42] kmsan: reduce vmalloc space glider
2019-12-20 18:49 ` [PATCH RFC v4 09/42] kmsan: add KMSAN runtime core glider
2019-12-20 18:49 ` [PATCH RFC v4 10/42] kmsan: KMSAN compiler API implementation glider
2019-12-20 18:49 ` glider [this message]
2019-12-20 18:49 ` [PATCH RFC v4 12/42] kmsan: stackdepot: don't allocate KMSAN metadata for stackdepot glider
2019-12-20 18:49 ` [PATCH RFC v4 13/42] kmsan: define READ_ONCE_NOCHECK() glider
2019-12-20 18:49 ` [PATCH RFC v4 14/42] kmsan: make READ_ONCE_TASK_STACK() return initialized values glider
2019-12-20 18:49 ` [PATCH RFC v4 15/42] kmsan: x86: sync metadata pages on page fault glider
2019-12-20 18:49 ` [PATCH RFC v4 16/42] kmsan: add tests for KMSAN glider
2019-12-20 18:49 ` [PATCH RFC v4 17/42] crypto: kmsan: disable accelerated configs under KMSAN glider
2019-12-20 19:44   ` Eric Biggers
2020-01-09 14:56     ` Alexander Potapenko
2019-12-20 18:49 ` [PATCH RFC v4 18/42] kmsan: x86: disable UNWINDER_ORC " glider
2019-12-20 18:49 ` [PATCH RFC v4 19/42] kmsan: x86/asm: softirq: add KMSAN IRQ entry hooks glider
2019-12-20 18:49 ` [PATCH RFC v4 20/42] kmsan: x86: increase stack sizes in KMSAN builds glider
2019-12-30 17:39   ` Arnd Bergmann
2020-01-08 15:31     ` Alexander Potapenko
2019-12-20 18:49 ` [PATCH RFC v4 21/42] kmsan: disable KMSAN instrumentation for certain kernel parts glider
2019-12-20 18:49 ` [PATCH RFC v4 22/42] kmsan: mm: call KMSAN hooks from SLUB code glider
2019-12-20 18:49 ` [PATCH RFC v4 23/42] kmsan: mm: maintain KMSAN metadata for page operations glider
2019-12-20 18:49 ` [PATCH RFC v4 24/42] kmsan: handle memory sent to/from USB glider
2019-12-20 18:49 ` [PATCH RFC v4 25/42] kmsan: handle task creation and exiting glider
2019-12-20 18:49 ` [PATCH RFC v4 26/42] kmsan: net: check the value of skb before sending it to the network glider
2019-12-20 18:49 ` [PATCH RFC v4 27/42] kmsan: printk: treat the result of vscnprintf() as initialized glider
2019-12-20 18:49 ` [PATCH RFC v4 28/42] kmsan: disable instrumentation of certain functions glider
2019-12-20 18:49 ` [PATCH RFC v4 29/42] kmsan: unpoison |tlb| in arch_tlb_gather_mmu() glider
2019-12-20 18:49 ` [PATCH RFC v4 30/42] kmsan: use __msan_ string functions where possible glider
2019-12-20 18:49 ` [PATCH RFC v4 31/42] kmsan: hooks for copy_to_user() and friends glider
2019-12-20 18:49 ` [PATCH RFC v4 32/42] kmsan: init: call KMSAN initialization routines glider
2019-12-20 18:49 ` [PATCH RFC v4 33/42] kmsan: enable KMSAN builds glider
2019-12-20 18:49 ` [PATCH RFC v4 34/42] kmsan: handle /dev/[u]random glider
2019-12-20 18:49 ` [PATCH RFC v4 35/42] kmsan: virtio: check/unpoison scatterlist in vring_map_one_sg() glider
2019-12-20 18:49 ` [PATCH RFC v4 36/42] kmsan: disable strscpy() optimization under KMSAN glider
2019-12-20 18:49 ` [PATCH RFC v4 37/42] kmsan: add iomap support glider
2019-12-20 18:49 ` [PATCH RFC v4 38/42] kmsan: dma: unpoison memory mapped by dma_direct_map_page() glider
2019-12-20 18:49 ` [PATCH RFC v4 39/42] kmsan: disable physical page merging in biovec glider
2019-12-20 18:49 ` [PATCH RFC v4 40/42] kmsan: ext4: skip block merging logic in ext4_mpage_readpages for KMSAN glider
2019-12-20 19:18   ` Eric Biggers
2020-01-08 16:14     ` Alexander Potapenko
2019-12-20 18:49 ` [PATCH RFC v4 41/42] x86: kasan: kmsan: support CONFIG_GENERIC_CSUM on x86, enable it for KASAN/KMSAN glider
2019-12-20 18:49 ` [PATCH RFC v4 42/42] kmsan: x86/uprobes: unpoison regs in arch_uprobe_exception_notify() glider
2019-12-23  7:51 ` [PATCH RFC v4 00/42] Add KernelMemorySanitizer infrastructure Leon Romanovsky
2020-01-09 14:38   ` Alexander Potapenko
2020-01-09 16:29     ` Thomas Gleixner
2020-03-25 11:04       ` Alexander Potapenko
