From: Marco Elver <elver@google.com>
To: elver@google.com, akpm@linux-foundation.org, glider@google.com
Cc: hpa@zytor.com, paulmck@kernel.org, andreyknvl@google.com,
aryabinin@virtuozzo.com, luto@kernel.org, bp@alien8.de,
catalin.marinas@arm.com, cl@linux.com,
dave.hansen@linux.intel.com, rientjes@google.com,
dvyukov@google.com, edumazet@google.com,
gregkh@linuxfoundation.org, hdanton@sina.com, mingo@redhat.com,
jannh@google.com, Jonathan.Cameron@huawei.com, corbet@lwn.net,
iamjoonsoo.kim@lge.com, joern@purestorage.com,
keescook@chromium.org, mark.rutland@arm.com, penberg@kernel.org,
peterz@infradead.org, sjpark@amazon.com, tglx@linutronix.de,
vbabka@suse.cz, will@kernel.org, x86@kernel.org,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
kasan-dev@googlegroups.com,
linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org
Subject: [PATCH v7 2/9] x86, kfence: enable KFENCE for x86
Date: Tue, 3 Nov 2020 18:58:34 +0100 [thread overview]
Message-ID: <20201103175841.3495947-3-elver@google.com> (raw)
In-Reply-To: <20201103175841.3495947-1-elver@google.com>
From: Alexander Potapenko <glider@google.com>
Add architecture specific implementation details for KFENCE and enable
KFENCE for the x86 architecture. In particular, this implements the
required interface in <asm/kfence.h> for setting up the pool and
providing helper functions for protecting and unprotecting pages.
For x86, we need to ensure that the pool uses 4K pages, which is done
using the set_memory_4k() helper function.
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Co-developed-by: Marco Elver <elver@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
---
v7:
* Only not-present faults should be handled by kfence [reported by Jann Horn].
v5:
* MAJOR CHANGE: Switch to the memblock_alloc'd pool. When running
benchmarks with the newly optimized is_kfence_address(), no difference
between baseline and KFENCE is observed.
* Suggested by Jann Horn:
* Move x86 kfence_handle_page_fault before oops handling.
* WARN_ON in kfence_protect_page if non-4K pages.
* Better comments for x86 kfence_protect_page.
v4:
* Define __kfence_pool_attrs.
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/kfence.h | 65 +++++++++++++++++++++++++++++++++++
arch/x86/mm/fault.c | 5 +++
3 files changed, 71 insertions(+)
create mode 100644 arch/x86/include/asm/kfence.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f6946b81f74a..c9ec6b5ba358 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -144,6 +144,7 @@ config X86
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if X86_64
+ select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
new file mode 100644
index 000000000000..beeac105dae7
--- /dev/null
+++ b/arch/x86/include/asm/kfence.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_X86_KFENCE_H
+#define _ASM_X86_KFENCE_H
+
+#include <linux/bug.h>
+#include <linux/kfence.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+/*
+ * The page fault handler entry function, up to which the stack trace is
+ * truncated in reports.
+ */
+#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"
+
+/* Force 4K pages for __kfence_pool. */
+static inline bool arch_kfence_init_pool(void)
+{
+ unsigned long addr;
+
+ for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+ addr += PAGE_SIZE) {
+ unsigned int level;
+
+ if (!lookup_address(addr, &level))
+ return false;
+
+ if (level != PG_LEVEL_4K)
+ set_memory_4k(addr, 1);
+ }
+
+ return true;
+}
+
+/* Protect the given page and flush TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ unsigned int level;
+ pte_t *pte = lookup_address(addr, &level);
+
+ if (WARN_ON(!pte || level != PG_LEVEL_4K))
+ return false;
+
+ /*
+ * We need to avoid IPIs, as we may get KFENCE allocations or faults
+ * with interrupts disabled. Therefore, the below is best-effort, and
+ * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
+ * lazy fault handling takes care of faults after the page is PRESENT.
+ */
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ /* Flush this CPU's TLB. */
+ flush_tlb_one_kernel(addr);
+ return true;
+}
+
+#endif /* _ASM_X86_KFENCE_H */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 82bf37a5c9ec..e42db2836438 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -9,6 +9,7 @@
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
#include <linux/memblock.h> /* max_low_pfn */
+#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
@@ -725,6 +726,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (IS_ENABLED(CONFIG_EFI))
efi_recover_from_page_fault(address);
+ /* Only not-present faults should be handled by KFENCE. */
+ if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
+ return;
+
oops:
/*
* Oops. The kernel tried to access some bad page. We'll have to
--
2.29.1.341.ge80a0c044ae-goog
next prev parent reply other threads:[~2020-11-03 17:59 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-11-03 17:58 [PATCH v7 0/9] KFENCE: A low-overhead sampling-based memory safety error detector Marco Elver
2020-11-03 17:58 ` [PATCH v7 1/9] mm: add Kernel Electric-Fence infrastructure Marco Elver
2020-11-03 22:17 ` Jann Horn
2020-11-03 17:58 ` Marco Elver [this message]
2020-11-03 22:17 ` [PATCH v7 2/9] x86, kfence: enable KFENCE for x86 Jann Horn
2020-11-03 17:58 ` [PATCH v7 3/9] arm64, kfence: enable KFENCE for ARM64 Marco Elver
2020-11-03 22:17 ` Jann Horn
2020-11-04 13:06 ` Mark Rutland
2020-11-04 14:23 ` Marco Elver
2020-11-04 14:56 ` Mark Rutland
2020-11-03 17:58 ` [PATCH v7 4/9] mm, kfence: insert KFENCE hooks for SLAB Marco Elver
2020-11-03 17:58 ` [PATCH v7 5/9] mm, kfence: insert KFENCE hooks for SLUB Marco Elver
2020-11-03 17:58 ` [PATCH v7 6/9] kfence, kasan: make KFENCE compatible with KASAN Marco Elver
2020-11-03 17:58 ` [PATCH v7 7/9] kfence, Documentation: add KFENCE documentation Marco Elver
2020-11-03 22:17 ` Jann Horn
2020-11-03 17:58 ` [PATCH v7 8/9] kfence: add test suite Marco Elver
2020-11-03 22:17 ` Jann Horn
2020-11-03 17:58 ` [PATCH v7 9/9] MAINTAINERS: add entry for KFENCE Marco Elver
2020-11-04 0:31 ` [PATCH v7 0/9] KFENCE: A low-overhead sampling-based memory safety error detector Andrew Morton
2020-11-04 12:36 ` Marco Elver
2020-11-04 15:16 ` Alexander Potapenko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20201103175841.3495947-3-elver@google.com \
--to=elver@google.com \
--cc=Jonathan.Cameron@huawei.com \
--cc=akpm@linux-foundation.org \
--cc=andreyknvl@google.com \
--cc=aryabinin@virtuozzo.com \
--cc=bp@alien8.de \
--cc=catalin.marinas@arm.com \
--cc=cl@linux.com \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=dvyukov@google.com \
--cc=edumazet@google.com \
--cc=glider@google.com \
--cc=gregkh@linuxfoundation.org \
--cc=hdanton@sina.com \
--cc=hpa@zytor.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=jannh@google.com \
--cc=joern@purestorage.com \
--cc=kasan-dev@googlegroups.com \
--cc=keescook@chromium.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=mark.rutland@arm.com \
--cc=mingo@redhat.com \
--cc=paulmck@kernel.org \
--cc=penberg@kernel.org \
--cc=peterz@infradead.org \
--cc=rientjes@google.com \
--cc=sjpark@amazon.com \
--cc=tglx@linutronix.de \
--cc=vbabka@suse.cz \
--cc=will@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox