From: js1304@gmail.com
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>,
	Alexander Potapenko <glider@google.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	kasan-dev@googlegroups.com, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H . Peter Anvin" <hpa@zytor.com>,
	kernel-team@lge.com, Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH v1 07/11] x86/kasan: use per-page shadow memory
Date: Tue, 16 May 2017 10:16:45 +0900
Message-ID: <1494897409-14408-8-git-send-email-iamjoonsoo.kim@lge.com>
In-Reply-To: <1494897409-14408-1-git-send-email-iamjoonsoo.kim@lge.com>

From: Joonsoo Kim <iamjoonsoo.kim@lge.com>

This patch enables x86 to use per-page shadow memory.
Most of the initialization code for the per-page shadow is
copied from the code for the original shadow memory.

Two things are not trivial:
1. The per-page shadow for global variables is initialized as a
bypass range. It is not a target for on-demand shadow allocation,
since shadow memory for global variables is always required.
2. The per-page shadow for modules is initialized as a bypass
range, since on-demand shadow allocation for modules is already
implemented.

Note that on-demand allocation for the original shadow memory is
not implemented yet, so this patch does not save any memory by
itself. That is implemented in the following patch.
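
For reference, one byte of per-page shadow covers one page rather
than the eight bytes of memory that a byte of original shadow
covers. The translation helper used below, kasan_mem_to_pshadow(),
is introduced earlier in this series; the sketch that follows is
only an illustration inferred from the KASAN_PSHADOW_START
definition in this patch, not necessarily the exact implementation:

	/*
	 * Illustrative sketch only; kasan_pshadow_offset is set up
	 * by the earlier per-page shadow infrastructure patch.
	 */
	static inline void *kasan_mem_to_pshadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> PAGE_SHIFT)
			+ kasan_pshadow_offset;
	}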

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
 arch/x86/include/asm/kasan.h |  6 +++
 arch/x86/mm/kasan_init_64.c  | 87 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 84 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index f527b02..cfa63c7 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -18,6 +18,12 @@
  */
 #define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3)))
 
+#define HAVE_KASAN_PER_PAGE_SHADOW 1
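+/*
+ * One per-page shadow byte tracks one page of the 47-bit kernel
+ * address space starting at 0xffff800000000000, hence the
+ * 1ULL << (47 - PAGE_SHIFT) size below (32GB with 4KB pages,
+ * versus 16TB of virtual space for the byte-per-8-bytes
+ * original shadow).
+ */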
+#define KASAN_PSHADOW_SIZE	((1ULL << (47 - PAGE_SHIFT)))
+#define KASAN_PSHADOW_START	(kasan_pshadow_offset + \
+					(0xffff800000000000ULL >> PAGE_SHIFT))
+#define KASAN_PSHADOW_END	(KASAN_PSHADOW_START + KASAN_PSHADOW_SIZE)
+
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_KASAN
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index adc673b..1c300bf 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -15,19 +15,29 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
-static int __init map_range(struct range *range)
+static int __init map_range(struct range *range, bool pshadow)
 {
 	unsigned long start;
 	unsigned long end;
 
-	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
-	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
+	start = (unsigned long)pfn_to_kaddr(range->start);
+	end = (unsigned long)pfn_to_kaddr(range->end);
 
 	/*
 	 * end + 1 here is intentional. We check several shadow bytes in advance
 	 * to slightly speed up fastpath. In some rare cases we could cross
 	 * boundary of mapped shadow, so we just map some more here.
 	 */
+	if (pshadow) {
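+		/* One per-page shadow byte tracks a whole page */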
+		start = (unsigned long)kasan_mem_to_pshadow((void *)start);
+		end = (unsigned long)kasan_mem_to_pshadow((void *)end);
+
+		return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+	}
+
+	start = (unsigned long)kasan_mem_to_shadow((void *)start);
+	end = (unsigned long)kasan_mem_to_shadow((void *)end);
+
 	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
 }
 
@@ -49,11 +59,10 @@ static void __init clear_pgds(unsigned long start,
 	}
 }
 
-static void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd,
+			unsigned long start, unsigned long end)
 {
 	int i;
-	unsigned long start = KASAN_SHADOW_START;
-	unsigned long end = KASAN_SHADOW_END;
 
 	for (i = pgd_index(start); start < end; i++) {
 		switch (CONFIG_PGTABLE_LEVELS) {
@@ -109,8 +118,35 @@ void __init kasan_early_init(void)
 	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
-	kasan_map_early_shadow(early_level4_pgt);
-	kasan_map_early_shadow(init_level4_pgt);
+	kasan_map_early_shadow(early_level4_pgt,
+		KASAN_SHADOW_START, KASAN_SHADOW_END);
+	kasan_map_early_shadow(init_level4_pgt,
+		KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+	kasan_early_init_pshadow();
+
+	kasan_map_early_shadow(early_level4_pgt,
+		KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+	kasan_map_early_shadow(init_level4_pgt,
+		KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+
+	/* Prepare black shadow memory */
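+	/*
+	 * This mirrors the kasan_zero_* early setup above: each level
+	 * of the black table hierarchy points at the next, bottoming
+	 * out in a read-only mapping of kasan_black_page.
+	 */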
+	pte_val = __pa_nodebug(kasan_black_page) | __PAGE_KERNEL_RO;
+	pmd_val = __pa_nodebug(kasan_black_pte) | _KERNPG_TABLE;
+	pud_val = __pa_nodebug(kasan_black_pmd) | _KERNPG_TABLE;
+	p4d_val = __pa_nodebug(kasan_black_pud) | _KERNPG_TABLE;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		kasan_black_pte[i] = __pte(pte_val);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		kasan_black_pmd[i] = __pmd(pmd_val);
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		kasan_black_pud[i] = __pud(pud_val);
+
+	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+		kasan_black_p4d[i] = __p4d(p4d_val);
 }
 
 void __init kasan_init(void)
@@ -135,7 +171,7 @@ void __init kasan_init(void)
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
+		if (map_range(&pfn_mapped[i], false))
 			panic("kasan: unable to allocate shadow!");
 	}
 	kasan_populate_shadow(
@@ -151,6 +187,39 @@ void __init kasan_init(void)
 			(void *)KASAN_SHADOW_END,
 			true, false);
 
+	/* For per-page shadow */
+	clear_pgds(KASAN_PSHADOW_START, KASAN_PSHADOW_END);
+
+	kasan_populate_shadow((void *)KASAN_PSHADOW_START,
+			kasan_mem_to_pshadow((void *)PAGE_OFFSET),
+			true, false);
+
+	for (i = 0; i < E820_MAX_ENTRIES; i++) {
+		if (pfn_mapped[i].end == 0)
+			break;
+
+		if (map_range(&pfn_mapped[i], true))
+			panic("kasan: unable to allocate shadow!");
+	}
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow((void *)PAGE_OFFSET + MAXMEM),
+		kasan_mem_to_pshadow((void *)__START_KERNEL_map),
+		true, false);
+
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow(_stext),
+		kasan_mem_to_pshadow(_end),
+		false, false);
+
+	kasan_populate_shadow(
+		kasan_mem_to_pshadow((void *)MODULES_VADDR),
+		kasan_mem_to_pshadow((void *)MODULES_END),
+		false, false);
+
+	kasan_populate_shadow(kasan_mem_to_pshadow((void *)MODULES_END),
+			(void *)KASAN_PSHADOW_END,
+			true, false);
+
 	load_cr3(init_level4_pgt);
 	__flush_tlb_all();
 
-- 
2.7.4

Thread overview: 53+ messages
2017-05-16  1:16 [PATCH v1 00/11] mm/kasan: support per-page shadow memory to reduce memory consumption js1304
2017-05-16  1:16 ` [PATCH v1 01/11] mm/kasan: rename XXX_is_zero to XXX_is_nonzero js1304
2017-05-16  1:16 ` [PATCH v1 02/11] mm/kasan: don't fetch the next shadow value speculatively js1304
2017-05-16  1:16 ` [PATCH v1 03/11] mm/kasan: handle unaligned end address in zero_pte_populate js1304
2017-05-16  1:16 ` [PATCH v1 04/11] mm/kasan: extend kasan_populate_zero_shadow() js1304
2017-05-16  1:16 ` [PATCH v1 05/11] mm/kasan: introduce per-page shadow memory infrastructure js1304
2017-05-16  1:16 ` [PATCH v1 06/11] mm/kasan: mark/unmark the target range that is for original shadow memory js1304
2017-05-16  1:16 ` js1304 [this message]
2017-05-16  1:16 ` [PATCH v1 08/11] mm/kasan: support on-demand shadow allocation/mapping js1304
2017-05-16  1:16 ` [PATCH v1 09/11] x86/kasan: support on-demand shadow mapping js1304
2017-05-16  1:16 ` [PATCH v1 10/11] mm/kasan: support dynamic shadow memory free js1304
2017-05-16  1:16 ` [PATCH v1 11/11] mm/kasan: change the order of shadow memory check js1304
2017-05-16  1:28 ` [PATCH(RE-RESEND) v1 01/11] mm/kasan: rename _is_zero to _is_nonzero Joonsoo Kim
2017-05-16  4:34 ` [PATCH v1 00/11] mm/kasan: support per-page shadow memory to reduce memory consumption Dmitry Vyukov
2017-05-16  4:47   ` Dmitry Vyukov
2017-05-16  6:23   ` Joonsoo Kim
2017-05-16 20:49     ` Dmitry Vyukov
2017-05-17  7:23       ` Joonsoo Kim
2017-05-17  7:25         ` Joonsoo Kim
2017-05-24  6:57       ` Dmitry Vyukov
2017-05-24  7:45         ` Joonsoo Kim
2017-05-24 17:19           ` Dmitry Vyukov
2017-05-25  0:41             ` Joonsoo Kim
2017-05-29 15:07               ` Dmitry Vyukov
2017-05-29 15:12                 ` Dmitry Vyukov
2017-05-29 15:29                   ` Dmitry Vyukov
2017-05-30  7:58                     ` Vladimir Murzin
2017-05-30  8:15                       ` Dmitry Vyukov
2017-05-30  8:31                         ` Vladimir Murzin
2017-05-30  8:40                           ` Vladimir Murzin
2017-05-30  8:49                             ` Dmitry Vyukov
2017-05-30  9:08                               ` Vladimir Murzin
2017-05-30  9:26                                 ` Dmitry Vyukov
2017-05-30  9:39                                   ` Vladimir Murzin
2017-05-30  9:45                                     ` Dmitry Vyukov
2017-05-30  9:54                                       ` Vladimir Murzin
2017-05-30 14:16                     ` Andrey Ryabinin
2017-05-31  5:50                       ` Joonsoo Kim
2017-05-31 16:31                         ` Andrey Ryabinin
2017-06-08  2:43                           ` Joonsoo Kim
2017-06-01 15:16                       ` 王靖天
2017-06-01 18:06                       ` Dmitry Vyukov
2017-06-08  2:40                         ` Joonsoo Kim
2017-06-13 16:49                           ` Andrey Ryabinin
2017-06-14  0:12                             ` Joonsoo Kim
2017-05-17 12:17 ` Andrey Ryabinin
2017-05-19  1:53   ` Joonsoo Kim
2017-05-22  6:02     ` Dmitry Vyukov
2017-05-24  6:04       ` Joonsoo Kim
2017-05-24 16:31         ` Dmitry Vyukov
2017-05-25  0:46           ` Joonsoo Kim
2017-05-22 14:00     ` Andrey Ryabinin
2017-05-24  6:18       ` Joonsoo Kim
