linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Andrey Konovalov <andreyknvl@gmail.com>
To: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
Cc: luto@kernel.org, xin@zytor.com, kirill.shutemov@linux.intel.com,
	 palmer@dabbelt.com, tj@kernel.org, brgerst@gmail.com,
	ardb@kernel.org,  dave.hansen@linux.intel.com, jgross@suse.com,
	will@kernel.org,  akpm@linux-foundation.org, arnd@arndb.de,
	corbet@lwn.net, dvyukov@google.com,  richard.weiyang@gmail.com,
	ytcoode@gmail.com, tglx@linutronix.de,  hpa@zytor.com,
	seanjc@google.com, paul.walmsley@sifive.com,
	 aou@eecs.berkeley.edu, justinstitt@google.com,
	jason.andryuk@amd.com,  glider@google.com, ubizjak@gmail.com,
	jannh@google.com, bhe@redhat.com,  vincenzo.frascino@arm.com,
	rafael.j.wysocki@intel.com,  ndesaulniers@google.com,
	mingo@redhat.com, catalin.marinas@arm.com,
	 junichi.nomura@nec.com, nathan@kernel.org,
	ryabinin.a.a@gmail.com,  dennis@kernel.org, bp@alien8.de,
	kevinloughlin@google.com, morbo@google.com,
	 dan.j.williams@intel.com,
	julian.stecklina@cyberus-technology.de,  peterz@infradead.org,
	cl@linux.com, kees@kernel.org,  kasan-dev@googlegroups.com,
	x86@kernel.org,  linux-arm-kernel@lists.infradead.org,
	linux-riscv@lists.infradead.org,  linux-kernel@vger.kernel.org,
	linux-mm@kvack.org, llvm@lists.linux.dev,
	 linux-doc@vger.kernel.org
Subject: Re: [PATCH 10/15] x86: KASAN raw shadow memory PTE init
Date: Thu, 6 Feb 2025 00:45:49 +0100	[thread overview]
Message-ID: <CA+fCnZfKQwNWbYEhk70ykT1+cnibCBnvZJrhAMvu_b0Y8xZTSg@mail.gmail.com> (raw)
In-Reply-To: <28ddfb1694b19278405b4934f37d398794409749.1738686764.git.maciej.wieczor-retman@intel.com>

On Tue, Feb 4, 2025 at 6:36 PM Maciej Wieczor-Retman
<maciej.wieczor-retman@intel.com> wrote:
>
> In KASAN's generic mode the default value in shadow memory is zero.
> During initialization of shadow memory pages they are allocated and
> zeroed.
>
> In KASAN's tag-based mode the default tag for the arm64 architecture is
> 0xFE which corresponds to any memory that should not be accessed. On x86
> (where tags are 4-bit wide instead of 8-bit wide) that tag is 0xE so
> during the initializations all the bytes in shadow memory pages should
> be filled with 0xE or 0xEE if two tags should be packed in one shadow
> byte.
>
> Use memblock_alloc_try_nid_raw() instead of memblock_alloc_try_nid() to
> avoid zeroing out the memory so it can be set with the KASAN invalid
> tag.
>
> Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
> ---
>  arch/x86/mm/kasan_init_64.c | 19 ++++++++++++++++---
>  include/linux/kasan.h       | 25 +++++++++++++++++++++++++
>  mm/kasan/kasan.h            | 19 -------------------
>  3 files changed, 41 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
> index 9dddf19a5571..55d468d83682 100644
> --- a/arch/x86/mm/kasan_init_64.c
> +++ b/arch/x86/mm/kasan_init_64.c
> @@ -35,6 +35,18 @@ static __init void *early_alloc(size_t size, int nid, bool should_panic)
>         return ptr;
>  }
>
> +static __init void *early_raw_alloc(size_t size, int nid, bool should_panic)
> +{
> +       void *ptr = memblock_alloc_try_nid_raw(size, size,
> +                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
> +
> +       if (!ptr && should_panic)
> +               panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
> +                     (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
> +
> +       return ptr;
> +}
> +
>  static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
>                                       unsigned long end, int nid)
>  {
> @@ -64,8 +76,9 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
>                 if (!pte_none(*pte))
>                         continue;
>
> -               p = early_alloc(PAGE_SIZE, nid, true);
> -               entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
> +               p = early_raw_alloc(PAGE_SIZE, nid, true);
> +               memset(p, kasan_dense_tag(KASAN_SHADOW_INIT), PAGE_SIZE);
> +               entry = pfn_pte(PFN_DOWN(__pa_nodebug(p)), PAGE_KERNEL);
>                 set_pte_at(&init_mm, addr, pte, entry);
>         } while (pte++, addr += PAGE_SIZE, addr != end);
>  }
> @@ -437,7 +450,7 @@ void __init kasan_init(void)
>          * it may contain some garbage. Now we can clear and write protect it,
>          * since after the TLB flush no one should write to it.
>          */
> -       memset(kasan_early_shadow_page, 0, PAGE_SIZE);
> +       kasan_poison(kasan_early_shadow_page, PAGE_SIZE, KASAN_SHADOW_INIT, false);
>         for (i = 0; i < PTRS_PER_PTE; i++) {
>                 pte_t pte;
>                 pgprot_t prot;
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 83146367170a..af8272c74409 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -151,6 +151,31 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
>                 __kasan_unpoison_range(addr, size);
>  }
>
> +#ifdef CONFIG_KASAN_HW_TAGS
> +
> +static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
> +{
> +       if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
> +               return;
> +       if (WARN_ON(size & KASAN_GRANULE_MASK))
> +               return;
> +
> +       hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
> +}
> +
> +#else /* CONFIG_KASAN_HW_TAGS */
> +
> +/**
> + * kasan_poison - mark the memory range as inaccessible
> + * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> + * @size - range size, must be aligned to KASAN_GRANULE_SIZE
> + * @value - value that's written to metadata for the range
> + * @init - whether to initialize the memory range (only for hardware tag-based)
> + */
> +void kasan_poison(const void *addr, size_t size, u8 value, bool init);
> +
> +#endif /* CONFIG_KASAN_HW_TAGS */

Please keep kasan_poison() and kasan_unpoison() in mm/kasan/kasan.h:
these are intended as internal-only functions (perhaps, we should add
this into the comment). Instead, add a purpose-specific wrapper
similar to the ones in include/linux/kasan.h.


> +
>  void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
>  static __always_inline void kasan_poison_pages(struct page *page,
>                                                 unsigned int order, bool init)
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index a56aadd51485..2405477c5899 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -466,16 +466,6 @@ static inline u8 kasan_random_tag(void) { return 0; }
>
>  #ifdef CONFIG_KASAN_HW_TAGS
>
> -static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
> -{
> -       if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
> -               return;
> -       if (WARN_ON(size & KASAN_GRANULE_MASK))
> -               return;
> -
> -       hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
> -}
> -
>  static inline void kasan_unpoison(const void *addr, size_t size, bool init)
>  {
>         u8 tag = get_tag(addr);
> @@ -497,15 +487,6 @@ static inline bool kasan_byte_accessible(const void *addr)
>
>  #else /* CONFIG_KASAN_HW_TAGS */
>
> -/**
> - * kasan_poison - mark the memory range as inaccessible
> - * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> - * @size - range size, must be aligned to KASAN_GRANULE_SIZE
> - * @value - value that's written to metadata for the range
> - * @init - whether to initialize the memory range (only for hardware tag-based)
> - */
> -void kasan_poison(const void *addr, size_t size, u8 value, bool init);
> -
>  /**
>   * kasan_unpoison - mark the memory range as accessible
>   * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
> --
> 2.47.1
>


  reply	other threads:[~2025-02-05 23:46 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-02-04 17:33 [PATCH 00/15] kasan: x86: arm64: risc-v: KASAN tag-based mode for x86 Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 01/15] kasan: Allocation enhancement for dense tag-based mode Maciej Wieczor-Retman
2025-02-05 23:43   ` Andrey Konovalov
2025-02-06 12:57     ` Maciej Wieczor-Retman
2025-02-06 18:14       ` Andrey Konovalov
2025-02-04 17:33 ` [PATCH 02/15] kasan: Tag checking with " Maciej Wieczor-Retman
2025-02-05 23:45   ` Andrey Konovalov
2025-02-06 14:55     ` Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 03/15] kasan: Vmalloc dense tag-based mode support Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 04/15] kasan: arm64: x86: risc-v: Make special tags arch specific Maciej Wieczor-Retman
2025-02-05 20:20   ` Palmer Dabbelt
2025-02-06 11:22     ` Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 05/15] x86: Add arch specific kasan functions Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 06/15] x86: Reset tag for virtual to physical address conversions Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 07/15] mm: Pcpu chunk address tag reset Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 08/15] x86: Physical address comparisons in fill_p*d/pte Maciej Wieczor-Retman
2025-02-06  0:57   ` Dave Hansen
2025-02-07 16:37     ` Maciej Wieczor-Retman
2025-02-11 19:59       ` Dave Hansen
2025-02-04 17:33 ` [PATCH 09/15] x86: Physical address comparison in current_mm pgd check Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 10/15] x86: KASAN raw shadow memory PTE init Maciej Wieczor-Retman
2025-02-05 23:45   ` Andrey Konovalov [this message]
2025-02-06 15:39     ` Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 11/15] x86: LAM initialization Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 12/15] x86: Minimal SLAB alignment Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 13/15] x86: runtime_const used for KASAN_SHADOW_END Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 14/15] x86: Make software tag-based kasan available Maciej Wieczor-Retman
2025-02-04 17:33 ` [PATCH 15/15] kasan: Add mitigation and debug modes Maciej Wieczor-Retman
2025-02-05 23:46   ` Andrey Konovalov
2025-02-07  9:08     ` Maciej Wieczor-Retman
2025-02-04 18:58 ` [PATCH 00/15] kasan: x86: arm64: risc-v: KASAN tag-based mode for x86 Christoph Lameter (Ampere)
2025-02-04 21:05   ` Dave Hansen
2025-02-05 18:59     ` Christoph Lameter (Ampere)
2025-02-05 23:04       ` Ard Biesheuvel
2025-02-04 23:36   ` Jessica Clarke
2025-02-05 18:51     ` Christoph Lameter (Ampere)
2025-02-06  1:05       ` Jessica Clarke
2025-02-06 19:11         ` Christoph Lameter (Ampere)
2025-02-06 21:41           ` Dave Hansen
2025-02-07  7:41             ` Maciej Wieczor-Retman
2025-02-06 22:56           ` Andrey Konovalov
2025-02-04 23:36   ` Jessica Clarke
2025-02-05 23:40 ` Andrey Konovalov
2025-02-06 10:40   ` Maciej Wieczor-Retman
2025-02-06 18:10     ` Andrey Konovalov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=CA+fCnZfKQwNWbYEhk70ykT1+cnibCBnvZJrhAMvu_b0Y8xZTSg@mail.gmail.com \
    --to=andreyknvl@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=aou@eecs.berkeley.edu \
    --cc=ardb@kernel.org \
    --cc=arnd@arndb.de \
    --cc=bhe@redhat.com \
    --cc=bp@alien8.de \
    --cc=brgerst@gmail.com \
    --cc=catalin.marinas@arm.com \
    --cc=cl@linux.com \
    --cc=corbet@lwn.net \
    --cc=dan.j.williams@intel.com \
    --cc=dave.hansen@linux.intel.com \
    --cc=dennis@kernel.org \
    --cc=dvyukov@google.com \
    --cc=glider@google.com \
    --cc=hpa@zytor.com \
    --cc=jannh@google.com \
    --cc=jason.andryuk@amd.com \
    --cc=jgross@suse.com \
    --cc=julian.stecklina@cyberus-technology.de \
    --cc=junichi.nomura@nec.com \
    --cc=justinstitt@google.com \
    --cc=kasan-dev@googlegroups.com \
    --cc=kees@kernel.org \
    --cc=kevinloughlin@google.com \
    --cc=kirill.shutemov@linux.intel.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=llvm@lists.linux.dev \
    --cc=luto@kernel.org \
    --cc=maciej.wieczor-retman@intel.com \
    --cc=mingo@redhat.com \
    --cc=morbo@google.com \
    --cc=nathan@kernel.org \
    --cc=ndesaulniers@google.com \
    --cc=palmer@dabbelt.com \
    --cc=paul.walmsley@sifive.com \
    --cc=peterz@infradead.org \
    --cc=rafael.j.wysocki@intel.com \
    --cc=richard.weiyang@gmail.com \
    --cc=ryabinin.a.a@gmail.com \
    --cc=seanjc@google.com \
    --cc=tglx@linutronix.de \
    --cc=tj@kernel.org \
    --cc=ubizjak@gmail.com \
    --cc=vincenzo.frascino@arm.com \
    --cc=will@kernel.org \
    --cc=x86@kernel.org \
    --cc=xin@zytor.com \
    --cc=ytcoode@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox