* [PATCH v2] mm,kfence: decouple kfence from page granularity mapping judgement
@ 2023-03-09 15:37 Zhenhua Huang
From: Zhenhua Huang @ 2023-03-09 15:37 UTC
To: catalin.marinas, will, glider, elver, dvyukov, akpm,
robin.murphy, mark.rutland, jianyong.wu, james.morse
Cc: Zhenhua Huang, linux-arm-kernel, kasan-dev, linux-mm,
quic_pkondeti, quic_guptap, quic_tingweiz
KFENCE only needs its pool to be mapped at page granularity; the
previous check was overly protective. Decouple KFENCE from that check
and map only the KFENCE pool at page granularity [1].

To implement this, also relocate the KFENCE pool allocation so that it
happens before the linear mapping is set up: arm64_kfence_alloc_pool()
allocates the physical address, and __kfence_pool is set once the
linear mapping has been established.
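
The resulting boot-time flow in map_mem() is roughly (abridged from the
diff below):

	arm64_kfence_alloc_pool();             /* memblock_phys_alloc() the pool */
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); /* skip block mappings */
	/* ... map all the memory banks ... */
	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
		       pgprot_tagged(PAGE_KERNEL),
		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
	kfence_set_pool(kfence_pool);          /* __kfence_pool = phys_to_virt() */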
LINK: [1] https://lore.kernel.org/linux-arm-kernel/1675750519-1064-1-git-send-email-quic_zhenhuah@quicinc.com/T/
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
---
arch/arm64/mm/mmu.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
arch/arm64/mm/pageattr.c | 5 ++---
include/linux/kfence.h | 7 +++++++
mm/kfence/core.c | 9 +++++++++
4 files changed, 62 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6f9d889..46afe3f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
+#include <linux/kfence.h>
#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -525,6 +526,33 @@ static int __init enable_crash_mem_map(char *arg)
}
early_param("crashkernel", enable_crash_mem_map);
+#ifdef CONFIG_KFENCE
+
+static phys_addr_t arm64_kfence_alloc_pool(void)
+{
+ phys_addr_t kfence_pool = 0;
+
+ if (!kfence_sample_interval)
+ return 0;
+
+ kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+ if (!kfence_pool) {
+ pr_err("failed to allocate kfence pool\n");
+ return 0;
+ }
+
+ return kfence_pool;
+}
+
+#else
+
+static phys_addr_t arm64_kfence_alloc_pool(void)
+{
+ return (phys_addr_t)NULL;
+}
+
+#endif
+
static void __init map_mem(pgd_t *pgdp)
{
static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -532,6 +560,7 @@ static void __init map_mem(pgd_t *pgdp)
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
int flags = NO_EXEC_MAPPINGS;
+ phys_addr_t kfence_pool = 0;
u64 i;
/*
@@ -564,6 +593,10 @@ static void __init map_mem(pgd_t *pgdp)
}
#endif
+ kfence_pool = arm64_kfence_alloc_pool();
+ if (kfence_pool)
+ memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
/* map all the memory banks */
for_each_mem_range(i, &start, &end) {
if (start >= end)
@@ -608,6 +641,17 @@ static void __init map_mem(pgd_t *pgdp)
}
}
#endif
+
+ /* Kfence pool needs page-level mapping */
+ if (kfence_pool) {
+ __map_memblock(pgdp, kfence_pool,
+ kfence_pool + KFENCE_POOL_SIZE,
+ pgprot_tagged(PAGE_KERNEL),
+ NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+ memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+ /* kfence_pool really mapped now */
+ kfence_set_pool(kfence_pool);
+ }
}
void mark_rodata_ro(void)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 79dd201..61156d0 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -22,12 +22,11 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
/*
- * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+ * rodata_full and DEBUG_PAGEALLOC require linear map to be
* mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
*/
- return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
- IS_ENABLED(CONFIG_KFENCE);
+ return (rodata_enabled && rodata_full) || debug_pagealloc_enabled();
}
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 726857a..d982ac2 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -64,6 +64,11 @@ static __always_inline bool is_kfence_address(const void *addr)
void __init kfence_alloc_pool(void);
/**
+ * kfence_set_pool() - KFENCE pool mapped and can be used
+ */
+void __init kfence_set_pool(phys_addr_t addr);
+
+/**
* kfence_init() - perform KFENCE initialization at boot time
*
* Requires that kfence_alloc_pool() was called before. This sets up the
@@ -222,8 +227,10 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
#else /* CONFIG_KFENCE */
+#define KFENCE_POOL_SIZE 0
static inline bool is_kfence_address(const void *addr) { return false; }
static inline void kfence_alloc_pool(void) { }
+static inline void kfence_set_pool(phys_addr_t addr) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5349c37..a17c20c2 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -814,12 +814,21 @@ void __init kfence_alloc_pool(void)
if (!kfence_sample_interval)
return;
+ /* if __kfence_pool already initialized in some arch, abort */
+ if (__kfence_pool)
+ return;
+
__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
if (!__kfence_pool)
pr_err("failed to allocate pool\n");
}
+void __init kfence_set_pool(phys_addr_t addr)
+{
+ __kfence_pool = phys_to_virt(addr);
+}
+
static void kfence_init_enable(void)
{
if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
--
2.7.4
* Re: [PATCH v2] mm,kfence: decouple kfence from page granularity mapping judgement
@ 2023-03-09 15:48 ` Marco Elver
From: Marco Elver @ 2023-03-09 15:48 UTC
To: Zhenhua Huang
Cc: catalin.marinas, will, glider, dvyukov, akpm, robin.murphy,
mark.rutland, jianyong.wu, james.morse, linux-arm-kernel,
kasan-dev, linux-mm, quic_pkondeti, quic_guptap, quic_tingweiz
On Thu, 9 Mar 2023 at 16:38, Zhenhua Huang <quic_zhenhuah@quicinc.com> wrote:
>
> KFENCE only needs its pool to be mapped at page granularity; the
> previous check was overly protective. Decouple KFENCE from that check
> and map only the KFENCE pool at page granularity [1].
>
> To implement this, also relocate the KFENCE pool allocation so that it
> happens before the linear mapping is set up: arm64_kfence_alloc_pool()
> allocates the physical address, and __kfence_pool is set once the
> linear mapping has been established.
>
> LINK: [1] https://lore.kernel.org/linux-arm-kernel/1675750519-1064-1-git-send-email-quic_zhenhuah@quicinc.com/T/
> Suggested-by: Mark Rutland <mark.rutland@arm.com>
> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
> ---
> arch/arm64/mm/mmu.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
> arch/arm64/mm/pageattr.c | 5 ++---
> include/linux/kfence.h | 7 +++++++
> mm/kfence/core.c | 9 +++++++++
> 4 files changed, 62 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 6f9d889..46afe3f 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -24,6 +24,7 @@
> #include <linux/mm.h>
> #include <linux/vmalloc.h>
> #include <linux/set_memory.h>
> +#include <linux/kfence.h>
>
> #include <asm/barrier.h>
> #include <asm/cputype.h>
> @@ -525,6 +526,33 @@ static int __init enable_crash_mem_map(char *arg)
> }
> early_param("crashkernel", enable_crash_mem_map);
>
> +#ifdef CONFIG_KFENCE
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> + phys_addr_t kfence_pool = 0;
> +
> + if (!kfence_sample_interval)
> + return 0;
> +
> + kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
> + if (!kfence_pool) {
> + pr_err("failed to allocate kfence pool\n");
> + return 0;
> + }
> +
> + return kfence_pool;
> +}
> +
> +#else
> +
> +static phys_addr_t arm64_kfence_alloc_pool(void)
> +{
> + return (phys_addr_t)NULL;
Just return "0" - which the above function does as well on error. Or
the above function should also do (phys_addr_t)NULL for consistency.
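I.e., an untested sketch of the !CONFIG_KFENCE stub with the first
option:

static phys_addr_t arm64_kfence_alloc_pool(void)
{
	return 0;
}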
> +}
> +
> +#endif
> +
> static void __init map_mem(pgd_t *pgdp)
> {
> static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
> @@ -532,6 +560,7 @@ static void __init map_mem(pgd_t *pgdp)
> phys_addr_t kernel_end = __pa_symbol(__init_begin);
> phys_addr_t start, end;
> int flags = NO_EXEC_MAPPINGS;
> + phys_addr_t kfence_pool = 0;
> u64 i;
>
> /*
> @@ -564,6 +593,10 @@ static void __init map_mem(pgd_t *pgdp)
> }
> #endif
>
> + kfence_pool = arm64_kfence_alloc_pool();
> + if (kfence_pool)
> + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
> +
> /* map all the memory banks */
> for_each_mem_range(i, &start, &end) {
> if (start >= end)
> @@ -608,6 +641,17 @@ static void __init map_mem(pgd_t *pgdp)
> }
> }
> #endif
> +
> + /* Kfence pool needs page-level mapping */
> + if (kfence_pool) {
> + __map_memblock(pgdp, kfence_pool,
> + kfence_pool + KFENCE_POOL_SIZE,
> + pgprot_tagged(PAGE_KERNEL),
> + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
> + /* kfence_pool really mapped now */
> + kfence_set_pool(kfence_pool);
> + }
> }
>
> void mark_rodata_ro(void)
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 79dd201..61156d0 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -22,12 +22,11 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
> bool can_set_direct_map(void)
> {
> /*
> - * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
> + * rodata_full and DEBUG_PAGEALLOC require linear map to be
> * mapped at page granularity, so that it is possible to
> * protect/unprotect single pages.
> */
> - return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
> - IS_ENABLED(CONFIG_KFENCE);
> + return (rodata_enabled && rodata_full) || debug_pagealloc_enabled();
> }
>
> static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> diff --git a/include/linux/kfence.h b/include/linux/kfence.h
> index 726857a..d982ac2 100644
> --- a/include/linux/kfence.h
> +++ b/include/linux/kfence.h
> @@ -64,6 +64,11 @@ static __always_inline bool is_kfence_address(const void *addr)
> void __init kfence_alloc_pool(void);
>
> /**
> + * kfence_set_pool() - KFENCE pool mapped and can be used
I don't understand the comment. Maybe just "allows an arch to set the
KFENCE pool during early init"
> + */
> +void __init kfence_set_pool(phys_addr_t addr);
> +
> +/**
> * kfence_init() - perform KFENCE initialization at boot time
> *
> * Requires that kfence_alloc_pool() was called before. This sets up the
> @@ -222,8 +227,10 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
>
> #else /* CONFIG_KFENCE */
>
> +#define KFENCE_POOL_SIZE 0
> static inline bool is_kfence_address(const void *addr) { return false; }
> static inline void kfence_alloc_pool(void) { }
> +static inline void kfence_set_pool(phys_addr_t addr) { }
> static inline void kfence_init(void) { }
> static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
> static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 5349c37..a17c20c2 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -814,12 +814,21 @@ void __init kfence_alloc_pool(void)
> if (!kfence_sample_interval)
> return;
>
> + /* if __kfence_pool already initialized in some arch, abort */
Abort sounds like it's a failure condition, but it's actually ok.
Maybe just write:
/* Check if the pool has already been initialized by arch; if so,
skip the below. */
> + if (__kfence_pool)
> + return;
> +
> __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
>
> if (!__kfence_pool)
> pr_err("failed to allocate pool\n");
> }
>
> +void __init kfence_set_pool(phys_addr_t addr)
> +{
> + __kfence_pool = phys_to_virt(addr);
> +}
> +
The rest looks good.
* Re: [PATCH v2] mm,kfence: decouple kfence from page granularity mapping judgement
@ 2023-03-10 2:02 ` Zhenhua Huang
From: Zhenhua Huang @ 2023-03-10 2:02 UTC
To: Marco Elver
Cc: catalin.marinas, will, glider, dvyukov, akpm, robin.murphy,
mark.rutland, jianyong.wu, james.morse, linux-arm-kernel,
kasan-dev, linux-mm, quic_pkondeti, quic_guptap, quic_tingweiz
Thanks Marco for your detailed review!
On 2023/3/9 23:48, Marco Elver wrote:
> On Thu, 9 Mar 2023 at 16:38, Zhenhua Huang <quic_zhenhuah@quicinc.com> wrote:
>>
>> KFENCE only needs its pool to be mapped at page granularity; the
>> previous check was overly protective. Decouple KFENCE from that check
>> and map only the KFENCE pool at page granularity [1].
>>
>> To implement this, also relocate the KFENCE pool allocation so that it
>> happens before the linear mapping is set up: arm64_kfence_alloc_pool()
>> allocates the physical address, and __kfence_pool is set once the
>> linear mapping has been established.
>>
>> LINK: [1] https://lore.kernel.org/linux-arm-kernel/1675750519-1064-1-git-send-email-quic_zhenhuah@quicinc.com/T/
>> Suggested-by: Mark Rutland <mark.rutland@arm.com>
>> Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
>> ---
>> arch/arm64/mm/mmu.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
>> arch/arm64/mm/pageattr.c | 5 ++---
>> include/linux/kfence.h | 7 +++++++
>> mm/kfence/core.c | 9 +++++++++
>> 4 files changed, 62 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 6f9d889..46afe3f 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -24,6 +24,7 @@
>> #include <linux/mm.h>
>> #include <linux/vmalloc.h>
>> #include <linux/set_memory.h>
>> +#include <linux/kfence.h>
>>
>> #include <asm/barrier.h>
>> #include <asm/cputype.h>
>> @@ -525,6 +526,33 @@ static int __init enable_crash_mem_map(char *arg)
>> }
>> early_param("crashkernel", enable_crash_mem_map);
>>
>> +#ifdef CONFIG_KFENCE
>> +
>> +static phys_addr_t arm64_kfence_alloc_pool(void)
>> +{
>> + phys_addr_t kfence_pool = 0;
>> +
>> + if (!kfence_sample_interval)
>> + return 0;
>> +
>> + kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
>> + if (!kfence_pool) {
>> + pr_err("failed to allocate kfence pool\n");
>> + return 0;
>> + }
>> +
>> + return kfence_pool;
>> +}
>> +
>> +#else
>> +
>> +static phys_addr_t arm64_kfence_alloc_pool(void)
>> +{
>> + return (phys_addr_t)NULL;
>
> Just return "0" - which the above function does as well on error. Or
> the above function should also do (phys_addr_t)NULL for consistency.
Done
>
>> +}
>> +
>> +#endif
>> +
>> static void __init map_mem(pgd_t *pgdp)
>> {
>> static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
>> @@ -532,6 +560,7 @@ static void __init map_mem(pgd_t *pgdp)
>> phys_addr_t kernel_end = __pa_symbol(__init_begin);
>> phys_addr_t start, end;
>> int flags = NO_EXEC_MAPPINGS;
>> + phys_addr_t kfence_pool = 0;
>> u64 i;
>>
>> /*
>> @@ -564,6 +593,10 @@ static void __init map_mem(pgd_t *pgdp)
>> }
>> #endif
>>
>> + kfence_pool = arm64_kfence_alloc_pool();
>> + if (kfence_pool)
>> + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
>> +
>> /* map all the memory banks */
>> for_each_mem_range(i, &start, &end) {
>> if (start >= end)
>> @@ -608,6 +641,17 @@ static void __init map_mem(pgd_t *pgdp)
>> }
>> }
>> #endif
>> +
>> + /* Kfence pool needs page-level mapping */
>> + if (kfence_pool) {
>> + __map_memblock(pgdp, kfence_pool,
>> + kfence_pool + KFENCE_POOL_SIZE,
>> + pgprot_tagged(PAGE_KERNEL),
>> + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
>> + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
>> + /* kfence_pool really mapped now */
>> + kfence_set_pool(kfence_pool);
>> + }
>> }
>>
>> void mark_rodata_ro(void)
>> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
>> index 79dd201..61156d0 100644
>> --- a/arch/arm64/mm/pageattr.c
>> +++ b/arch/arm64/mm/pageattr.c
>> @@ -22,12 +22,11 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
>> bool can_set_direct_map(void)
>> {
>> /*
>> - * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
>> + * rodata_full and DEBUG_PAGEALLOC require linear map to be
>> * mapped at page granularity, so that it is possible to
>> * protect/unprotect single pages.
>> */
>> - return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
>> - IS_ENABLED(CONFIG_KFENCE);
>> + return (rodata_enabled && rodata_full) || debug_pagealloc_enabled();
>> }
>>
>> static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>> diff --git a/include/linux/kfence.h b/include/linux/kfence.h
>> index 726857a..d982ac2 100644
>> --- a/include/linux/kfence.h
>> +++ b/include/linux/kfence.h
>> @@ -64,6 +64,11 @@ static __always_inline bool is_kfence_address(const void *addr)
>> void __init kfence_alloc_pool(void);
>>
>> /**
>> + * kfence_set_pool() - KFENCE pool mapped and can be used
>
> I don't understand the comment. Maybe just "allows an arch to set the
> KFENCE pool during early init"
What I wanted to emphasize is that __kfence_pool can be used now :)
Sure, your wording is clearer.
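Perhaps something like this for v3, combining both points (hypothetical
wording):

/**
 * kfence_set_pool() - allow an arch to set the KFENCE pool during early init
 * @addr: physical address of the pool to use
 *
 * After this call the pool is mapped and __kfence_pool can be used.
 */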
>
>> + */
>> +void __init kfence_set_pool(phys_addr_t addr);
>> +
>> +/**
>> * kfence_init() - perform KFENCE initialization at boot time
>> *
>> * Requires that kfence_alloc_pool() was called before. This sets up the
>> @@ -222,8 +227,10 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
>>
>> #else /* CONFIG_KFENCE */
>>
>> +#define KFENCE_POOL_SIZE 0
>> static inline bool is_kfence_address(const void *addr) { return false; }
>> static inline void kfence_alloc_pool(void) { }
>> +static inline void kfence_set_pool(phys_addr_t addr) { }
>> static inline void kfence_init(void) { }
>> static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
>> static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
>> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
>> index 5349c37..a17c20c2 100644
>> --- a/mm/kfence/core.c
>> +++ b/mm/kfence/core.c
>> @@ -814,12 +814,21 @@ void __init kfence_alloc_pool(void)
>> if (!kfence_sample_interval)
>> return;
>>
>> + /* if __kfence_pool already initialized in some arch, abort */
>
> Abort sounds like it's a failure condition, but it's actually ok.
>
> Maybe just write:
>
> /* Check if the pool has already been initialized by arch; if so,
> skip the below. */
Yes, your wording is clearer. Done.
>
>> + if (__kfence_pool)
>> + return;
>> +
>> __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
>>
>> if (!__kfence_pool)
>> pr_err("failed to allocate pool\n");
>> }
>>
>> +void __init kfence_set_pool(phys_addr_t addr)
>> +{
>> + __kfence_pool = phys_to_virt(addr);
>> +}
>> +
>
> The rest looks good.
Updated in patchset v3 :)