From: Matteo Rizzo <matteorizzo@google.com>
To: cl@linux.com, penberg@kernel.org, rientjes@google.com,
iamjoonsoo.kim@lge.com, akpm@linux-foundation.org,
vbabka@suse.cz, roman.gushchin@linux.dev, 42.hyeyoo@gmail.com,
keescook@chromium.org, linux-kernel@vger.kernel.org,
linux-doc@vger.kernel.org, linux-mm@kvack.org,
linux-hardening@vger.kernel.org, tglx@linutronix.de,
mingo@redhat.com, bp@alien8.de, dave.hansen@linux.intel.com,
x86@kernel.org, hpa@zytor.com, corbet@lwn.net, luto@kernel.org,
peterz@infradead.org
Cc: jannh@google.com, matteorizzo@google.com, evn@google.com,
poprdi@google.com, jordyzomer@google.com
Subject: [RFC PATCH 07/14] mm/slub: pass slab pointer to the freeptr decode helper
Date: Fri, 15 Sep 2023 10:59:26 +0000 [thread overview]
Message-ID: <20230915105933.495735-8-matteorizzo@google.com> (raw)
In-Reply-To: <20230915105933.495735-1-matteorizzo@google.com>
From: Jann Horn <jannh@google.com>
This is a refactoring step in preparation for checking freeptrs for
corruption inside freelist_ptr_decode().
Signed-off-by: Jann Horn <jannh@google.com>
Co-developed-by: Matteo Rizzo <matteorizzo@google.com>
Signed-off-by: Matteo Rizzo <matteorizzo@google.com>
---
mm/slub.c | 43 +++++++++++++++++++++++--------------------
1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index eaa1256aff89..42e7cc0b4452 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -383,7 +383,8 @@ static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
}
static inline void *freelist_ptr_decode(const struct kmem_cache *s,
- freeptr_t ptr, unsigned long ptr_addr)
+ freeptr_t ptr, unsigned long ptr_addr,
+ struct slab *slab)
{
void *decoded;
@@ -395,7 +396,8 @@ static inline void *freelist_ptr_decode(const struct kmem_cache *s,
return decoded;
}
-static inline void *get_freepointer(struct kmem_cache *s, void *object)
+static inline void *get_freepointer(struct kmem_cache *s, void *object,
+ struct slab *slab)
{
unsigned long ptr_addr;
freeptr_t p;
@@ -403,7 +405,7 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
object = kasan_reset_tag(object);
ptr_addr = (unsigned long)object + s->offset;
p = *(freeptr_t *)(ptr_addr);
- return freelist_ptr_decode(s, p, ptr_addr);
+ return freelist_ptr_decode(s, p, ptr_addr, slab);
}
#ifndef CONFIG_SLUB_TINY
@@ -424,18 +426,19 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
* get_freepointer_safe() returns initialized memory.
*/
__no_kmsan_checks
-static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object,
+ struct slab *slab)
{
unsigned long freepointer_addr;
freeptr_t p;
if (!debug_pagealloc_enabled_static())
- return get_freepointer(s, object);
+ return get_freepointer(s, object, slab);
object = kasan_reset_tag(object);
freepointer_addr = (unsigned long)object + s->offset;
copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
- return freelist_ptr_decode(s, p, freepointer_addr);
+ return freelist_ptr_decode(s, p, freepointer_addr, slab);
}
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
@@ -627,7 +630,7 @@ static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
bitmap_zero(obj_map, slab->objects);
- for (p = slab->freelist; p; p = get_freepointer(s, p))
+ for (p = slab->freelist; p; p = get_freepointer(s, p, slab))
set_bit(__obj_to_index(s, addr, p), obj_map);
}
@@ -937,7 +940,7 @@ static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
print_slab_info(slab);
pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
- p, p - addr, get_freepointer(s, p));
+ p, p - addr, get_freepointer(s, p, slab));
if (s->flags & SLAB_RED_ZONE)
print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
@@ -1230,7 +1233,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
return 1;
/* Check free pointer validity */
- if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
+ if (!check_valid_pointer(s, slab, get_freepointer(s, p, slab))) {
object_err(s, slab, p, "Freepointer corrupt");
/*
* No choice but to zap it and thus lose the remainder
@@ -1298,7 +1301,7 @@ static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
break;
}
object = fp;
- fp = get_freepointer(s, object);
+ fp = get_freepointer(s, object, slab);
nr++;
}
@@ -1810,7 +1813,7 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
object = next;
/* Single objects don't actually contain a freepointer */
if (object != old_tail)
- next = get_freepointer(s, object);
+ next = get_freepointer(s, object, virt_to_slab(object));
/* If object's reuse doesn't have to be delayed */
if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
@@ -2161,7 +2164,7 @@ static void *alloc_single_from_partial(struct kmem_cache *s,
lockdep_assert_held(&n->list_lock);
object = slab->freelist;
- slab->freelist = get_freepointer(s, object);
+ slab->freelist = get_freepointer(s, object, slab);
slab->inuse++;
if (!alloc_debug_processing(s, slab, object, orig_size)) {
@@ -2192,7 +2195,7 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s,
object = slab->freelist;
- slab->freelist = get_freepointer(s, object);
+ slab->freelist = get_freepointer(s, object, slab);
slab->inuse = 1;
if (!alloc_debug_processing(s, slab, object, orig_size))
@@ -2517,7 +2520,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
freelist_tail = NULL;
freelist_iter = freelist;
while (freelist_iter) {
- nextfree = get_freepointer(s, freelist_iter);
+ nextfree = get_freepointer(s, freelist_iter, slab);
/*
* If 'nextfree' is invalid, it is possible that the object at
@@ -2944,7 +2947,7 @@ static inline bool free_debug_processing(struct kmem_cache *s,
/* Reached end of constructed freelist yet? */
if (object != tail) {
- object = get_freepointer(s, object);
+ object = get_freepointer(s, object, slab);
goto next_object;
}
checks_ok = true;
@@ -3173,7 +3176,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
* That slab must be frozen for per cpu allocations to work.
*/
VM_BUG_ON(!c->slab->frozen);
- c->freelist = get_freepointer(s, freelist);
+ c->freelist = get_freepointer(s, freelist, c->slab);
c->tid = next_tid(c->tid);
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
return freelist;
@@ -3275,7 +3278,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
* For !pfmemalloc_match() case we don't load freelist so that
* we don't make further mismatched allocations easier.
*/
- deactivate_slab(s, slab, get_freepointer(s, freelist));
+ deactivate_slab(s, slab, get_freepointer(s, freelist, slab));
return freelist;
}
@@ -3377,7 +3380,7 @@ static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
unlikely(!object || !slab || !node_match(slab, node))) {
object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
} else {
- void *next_object = get_freepointer_safe(s, object);
+ void *next_object = get_freepointer_safe(s, object, slab);
/*
* The cmpxchg will only match if there was no additional
@@ -3984,7 +3987,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
continue; /* goto for-loop */
}
- c->freelist = get_freepointer(s, object);
+ c->freelist = get_freepointer(s, object, c->slab);
p[i] = object;
maybe_wipe_obj_freeptr(s, p[i]);
}
@@ -4275,7 +4278,7 @@ static void early_kmem_cache_node_alloc(int node)
init_tracking(kmem_cache_node, n);
#endif
n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
- slab->freelist = get_freepointer(kmem_cache_node, n);
+ slab->freelist = get_freepointer(kmem_cache_node, n, slab);
slab->inuse = 1;
kmem_cache_node->node[node] = n;
init_kmem_cache_node(n);
--
2.42.0.459.ge4e396fd5e-goog
next prev parent reply other threads:[~2023-09-15 10:59 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-15 10:59 [RFC PATCH 00/14] Prevent cross-cache attacks in the SLUB allocator Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 01/14] mm/slub: don't try to dereference invalid freepointers Matteo Rizzo
2023-09-15 20:50 ` Kees Cook
2023-09-30 11:04 ` Hyeonggon Yoo
2023-09-15 10:59 ` [RFC PATCH 02/14] mm/slub: add is_slab_addr/is_slab_page helpers Matteo Rizzo
2023-09-15 20:55 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 03/14] mm/slub: move kmem_cache_order_objects to slab.h Matteo Rizzo
2023-09-15 20:56 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 04/14] mm: use virt_to_slab instead of folio_slab Matteo Rizzo
2023-09-15 20:59 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 05/14] mm/slub: create folio_set/clear_slab helpers Matteo Rizzo
2023-09-15 21:02 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 06/14] mm/slub: pass additional args to alloc_slab_page Matteo Rizzo
2023-09-15 21:03 ` Kees Cook
2023-09-15 10:59 ` Matteo Rizzo [this message]
2023-09-15 21:06 ` [RFC PATCH 07/14] mm/slub: pass slab pointer to the freeptr decode helper Kees Cook
2023-09-15 10:59 ` [RFC PATCH 08/14] security: introduce CONFIG_SLAB_VIRTUAL Matteo Rizzo
2023-09-15 21:07 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 09/14] mm/slub: add the slab freelists to kmem_cache Matteo Rizzo
2023-09-15 21:08 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 10/14] x86: Create virtual memory region for SLUB Matteo Rizzo
2023-09-15 21:13 ` Kees Cook
2023-09-15 21:49 ` Dave Hansen
2023-09-18 8:54 ` Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 11/14] mm/slub: allocate slabs from virtual memory Matteo Rizzo
2023-09-15 21:22 ` Kees Cook
2023-09-15 21:57 ` Dave Hansen
2023-10-11 9:17 ` Matteo Rizzo
2023-09-15 10:59 ` [RFC PATCH 12/14] mm/slub: introduce the deallocated_pages sysfs attribute Matteo Rizzo
2023-09-15 21:23 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 13/14] mm/slub: sanity-check freepointers Matteo Rizzo
2023-09-15 21:26 ` Kees Cook
2023-09-15 10:59 ` [RFC PATCH 14/14] security: add documentation for SLAB_VIRTUAL Matteo Rizzo
2023-09-15 21:34 ` Kees Cook
2023-09-20 9:04 ` Vlastimil Babka
2023-09-15 15:19 ` [RFC PATCH 00/14] Prevent cross-cache attacks in the SLUB allocator Dave Hansen
2023-09-15 16:30 ` Lameter, Christopher
2023-09-18 12:08 ` Matteo Rizzo
2023-09-18 17:39 ` Ingo Molnar
2023-09-18 18:05 ` Linus Torvalds
2023-09-19 15:48 ` Matteo Rizzo
2023-09-19 16:02 ` Dave Hansen
2023-09-19 17:56 ` Kees Cook
2023-09-19 18:49 ` Linus Torvalds
2023-09-19 13:42 ` Matteo Rizzo
2023-09-19 15:56 ` Dave Hansen
2023-09-20 7:44 ` Ingo Molnar
2023-09-20 8:49 ` Vlastimil Babka
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230915105933.495735-8-matteorizzo@google.com \
--to=matteorizzo@google.com \
--cc=42.hyeyoo@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=bp@alien8.de \
--cc=cl@linux.com \
--cc=corbet@lwn.net \
--cc=dave.hansen@linux.intel.com \
--cc=evn@google.com \
--cc=hpa@zytor.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=jannh@google.com \
--cc=jordyzomer@google.com \
--cc=keescook@chromium.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-hardening@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=mingo@redhat.com \
--cc=penberg@kernel.org \
--cc=peterz@infradead.org \
--cc=poprdi@google.com \
--cc=rientjes@google.com \
--cc=roman.gushchin@linux.dev \
--cc=tglx@linutronix.de \
--cc=vbabka@suse.cz \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox