From: Vlastimil Babka <vbabka@suse.cz>
To: Matthew Wilcox <willy@infradead.org>,
	linux-mm@kvack.org, Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Pekka Enberg <penberg@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Subject: [RFC PATCH 08/32] mm: Convert check_heap_object() to use struct slab
Date: Tue, 16 Nov 2021 01:16:04 +0100
Message-ID: <20211116001628.24216-9-vbabka@suse.cz>
In-Reply-To: <20211116001628.24216-1-vbabka@suse.cz>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Ensure that we're not seeing a tail page inside __check_heap_object()
by converting to a slab instead of a page.  Take the opportunity to
mark the slab as const since we're not modifying it.  Also move the
declaration of __check_heap_object() to mm/slab.h so it's not
available to the wider kernel.

[ vbabka@suse.cz: in check_heap_object() only convert to struct slab for
  actual PageSlab pages; use folio as intermediate step instead of page ]
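
For context, the hunks below rely on the slab conversion helpers
introduced by patch 03/32 ("mm: Split slab into its own type") earlier
in this series.  Since struct slab is laid out to overlay struct page,
they boil down to type-checked casts plus the usual folio accessors.
A simplified sketch, not the exact definitions from that patch:

/*
 * folio_slab()/slab_folio(): reinterpret the same memory as the other
 * type.  The real versions use _Generic to preserve constness across
 * the cast, which is what lets __check_heap_object() take a
 * const struct slab * below.
 */
#define folio_slab(folio)	((struct slab *)(folio))
#define slab_folio(slab)	((struct folio *)(slab))
/* Head page and virtual address of the slab's memory: */
#define slab_page(slab)		folio_page(slab_folio(slab), 0)
#define slab_address(slab)	folio_address(slab_folio(slab))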

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/slab.h |  8 --------
 mm/slab.c            | 14 +++++++-------
 mm/slab.h            |  9 +++++++++
 mm/slub.c            | 10 +++++-----
 mm/usercopy.c        | 13 +++++++------
 5 files changed, 28 insertions(+), 26 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 181045148b06..367366f1d1ff 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -189,14 +189,6 @@ bool kmem_valid_obj(void *object);
 void kmem_dump_obj(void *object);
 #endif
 
-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			bool to_user);
-#else
-static inline void __check_heap_object(const void *ptr, unsigned long n,
-				       struct page *page, bool to_user) { }
-#endif
-
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
diff --git a/mm/slab.c b/mm/slab.c
index acdede1e6528..2a62fb1612ba 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -372,8 +372,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
-				 unsigned int idx)
+static inline void *index_to_obj(struct kmem_cache *cache,
+				 const struct page *page, unsigned int idx)
 {
 	return page->s_mem + cache->size * idx;
 }
@@ -4167,8 +4167,8 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			 bool to_user)
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4177,15 +4177,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	ptr = kasan_reset_tag(ptr);
 
 	/* Find and validate object. */
-	cachep = page->slab_cache;
-	objnr = obj_to_index(cachep, page, (void *)ptr);
+	cachep = slab->slab_cache;
+	objnr = obj_to_index(cachep, slab_page(slab), (void *)ptr);
 	BUG_ON(objnr >= cachep->num);
 
 	/* Find offset within object. */
 	if (is_kfence_address(ptr))
 		offset = ptr - kfence_object_start(ptr);
 	else
-		offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+		offset = ptr - index_to_obj(cachep, slab_page(slab), objnr) - obj_offset(cachep);
 
 	/* Allow address range falling entirely within usercopy region. */
 	if (offset >= cachep->useroffset &&
diff --git a/mm/slab.h b/mm/slab.h
index c7ab033a1c2f..d6c993894c02 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -812,4 +812,13 @@ struct kmem_obj_info {
 void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user);
+#else
+static inline
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user) { }
+#endif
+
 #endif /* MM_SLAB_H */
diff --git a/mm/slub.c b/mm/slub.c
index ffcc75020225..dd9331e08635 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4484,8 +4484,8 @@ EXPORT_SYMBOL(__kmalloc_node);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
-			 bool to_user)
+void __check_heap_object(const void *ptr, unsigned long n,
+			 const struct slab *slab, bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned int offset;
@@ -4494,10 +4494,10 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	ptr = kasan_reset_tag(ptr);
 
 	/* Find object and usable object size. */
-	s = page->slab_cache;
+	s = slab->slab_cache;
 
 	/* Reject impossible pointers. */
-	if (ptr < page_address(page))
+	if (ptr < slab_address(slab))
 		usercopy_abort("SLUB object not in SLUB page?!", NULL,
 			       to_user, 0, n);
 
@@ -4505,7 +4505,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	if (is_kfence)
 		offset = ptr - kfence_object_start(ptr);
 	else
-		offset = (ptr - page_address(page)) % s->size;
+		offset = (ptr - slab_address(slab)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
 	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
diff --git a/mm/usercopy.c b/mm/usercopy.c
index b3de3c4eefba..d0d268135d96 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -20,6 +20,7 @@
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
+#include "slab.h"
 
 /*
  * Checks if a given pointer and length is contained by the current
@@ -223,7 +224,7 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
-	struct page *page;
+	struct folio *folio;
 
 	if (!virt_addr_valid(ptr))
 		return;
@@ -231,16 +232,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	/*
 	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
 	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_head_page().
+	 * is effectively a highmem-aware virt_to_slab().
 	 */
-	page = compound_head(kmap_to_page((void *)ptr));
+	folio = page_folio(kmap_to_page((void *)ptr));
 
-	if (PageSlab(page)) {
+	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
-		__check_heap_object(ptr, n, page, to_user);
+		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, page, to_user);
+		check_page_span(ptr, n, folio_page(folio, 0), to_user);
 	}
 }
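
A note on the folio step above: page_folio() always resolves through
compound_head(), and a folio is by definition never a tail page, so
the folio_test_slab() test and the slab passed to
__check_heap_object() are guaranteed to refer to the head page of a
multi-page slab.  Open-coded, the conversion is roughly the following
(a sketch, not the literal kernel definition of page_folio()):

	struct page *head = compound_head(kmap_to_page((void *)ptr));
	struct folio *folio = (struct folio *)head;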
 
-- 
2.33.1


