From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>,
	Christoph Lameter <cl@linux.com>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Pekka Enberg <penberg@kernel.org>,
	linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Roman Gushchin <guro@fb.com>,
	patches@lists.linux.dev
Subject: Re: [PATCH v4 09/32] mm: Convert check_heap_object() to use struct slab
Date: Thu, 6 Jan 2022 13:56:02 +0000	[thread overview]
Message-ID: <Ydb08tDjjrQObNPi@ip-172-31-30-232.ap-northeast-1.compute.internal> (raw)
In-Reply-To: <20220104001046.12263-10-vbabka@suse.cz>

On Tue, Jan 04, 2022 at 01:10:23AM +0100, Vlastimil Babka wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Ensure that we're not seeing a tail page inside __check_heap_object() by
> converting to a slab instead of a page.  Take the opportunity to mark
> the slab as const since we're not modifying it.  Also move the
> declaration of __check_heap_object() to mm/slab.h so it's not available
> to the wider kernel.
> 
> [ vbabka@suse.cz: in check_heap_object() only convert to struct slab for
>   actual PageSlab pages; use folio as intermediate step instead of page ]
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: Roman Gushchin <guro@fb.com>
> ---
>  include/linux/slab.h |  8 --------
>  mm/slab.c            | 14 +++++++-------
>  mm/slab.h            | 11 +++++++++++
>  mm/slub.c            | 10 +++++-----
>  mm/usercopy.c        | 13 +++++++------
>  5 files changed, 30 insertions(+), 26 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index 181045148b06..367366f1d1ff 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -189,14 +189,6 @@ bool kmem_valid_obj(void *object);
>  void kmem_dump_obj(void *object);
>  #endif
>  
> -#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
> -void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
> -			bool to_user);
> -#else
> -static inline void __check_heap_object(const void *ptr, unsigned long n,
> -				       struct page *page, bool to_user) { }
> -#endif
> -
>  /*
>   * Some archs want to perform DMA into kmalloc caches and need a guaranteed
>   * alignment larger than the alignment of a 64-bit integer.
> diff --git a/mm/slab.c b/mm/slab.c
> index 44bc1fcd1393..38fcd3f496df 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -372,8 +372,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
>  static int slab_max_order = SLAB_MAX_ORDER_LO;
>  static bool slab_max_order_set __initdata;
>  
> -static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
> -				 unsigned int idx)
> +static inline void *index_to_obj(struct kmem_cache *cache,
> +				 const struct page *page, unsigned int idx)
>  {
>  	return page->s_mem + cache->size * idx;
>  }
> @@ -4166,8 +4166,8 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
>   * Returns NULL if check passes, otherwise const char * to name of cache
>   * to indicate an error.
>   */
> -void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
> -			 bool to_user)
> +void __check_heap_object(const void *ptr, unsigned long n,
> +			 const struct slab *slab, bool to_user)
>  {
>  	struct kmem_cache *cachep;
>  	unsigned int objnr;
> @@ -4176,15 +4176,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
>  	ptr = kasan_reset_tag(ptr);
>  
>  	/* Find and validate object. */
> -	cachep = page->slab_cache;
> -	objnr = obj_to_index(cachep, page, (void *)ptr);
> +	cachep = slab->slab_cache;
> +	objnr = obj_to_index(cachep, slab_page(slab), (void *)ptr);
>  	BUG_ON(objnr >= cachep->num);
>  
>  	/* Find offset within object. */
>  	if (is_kfence_address(ptr))
>  		offset = ptr - kfence_object_start(ptr);
>  	else
> -		offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
> +		offset = ptr - index_to_obj(cachep, slab_page(slab), objnr) - obj_offset(cachep);
>  
>  	/* Allow address range falling entirely within usercopy region. */
>  	if (offset >= cachep->useroffset &&
> diff --git a/mm/slab.h b/mm/slab.h
> index 9ae9f6c3d1cb..039babfde2fe 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -812,4 +812,15 @@ struct kmem_obj_info {
>  void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
>  #endif
>  
> +#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
> +void __check_heap_object(const void *ptr, unsigned long n,
> +			 const struct slab *slab, bool to_user);
> +#else
> +static inline
> +void __check_heap_object(const void *ptr, unsigned long n,
> +			 const struct slab *slab, bool to_user)
> +{
> +}
> +#endif
> +
>  #endif /* MM_SLAB_H */
> diff --git a/mm/slub.c b/mm/slub.c
> index 8e9667815f81..8b82188849ae 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4485,8 +4485,8 @@ EXPORT_SYMBOL(__kmalloc_node);
>   * Returns NULL if check passes, otherwise const char * to name of cache
>   * to indicate an error.
>   */
> -void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
> -			 bool to_user)
> +void __check_heap_object(const void *ptr, unsigned long n,
> +			 const struct slab *slab, bool to_user)
>  {
>  	struct kmem_cache *s;
>  	unsigned int offset;
> @@ -4495,10 +4495,10 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
>  	ptr = kasan_reset_tag(ptr);
>  
>  	/* Find object and usable object size. */
> -	s = page->slab_cache;
> +	s = slab->slab_cache;
>  
>  	/* Reject impossible pointers. */
> -	if (ptr < page_address(page))
> +	if (ptr < slab_address(slab))
>  		usercopy_abort("SLUB object not in SLUB page?!", NULL,
>  			       to_user, 0, n);
>  
> @@ -4506,7 +4506,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
>  	if (is_kfence)
>  		offset = ptr - kfence_object_start(ptr);
>  	else
> -		offset = (ptr - page_address(page)) % s->size;
> +		offset = (ptr - slab_address(slab)) % s->size;
>  
>  	/* Adjust for redzone and reject if within the redzone. */
>  	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
> diff --git a/mm/usercopy.c b/mm/usercopy.c
> index b3de3c4eefba..d0d268135d96 100644
> --- a/mm/usercopy.c
> +++ b/mm/usercopy.c
> @@ -20,6 +20,7 @@
>  #include <linux/atomic.h>
>  #include <linux/jump_label.h>
>  #include <asm/sections.h>
> +#include "slab.h"
>  
>  /*
>   * Checks if a given pointer and length is contained by the current
> @@ -223,7 +224,7 @@ static inline void check_page_span(const void *ptr, unsigned long n,
>  static inline void check_heap_object(const void *ptr, unsigned long n,
>  				     bool to_user)
>  {
> -	struct page *page;
> +	struct folio *folio;
>  
>  	if (!virt_addr_valid(ptr))
>  		return;
> @@ -231,16 +232,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
>  	/*
>  	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
>  	 * highmem page or fallback to virt_to_page(). The following
> -	 * is effectively a highmem-aware virt_to_head_page().
> +	 * is effectively a highmem-aware virt_to_slab().
>  	 */
> -	page = compound_head(kmap_to_page((void *)ptr));
> +	folio = page_folio(kmap_to_page((void *)ptr));
>  
> -	if (PageSlab(page)) {
> +	if (folio_test_slab(folio)) {
>  		/* Check slab allocator for flags and size. */
> -		__check_heap_object(ptr, n, page, to_user);
> +		__check_heap_object(ptr, n, folio_slab(folio), to_user);
>  	} else {
>  		/* Verify object does not incorrectly span multiple pages. */
> -		check_page_span(ptr, n, page, to_user);
> +		check_page_span(ptr, n, folio_page(folio, 0), to_user);
>  	}
>  }
>
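One note for anyone reading along: the slab helpers this patch leans on
(folio_slab(), slab_page(), slab_address()) are the type-safe casts and
thin wrappers introduced earlier in this series (patch 04/32). Roughly --
paraphrasing mm/slab.h from my reading of that patch, not quoting it
verbatim:

	/* cast helpers: struct slab overlays struct page / struct folio */
	#define folio_slab(folio)	(_Generic((folio),		\
		const struct folio *:	(const struct slab *)(folio),	\
		struct folio *:		(struct slab *)(folio)))

	#define slab_folio(s)		(_Generic((s),			\
		const struct slab *:	(const struct folio *)s,	\
		struct slab *:		(struct folio *)s))

	/* the slab's head page, for APIs not yet converted */
	#define slab_page(s)		folio_page(slab_folio(s), 0)

	static inline void *slab_address(const struct slab *slab)
	{
		return folio_address(slab_folio(slab));
	}

So the conversion costs nothing at runtime; it just encodes "this cannot
be a tail page" in the type system, which is exactly the guarantee
__check_heap_object() wants.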

Looks good,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks!
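(For context, in case it helps other readers -- this is my understanding
of the call chain, not something this patch changes: with
CONFIG_HARDENED_USERCOPY, the path into this code is roughly

	copy_to_user() / copy_from_user()
	  -> check_object_size()	/* include/linux/thread_info.h */
	    -> __check_object_size()	/* mm/usercopy.c */
	      -> check_heap_object()	/* mm/usercopy.c, this patch */
	        -> __check_heap_object()	/* mm/slab.c or mm/slub.c */

so it sits on the hot path of every hardened user copy with a
non-constant size.)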
> -- 
> 2.34.1
> 


Thread overview: 55+ messages
2022-01-04  0:10 [PATCH v4 00/32] Separate struct slab from struct page Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 01/32] mm: add virt_to_folio() and folio_address() Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 02/32] mm/slab: Dissolve slab_map_pages() in its caller Vlastimil Babka
2022-01-06  6:40   ` Hyeonggon Yoo
2022-01-04  0:10 ` [PATCH v4 03/32] mm/slub: Make object_err() static Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 04/32] mm: Split slab into its own type Vlastimil Babka
2022-01-06 11:54   ` Hyeonggon Yoo
2022-01-04  0:10 ` [PATCH v4 05/32] mm: Convert [un]account_slab_page() to struct slab Vlastimil Babka
2022-01-06 13:04   ` Hyeonggon Yoo
2022-01-04  0:10 ` [PATCH v4 06/32] mm: Convert virt_to_cache() to use " Vlastimil Babka
2022-01-06  6:44   ` Hyeonggon Yoo
2022-01-04  0:10 ` [PATCH v4 07/32] mm: Convert __ksize() to " Vlastimil Babka
2022-01-06 13:42   ` Hyeonggon Yoo
2022-01-06 17:26     ` Vlastimil Babka
2022-01-08  6:21       ` Hyeonggon Yoo
2022-01-04  0:10 ` [PATCH v4 08/32] mm: Use struct slab in kmem_obj_info() Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 09/32] mm: Convert check_heap_object() to use struct slab Vlastimil Babka
2022-01-06 13:56   ` Hyeonggon Yoo [this message]
2022-01-04  0:10 ` [PATCH v4 10/32] mm/slub: Convert detached_freelist to use a " Vlastimil Babka
2022-01-05  0:58   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 11/32] mm/slub: Convert kfree() " Vlastimil Babka
2022-01-05  1:00   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 12/32] mm/slub: Convert __slab_lock() and __slab_unlock() to " Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 13/32] mm/slub: Convert print_page_info() to print_slab_info() Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 14/32] mm/slub: Convert alloc_slab_page() to return a struct slab Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 15/32] mm/slub: Convert __free_slab() to use " Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 16/32] mm/slub: Convert pfmemalloc_match() to take a " Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 17/32] mm/slub: Convert most struct page to struct slab by spatch Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 18/32] mm/slub: Finish struct page to struct slab conversion Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 19/32] mm/slab: Convert kmem_getpages() and kmem_freepages() to struct slab Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 20/32] mm/slab: Convert most struct page to struct slab by spatch Vlastimil Babka
2022-01-05  1:52   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 21/32] mm/slab: Finish struct page to struct slab conversion Vlastimil Babka
2022-01-05  2:05   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 22/32] mm: Convert struct page to struct slab in functions used by other subsystems Vlastimil Babka
2022-01-05  2:12   ` Roman Gushchin
2022-01-05 16:39     ` Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 23/32] mm/memcg: Convert slab objcgs from struct page to struct slab Vlastimil Babka
2022-01-05  2:41   ` Roman Gushchin
2022-01-05 17:08     ` Vlastimil Babka
2022-01-06  3:36       ` Roman Gushchin
2022-01-05  2:55   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 24/32] mm/slob: Convert SLOB to use struct slab and struct folio Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 25/32] mm/kasan: Convert to struct folio and struct slab Vlastimil Babka
2022-01-06  4:06   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 26/32] mm/kfence: Convert kfence_guarded_alloc() to " Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 27/32] mm/sl*b: Differentiate struct slab fields by sl*b implementations Vlastimil Babka
2022-01-06  4:12   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 28/32] mm/slub: Simplify struct slab slabs field definition Vlastimil Babka
2022-01-06  4:13   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 29/32] mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled Vlastimil Babka
2022-01-06  4:16   ` Roman Gushchin
2022-01-04  0:10 ` [PATCH v4 30/32] zsmalloc: Stop using slab fields in struct page Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 31/32] bootmem: Use page->index instead of page->freelist Vlastimil Babka
2022-01-04  0:10 ` [PATCH v4 32/32] mm/slob: Remove unnecessary page_mapcount_reset() function call Vlastimil Babka
