From: Andrey Ryabinin <aryabinin@virtuozzo.com>
To: Alexander Potapenko <glider@google.com>,
	adech.fo@gmail.com, cl@linux.com, dvyukov@google.com,
	akpm@linux-foundation.org, rostedt@goodmis.org,
	iamjoonsoo.kim@lge.com, js1304@gmail.com, kcc@google.com,
	kuthonuzo.luruo@hpe.com
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH] mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
Date: Thu, 9 Jun 2016 19:45:16 +0300
Message-ID: <57599D1C.2080701@virtuozzo.com>
In-Reply-To: <1465411243-102618-1-git-send-email-glider@google.com>



On 06/08/2016 09:40 PM, Alexander Potapenko wrote:
> For KASAN builds:
>  - switch SLUB allocator to using stackdepot instead of storing the
>    allocation/deallocation stacks in the objects;
>  - define SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER to zero,
>    effectively disabling these debug features, as they're redundant in
>    the presence of KASAN;

Instead of duplicating functionality, I think it would be better to switch SLAB_STORE_USER itself to stackdepot.
Right now we have two piles of code which do basically the same thing, but
do it differently.
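
E.g. something like this (untested sketch only; I'm assuming struct track
in mm/slub.c grows a depot handle in place of the inline addrs[] array,
and that set_track() is rewritten accordingly):

#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

struct track {
	unsigned long addr;		/* called-from address */
	depot_stack_handle_t handle;	/* deduplicated stack in the depot */
	int cpu;			/* was running on cpu */
	int pid;			/* pid context */
	unsigned long when;		/* when did the operation occur */
};

static void set_track(struct kmem_cache *s, void *object,
		      enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);
	unsigned long entries[TRACK_ADDRS_COUNT];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 3,
	};

	/* Only one copy of each unique stack is kept in the depot. */
	save_stack_trace(&trace);
	p->handle = depot_save_stack(&trace, GFP_NOWAIT);

	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

print_track() would then use depot_fetch_stack() to get the trace back.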

>  - refactor the slab freelist hook, put freed memory into the quarantine.
> 

What you did with slab_freelist_hook() is not refactoring; it's obfuscation.


>  }
>  
> -#ifdef CONFIG_SLAB
>  /*
>   * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
>   * For larger allocations larger redzones are used.
> @@ -372,17 +371,21 @@ static size_t optimal_redzone(size_t object_size)
>  void kasan_cache_create(struct kmem_cache *cache, size_t *size,
>  			unsigned long *flags)
>  {
> -	int redzone_adjust;
> -	/* Make sure the adjusted size is still less than
> -	 * KMALLOC_MAX_CACHE_SIZE.
> -	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
> -	 * to skip it for SLUB when it starts using kasan_cache_create().
> +	int redzone_adjust, orig_size = *size;
> +
> +#ifdef CONFIG_SLAB
> +	/*
> +	 * Make sure the adjusted size is still less than
> +	 * KMALLOC_MAX_CACHE_SIZE, i.e. we don't use the page allocator.
>  	 */
> +
>  	if (*size > KMALLOC_MAX_CACHE_SIZE -

This is wrong. You probably wanted KMALLOC_MAX_SIZE here.

However, we should get rid of SLAB_KASAN altogether. It's absolutely useless and only complicates
the code. And if the metadata doesn't fit in KMALLOC_MAX_SIZE, just don't create the cache.
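
I.e. something along these lines (just a sketch, untested; it assumes
kasan_cache_create() is changed to return an error and the slab code is
taught to abort cache creation on failure; it also always reserves both
metadata structs for simplicity):

int kasan_cache_create(struct kmem_cache *cache, size_t *size,
		       unsigned long *flags)
{
	size_t total = *size;

	cache->kasan_info.alloc_meta_offset = total;
	total += sizeof(struct kasan_alloc_meta);
	cache->kasan_info.free_meta_offset = total;
	total += sizeof(struct kasan_free_meta);
	total = max(total, cache->object_size +
			   optimal_redzone(cache->object_size));

	if (total > KMALLOC_MAX_SIZE)
		return -E2BIG;	/* metadata doesn't fit: no cache at all */

	*size = total;
	return 0;
}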

>  	    sizeof(struct kasan_alloc_meta) -
>  	    sizeof(struct kasan_free_meta))
>  		return;
> +#endif
>  	*flags |= SLAB_KASAN;
> +
>  	/* Add alloc meta. */
>  	cache->kasan_info.alloc_meta_offset = *size;
>  	*size += sizeof(struct kasan_alloc_meta);
> @@ -392,17 +395,37 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
>  	    cache->object_size < sizeof(struct kasan_free_meta)) {
>  		cache->kasan_info.free_meta_offset = *size;
>  		*size += sizeof(struct kasan_free_meta);
> +	} else {
> +		cache->kasan_info.free_meta_offset = 0;
>  	}
>  	redzone_adjust = optimal_redzone(cache->object_size) -
>  		(*size - cache->object_size);
> +
>  	if (redzone_adjust > 0)
>  		*size += redzone_adjust;
> +
> +#ifdef CONFIG_SLAB
>  	*size = min(KMALLOC_MAX_CACHE_SIZE,
>  		    max(*size,
>  			cache->object_size +
>  			optimal_redzone(cache->object_size)));
> -}
> +	/*
> +	 * If the metadata doesn't fit, disable KASAN at all.
> +	 */
> +	if (*size <= cache->kasan_info.alloc_meta_offset ||
> +			*size <= cache->kasan_info.free_meta_offset) {
> +		*flags &= ~SLAB_KASAN;
> +		*size = orig_size;
> +		cache->kasan_info.alloc_meta_offset = -1;
> +		cache->kasan_info.free_meta_offset = -1;
> +	}
> +#else
> +	*size = max(*size,
> +			cache->object_size +
> +			optimal_redzone(cache->object_size));
> +
>  #endif
> +}
>  
>  void kasan_cache_shrink(struct kmem_cache *cache)
>  {
> @@ -431,16 +454,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
>  	kasan_poison_shadow(object,
>  			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
>  			KASAN_KMALLOC_REDZONE);
> -#ifdef CONFIG_SLAB
>  	if (cache->flags & SLAB_KASAN) {
>  		struct kasan_alloc_meta *alloc_info =
>  			get_alloc_info(cache, object);
> -		alloc_info->state = KASAN_STATE_INIT;
> +		if (alloc_info)

If I read the code right, alloc_info can be NULL only when SLAB_KASAN is *not* set.
But this block is only entered when SLAB_KASAN is set, so the NULL check is dead code.


> +			alloc_info->state = KASAN_STATE_INIT;
>  	}
> -#endif
>  }
>  
> -#ifdef CONFIG_SLAB
>  static inline int in_irqentry_text(unsigned long ptr)
>  {
>  	return (ptr >= (unsigned long)&__irqentry_text_start &&
> @@ -492,6 +513,8 @@ struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
>  					const void *object)
>  {
>  	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
> +	if (cache->kasan_info.alloc_meta_offset == -1)
> +		return NULL;

What's the point of this? This should always be false: alloc_meta_offset is set
to -1 only when SLAB_KASAN gets cleared, and the callers check SLAB_KASAN first.

>  	return (void *)object + cache->kasan_info.alloc_meta_offset;
>  }
>  
> @@ -499,9 +522,10 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
>  				      const void *object)
>  {
>  	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
> +	if (cache->kasan_info.free_meta_offset == -1)
> +		return NULL;
>  	return (void *)object + cache->kasan_info.free_meta_offset;
>  }
> -#endif
>  
>  void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
>  {
> @@ -522,7 +546,6 @@ void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
>  
>  bool kasan_slab_free(struct kmem_cache *cache, void *object)
>  {
> -#ifdef CONFIG_SLAB
>  	/* RCU slabs could be legally used after free within the RCU period */
>  	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
>  		return false;
> @@ -532,7 +555,10 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
>  			get_alloc_info(cache, object);
>  		struct kasan_free_meta *free_info =
>  			get_free_info(cache, object);
> -
> +		WARN_ON(!alloc_info);
> +		WARN_ON(!free_info);
> +		if (!alloc_info || !free_info)
> +			return;

Again, never possible.


>  		switch (alloc_info->state) {
>  		case KASAN_STATE_ALLOC:
>  			alloc_info->state = KASAN_STATE_QUARANTINE;
> @@ -550,10 +576,6 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object)
>  		}
>  	}
>  	return false;
> -#else
> -	kasan_poison_slab_free(cache, object);
> -	return false;
> -#endif
>  }
>  
>  void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
> @@ -568,24 +590,29 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  	if (unlikely(object == NULL))
>  		return;
>  
> +	if (!(cache->flags & SLAB_KASAN))
> +		return;
> +
>  	redzone_start = round_up((unsigned long)(object + size),
>  				KASAN_SHADOW_SCALE_SIZE);
>  	redzone_end = round_up((unsigned long)object + cache->object_size,
>  				KASAN_SHADOW_SCALE_SIZE);
>  
>  	kasan_unpoison_shadow(object, size);
> +	WARN_ON(redzone_start > redzone_end);
> +	if (redzone_start > redzone_end)

How can that happen? size is never larger than cache->object_size here, and both
bounds are rounded up to the same granularity, so redzone_start <= redzone_end always.

> +		return;
>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  		KASAN_KMALLOC_REDZONE);
> -#ifdef CONFIG_SLAB
>  	if (cache->flags & SLAB_KASAN) {
>  		struct kasan_alloc_meta *alloc_info =
>  			get_alloc_info(cache, object);
> -
> -		alloc_info->state = KASAN_STATE_ALLOC;
> -		alloc_info->alloc_size = size;
> -		set_track(&alloc_info->track, flags);
> +		if (alloc_info) {

And again...


> +			alloc_info->state = KASAN_STATE_ALLOC;
> +			alloc_info->alloc_size = size;
> +			set_track(&alloc_info->track, flags);
> +		}
>  	}
> -#endif
>  }
>  EXPORT_SYMBOL(kasan_kmalloc);
>  


[..]

> diff --git a/mm/slab.h b/mm/slab.h
> index dedb1a9..fde1fea 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -366,6 +366,10 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
>  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
>  		return s->object_size;
>  # endif
> +# ifdef CONFIG_KASAN

Gosh, you love ifdefs, don't you? Hint: it's redundant here.
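
IIRC, SLAB_KASAN is defined to 0 when CONFIG_KASAN is off, so the flag
test is dead code anyway and the compiler drops it on its own:

/* include/linux/slab.h (from memory): */
#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif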

> +	if (s->flags & SLAB_KASAN)
> +		return s->object_size;
> +# endif
>  	/*
>  	 * If we have the need to store the freelist pointer
...
