From: Dmitry Vyukov <dvyukov@google.com>
To: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
	Konstantin Serebryany <kcc@google.com>,
	Dmitry Chernenkov <dmitryc@google.com>,
	Andrey Konovalov <adech.fo@gmail.com>,
	Yuri Gribov <tetra2005@gmail.com>,
	Konstantin Khlebnikov <koct9i@gmail.com>,
	Sasha Levin <sasha.levin@oracle.com>,
	Christoph Lameter <cl@linux.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Dave Hansen <dave.hansen@intel.com>,
	Andi Kleen <andi@firstfloor.org>,
	Vegard Nossum <vegard.nossum@gmail.com>,
	"H. Peter Anvin" <hpa@zytor.com>, Dave Jones <davej@redhat.com>,
	x86@kernel.org, linux-mm@kvack.org,
	Pekka Enberg <penberg@kernel.org>,
	David Rientjes <rientjes@google.com>
Subject: Re: [PATCH v3 09/13] mm: slub: add kernel address sanitizer support for slub allocator
Date: Thu, 25 Sep 2014 21:48:16 -0700
Message-ID: <CACT4Y+a0DMk8vyCcesrsKt7rXVDD2LZsfnGemJAgeRiVbMxxxw@mail.gmail.com>
In-Reply-To: <1411562649-28231-10-git-send-email-a.ryabinin@samsung.com>

On Wed, Sep 24, 2014 at 5:44 AM, Andrey Ryabinin <a.ryabinin@samsung.com> wrote:
> With this patch kasan will be able to catch bugs in memory allocated
> by slub.
> Initially, all objects in a newly allocated slab page are marked as free.
> Later, when a slub object is allocated, the number of bytes requested by
> the caller is marked as accessible, and the rest of the object (including
> slub's metadata) is marked as a redzone (inaccessible).
>
> We also mark an object as accessible if ksize was called for it.
> There are places in the kernel where ksize is called to inquire the size
> of the actually allocated area. Such callers may validly access the whole
> allocated memory, so it should be marked as accessible.
>
> Code in the slub.c and slab_common.c files may validly access objects'
> metadata, so instrumentation for these files is disabled.
>
> Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
> ---
>  include/linux/kasan.h | 24 +++++++++++++
>  include/linux/slab.h  | 11 ++++--
>  lib/Kconfig.kasan     |  1 +
>  mm/Makefile           |  3 ++
>  mm/kasan/kasan.c      | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  mm/kasan/kasan.h      |  5 +++
>  mm/kasan/report.c     | 27 ++++++++++++++
>  mm/slab_common.c      |  5 ++-
>  mm/slub.c             | 36 +++++++++++++++++--
>  9 files changed, 203 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index 9714fba..4b866fa 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -32,6 +32,17 @@ void kasan_unpoison_shadow(const void *address, size_t size);
>
>  void kasan_alloc_pages(struct page *page, unsigned int order);
>  void kasan_free_pages(struct page *page, unsigned int order);
> +void kasan_mark_slab_padding(struct kmem_cache *s, void *object);
> +
> +void kasan_kmalloc_large(const void *ptr, size_t size);
> +void kasan_kfree_large(const void *ptr);
> +void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
> +void kasan_krealloc(const void *object, size_t new_size);
> +
> +void kasan_slab_alloc(struct kmem_cache *s, void *object);
> +void kasan_slab_free(struct kmem_cache *s, void *object);
> +
> +void kasan_free_slab_pages(struct page *page, int order);
>
>  #else /* CONFIG_KASAN */
>
> @@ -42,6 +53,19 @@ static inline void kasan_disable_local(void) {}
>
>  static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
>  static inline void kasan_free_pages(struct page *page, unsigned int order) {}
> +static inline void kasan_mark_slab_padding(struct kmem_cache *s,
> +                                       void *object) {}
> +
> +static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
> +static inline void kasan_kfree_large(const void *ptr) {}
> +static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
> +                               size_t size) {}
> +static inline void kasan_krealloc(const void *object, size_t new_size) {}
> +
> +static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
> +static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
> +
> +static inline void kasan_free_slab_pages(struct page *page, int order) {}
>
>  #endif /* CONFIG_KASAN */
>
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index c265bec..5f97037 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -104,6 +104,7 @@
>                                 (unsigned long)ZERO_SIZE_PTR)
>
>  #include <linux/kmemleak.h>
> +#include <linux/kasan.h>
>
>  struct mem_cgroup;
>  /*
> @@ -326,7 +327,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
>  static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
>                 gfp_t flags, size_t size)
>  {
> -       return kmem_cache_alloc(s, flags);
> +       void *ret = kmem_cache_alloc(s, flags);
> +
> +       kasan_kmalloc(s, ret, size);
> +       return ret;
>  }
>
>  static __always_inline void *
> @@ -334,7 +338,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
>                               gfp_t gfpflags,
>                               int node, size_t size)
>  {
> -       return kmem_cache_alloc_node(s, gfpflags, node);
> +       void *ret = kmem_cache_alloc_node(s, gfpflags, node);
> +
> +       kasan_kmalloc(s, ret, size);
> +       return ret;
>  }
>  #endif /* CONFIG_TRACING */
>
> diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
> index b458a00..d16b899 100644
> --- a/lib/Kconfig.kasan
> +++ b/lib/Kconfig.kasan
> @@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
>  config KASAN
>         bool "AddressSanitizer: runtime memory debugger"
>         depends on !MEMORY_HOTPLUG
> +       depends on SLUB_DEBUG


What does SLUB_DEBUG do? I think that generally we don't want any
other *heavy* debug checks to be required for kasan.


>         help
>           Enables address sanitizer - runtime memory debugger,
>           designed to find out-of-bounds accesses and use-after-free bugs.
> diff --git a/mm/Makefile b/mm/Makefile
> index 7a4b87e..c08a70f 100644
> --- a/mm/Makefile
> +++ b/mm/Makefile
> @@ -2,6 +2,9 @@
>  # Makefile for the linux memory manager.
>  #
>
> +KASAN_SANITIZE_slab_common.o := n
> +KASAN_SANITIZE_slub.o := n
> +
>  mmu-y                  := nommu.o
>  mmu-$(CONFIG_MMU)      := gup.o highmem.o madvise.o memory.o mincore.o \
>                            mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
> diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
> index 7cfc1fe..3c1687a 100644
> --- a/mm/kasan/kasan.c
> +++ b/mm/kasan/kasan.c
> @@ -30,6 +30,7 @@
>  #include <linux/kasan.h>
>
>  #include "kasan.h"
> +#include "../slab.h"
>
>  /*
>   * Poisons the shadow memory for 'size' bytes starting from 'addr'.
> @@ -265,6 +266,102 @@ void kasan_free_pages(struct page *page, unsigned int order)
>                                 KASAN_FREE_PAGE);
>  }
>
> +void kasan_free_slab_pages(struct page *page, int order)

Isn't this callback followed by the actual freeing of the pages, and thus
by the kasan_free_pages callback, which will poison the range? If so, I
would prefer not to double-poison.
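
I.e., if I'm reading the patch right, the shadow for the slab's pages
would be poisoned twice on this path (just a sketch of the call sequence
I have in mind):

  __free_slab()
    -> kasan_free_slab_pages()     /* poisons with KASAN_SLAB_FREE */
    -> page allocator free path
         -> kasan_free_pages()     /* poisons the same range again,
                                      this time with KASAN_FREE_PAGE */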


> +{
> +       kasan_poison_shadow(page_address(page),
> +                       PAGE_SIZE << order, KASAN_SLAB_FREE);
> +}
> +
> +void kasan_mark_slab_padding(struct kmem_cache *s, void *object)
> +{
> +       unsigned long object_end = (unsigned long)object + s->size;
> +       unsigned long padding_end = round_up(object_end, PAGE_SIZE);
> +       unsigned long padding_start = round_up(object_end,
> +                                       KASAN_SHADOW_SCALE_SIZE);
> +       size_t size = padding_end - padding_start;
> +
> +       if (size)
> +               kasan_poison_shadow((void *)padding_start,
> +                               size, KASAN_SLAB_PADDING);
> +}
> +
> +void kasan_slab_alloc(struct kmem_cache *cache, void *object)
> +{
> +       kasan_kmalloc(cache, object, cache->object_size);
> +}
> +
> +void kasan_slab_free(struct kmem_cache *cache, void *object)
> +{
> +       unsigned long size = cache->size;
> +       unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
> +

Add a comment saying that SLAB_DESTROY_BY_RCU objects can be "legally"
used after free.
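
Something along these lines, perhaps (the wording is just a suggestion):

	/*
	 * Objects in SLAB_DESTROY_BY_RCU caches may legally be accessed
	 * after kmem_cache_free() until the RCU grace period has passed,
	 * so don't poison them here.
	 */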

> +       if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
> +               return;
> +
> +       kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
> +}
> +
> +void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
> +{
> +       unsigned long redzone_start;
> +       unsigned long redzone_end;
> +
> +       if (unlikely(object == NULL))
> +               return;
> +
> +       redzone_start = round_up((unsigned long)(object + size),
> +                               KASAN_SHADOW_SCALE_SIZE);
> +       redzone_end = (unsigned long)object + cache->size;
> +
> +       kasan_unpoison_shadow(object, size);
> +       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> +               KASAN_KMALLOC_REDZONE);
> +
> +}
> +EXPORT_SYMBOL(kasan_kmalloc);
> +
> +void kasan_kmalloc_large(const void *ptr, size_t size)
> +{
> +       struct page *page;
> +       unsigned long redzone_start;
> +       unsigned long redzone_end;
> +
> +       if (unlikely(ptr == NULL))
> +               return;
> +
> +       page = virt_to_page(ptr);
> +       redzone_start = round_up((unsigned long)(ptr + size),
> +                               KASAN_SHADOW_SCALE_SIZE);
> +       redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

If size == N*PAGE_SIZE - KASAN_SHADOW_SCALE_SIZE - 1, the object does
not receive any redzone at all. Can we pass the full memory block size
down from above to fix it? Will compound_order(page) do?

> +
> +       kasan_unpoison_shadow(ptr, size);
> +       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> +               KASAN_PAGE_REDZONE);
> +}
> +
> +void kasan_krealloc(const void *object, size_t size)
> +{
> +       struct page *page;
> +
> +       if (unlikely(object == ZERO_SIZE_PTR))
> +               return;
> +
> +       page = virt_to_head_page(object);
> +
> +       if (unlikely(!PageSlab(page)))
> +               kasan_kmalloc_large(object, size);
> +       else
> +               kasan_kmalloc(page->slab_cache, object, size);
> +}
> +
> +void kasan_kfree_large(const void *ptr)
> +{
> +       struct page *page = virt_to_page(ptr);
> +
> +       kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
> +                       KASAN_FREE_PAGE);
> +}
> +
>  void __asan_load1(unsigned long addr)
>  {
>         check_memory_region(addr, 1, false);
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index 5e61799..b3974c7 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -7,6 +7,11 @@
>  #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
>
>  #define KASAN_FREE_PAGE         0xFF  /* page was freed */
> +#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
> +#define KASAN_SLAB_PADDING      0xFD  /* Slab page padding, does not belong to any slub object */
> +#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
> +#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
> +#define KASAN_SLAB_FREE         0xFA  /* free slab page */
>  #define KASAN_SHADOW_GAP        0xF9  /* address belongs to shadow memory */
>
>  struct access_info {
> diff --git a/mm/kasan/report.c b/mm/kasan/report.c
> index f9d4e8d..c42f6ba 100644
> --- a/mm/kasan/report.c
> +++ b/mm/kasan/report.c
> @@ -24,6 +24,7 @@
>  #include <linux/kasan.h>
>
>  #include "kasan.h"
> +#include "../slab.h"
>
>  /* Shadow layout customization. */
>  #define SHADOW_BYTES_PER_BLOCK 1
> @@ -54,10 +55,15 @@ static void print_error_description(struct access_info *info)
>         shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
>
>         switch (shadow_val) {
> +       case KASAN_PAGE_REDZONE:
> +       case KASAN_SLAB_PADDING:
> +       case KASAN_KMALLOC_REDZONE:
>         case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
>                 bug_type = "out of bounds access";
>                 break;
>         case KASAN_FREE_PAGE:
> +       case KASAN_SLAB_FREE:
> +       case KASAN_KMALLOC_FREE:
>                 bug_type = "use after free";
>                 break;
>         case KASAN_SHADOW_GAP:
> @@ -73,12 +79,33 @@ static void print_error_description(struct access_info *info)
>  static void print_address_description(struct access_info *info)
>  {
>         struct page *page;
> +       struct kmem_cache *cache;
>         u8 shadow_val = *(u8 *)kasan_mem_to_shadow(info->first_bad_addr);
>
>         page = virt_to_head_page((void *)info->access_addr);
>
>         switch (shadow_val) {
> +       case KASAN_SLAB_PADDING:
> +               cache = page->slab_cache;
> +               slab_err(cache, page, "access to slab redzone");
> +               dump_stack();
> +               break;
> +       case KASAN_KMALLOC_FREE:
> +       case KASAN_KMALLOC_REDZONE:
> +       case 1 ... KASAN_SHADOW_SCALE_SIZE - 1:
> +               if (PageSlab(page)) {
> +                       void *object;
> +                       void *slab_page = page_address(page);
> +
> +                       cache = page->slab_cache;
> +                       object = virt_to_obj(cache, slab_page,
> +                                       (void *)info->access_addr);
> +                       object_err(cache, page, object, "kasan error");
> +                       break;
> +               }
> +       case KASAN_PAGE_REDZONE:
>         case KASAN_FREE_PAGE:
> +       case KASAN_SLAB_FREE:
>                 dump_page(page, "kasan error");
>                 dump_stack();
>                 break;
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 3a6e0cf..33868b4 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -795,6 +795,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
>         page = alloc_kmem_pages(flags, order);
>         ret = page ? page_address(page) : NULL;
>         kmemleak_alloc(ret, size, 1, flags);
> +       kasan_kmalloc_large(ret, size);
>         return ret;
>  }
>  EXPORT_SYMBOL(kmalloc_order);
> @@ -969,8 +970,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
>         if (p)
>                 ks = ksize(p);
>
> -       if (ks >= new_size)
> +       if (ks >= new_size) {
> +               kasan_krealloc((void *)p, new_size);
>                 return (void *)p;
> +       }
>
>         ret = kmalloc_track_caller(new_size, flags);
>         if (ret && p)
> diff --git a/mm/slub.c b/mm/slub.c
> index 9b1f75c..12ffdd0 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -33,6 +33,7 @@
>  #include <linux/stacktrace.h>
>  #include <linux/prefetch.h>
>  #include <linux/memcontrol.h>
> +#include <linux/kasan.h>
>
>  #include <trace/events/kmem.h>
>
> @@ -469,10 +470,12 @@ static int disable_higher_order_debug;
>
>  static inline void metadata_access_enable(void)
>  {
> +       kasan_disable_local();
>  }
>
>  static inline void metadata_access_disable(void)
>  {
> +       kasan_enable_local();
>  }
>
>  /*
> @@ -1242,11 +1245,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
>  static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
>  {
>         kmemleak_alloc(ptr, size, 1, flags);
> +       kasan_kmalloc_large(ptr, size);
>  }
>
>  static inline void kfree_hook(const void *x)
>  {
>         kmemleak_free(x);
> +       kasan_kfree_large(x);
>  }
>
>  static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
> @@ -1264,11 +1269,13 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
>         flags &= gfp_allowed_mask;
>         kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
>         kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
> +       kasan_slab_alloc(s, object);
>  }
>
>  static inline void slab_free_hook(struct kmem_cache *s, void *x)
>  {
>         kmemleak_free_recursive(x, s->flags);
> +       kasan_slab_free(s, x);
>
>         /*
>          * Trouble is that we may no longer disable interrupts in the fast path
> @@ -1381,8 +1388,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
>                                 void *object)
>  {
>         setup_object_debug(s, page, object);
> -       if (unlikely(s->ctor))
> +       if (unlikely(s->ctor)) {
> +               kasan_slab_alloc(s, object);
>                 s->ctor(object);
> +       }
> +       kasan_slab_free(s, object);
>  }
>
>  static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
> @@ -1416,8 +1426,10 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
>                 setup_object(s, page, p);
>                 if (likely(idx < page->objects))
>                         set_freepointer(s, p, p + s->size);

Sorry, I don't fully follow this code, so I will just ask some questions.
Can we have some slab padding after the last object in this case as well?

> -               else
> +               else {
>                         set_freepointer(s, p, NULL);
> +                       kasan_mark_slab_padding(s, p);

kasan_mark_slab_padding poisons only up to the end of the page. Can there
be multiple pages of padding that we need to poison?
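
E.g. (the numbers are made up, just to illustrate what I mean): with
PAGE_SIZE == 4096, an order-3 slab (32768 bytes) and s->size == 9000,
page->objects == 3 and the last object ends at offset 27000. The padding
then spans 27000..32768, but padding_end = round_up(object_end, PAGE_SIZE)
== 28672, so the last page of padding would stay unpoisoned.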

> +               }
>         }
>
>         page->freelist = start;
> @@ -1442,6 +1454,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
>         }
>
>         kmemcheck_free_shadow(page, compound_order(page));
> +       kasan_free_slab_pages(page, compound_order(page));
>
>         mod_zone_page_state(page_zone(page),
>                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
> @@ -2488,6 +2501,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
>  {
>         void *ret = slab_alloc(s, gfpflags, _RET_IP_);
>         trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
> +       kasan_kmalloc(s, ret, size);
>         return ret;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_trace);
> @@ -2514,6 +2528,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
>
>         trace_kmalloc_node(_RET_IP_, ret,
>                            size, s->size, gfpflags, node);
> +
> +       kasan_kmalloc(s, ret, size);
>         return ret;
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
> @@ -2897,6 +2913,7 @@ static void early_kmem_cache_node_alloc(int node)
>         init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
>         init_tracking(kmem_cache_node, n);
>  #endif
> +       kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
>         init_kmem_cache_node(n);
>         inc_slabs_node(kmem_cache_node, node, page->objects);
>
> @@ -3269,6 +3286,8 @@ void *__kmalloc(size_t size, gfp_t flags)
>
>         trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
>
> +       kasan_kmalloc(s, ret, size);
> +
>         return ret;
>  }
>  EXPORT_SYMBOL(__kmalloc);
> @@ -3312,12 +3331,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
>
>         trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
>
> +       kasan_kmalloc(s, ret, size);
> +
>         return ret;
>  }
>  EXPORT_SYMBOL(__kmalloc_node);
>  #endif
>
> -size_t ksize(const void *object)
> +static size_t __ksize(const void *object)
>  {
>         struct page *page;
>
> @@ -3333,6 +3354,15 @@ size_t ksize(const void *object)
>
>         return slab_ksize(page->slab_cache);
>  }
> +
> +size_t ksize(const void *object)
> +{
> +       size_t size = __ksize(object);
> +       /* We assume that ksize callers could use whole allocated area,
> +          so we need unpoison this area. */
> +       kasan_krealloc(object, size);
> +       return size;
> +}
>  EXPORT_SYMBOL(ksize);
>
>  void kfree(const void *x)
> --
> 2.1.1
>

