Subject: Re: [PATCH v1 13/16] khwasan: add hooks implementation
From: Andrey Ryabinin
Date: Tue, 15 May 2018 16:13:20 +0300
In-Reply-To: <5dddd7d6f18927de291e7b09e1ff45190dd6d361.1525798754.git.andreyknvl@google.com>
References: <5dddd7d6f18927de291e7b09e1ff45190dd6d361.1525798754.git.andreyknvl@google.com>
To: Andrey Konovalov, Alexander Potapenko, Dmitry Vyukov, Jonathan Corbet,
    Catalin Marinas, Will Deacon, Christopher Li, Christoph Lameter,
    Pekka Enberg, David Rientjes, Joonsoo Kim, Andrew Morton,
    Masahiro Yamada, Michal Marek, Mark Rutland, Nick Desaulniers,
    Yury Norov, Marc Zyngier, Kristina Martsenko, Suzuki K Poulose,
    Punit Agrawal, Dave Martin, Ard Biesheuvel, James Morse,
    Michael Weiser, Julien Thierry, Tyler Baicar, Eric W. Biederman,
    Thomas Gleixner, Ingo Molnar, Kees Cook, Sandipan Das,
    David Woodhouse, Paul Lawrence, Herbert Xu, Josh Poimboeuf,
    Geert Uytterhoeven, Tom Lendacky, Arnd Bergmann, Dan Williams,
    Michal Hocko, Jan Kara, Ross Zwisler, Jérôme Glisse, Matthew Wilcox,
    Kirill A. Shutemov, Souptick Joarder, Hugh Dickins, Davidlohr Bueso,
    Greg Kroah-Hartman, Philippe Ombredanne, Kate Stewart, Laura Abbott,
    Boris Brezillon, Vlastimil Babka, Pintu Agarwal, Doug Berger,
    Anshuman Khandual, Mike Rapoport, Mel Gorman, Pavel Tatashin,
    Tetsuo Handa, kasan-dev@googlegroups.com, linux-doc@vger.kernel.org,
    linux-kernel@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
    linux-sparse@vger.kernel.org, linux-mm@kvack.org,
    linux-kbuild@vger.kernel.org
Cc: Kostya Serebryany, Evgeniy Stepanov, Lee Smith,
    Ramana Radhakrishnan, Jacob Bramley, Ruben Ayrapetyan, Kees Cook,
    Jann Horn, Mark Brand, Chintan Pandya

On 05/08/2018 08:20 PM, Andrey Konovalov wrote:
>
>  static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>  			unsigned long ip, bool quarantine)
>  {
>  	s8 shadow_byte;
> +	u8 tag;
>  	unsigned long rounded_up_size;
>
> +	tag = get_tag(object);
> +	object = reset_tag(object);
> +
>  	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
>  	    object)) {
> -		kasan_report_invalid_free(object, ip);
> +		kasan_report_invalid_free(set_tag(object, tag), ip);

Using a variable to store the untagged object pointer, instead of
tagging/untagging back and forth, would make the code easier to follow.
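I.e., something along these lines (just a sketch to illustrate the
point; "untagged_object" is a name I made up, and only the start of the
function is shown):

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag = get_tag(object);
	/* Untag once into a local instead of resetting 'object' itself. */
	void *untagged_object = reset_tag(object);
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(untagged_object),
				 untagged_object) != untagged_object)) {
		/* 'object' still carries its tag, no set_tag() dance needed. */
		kasan_report_invalid_free(object, ip);
		return true;
	}
	...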
> 		return true;
> 	}
>
> @@ -326,20 +346,29 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>  		return false;
>
>  	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
> +#ifdef CONFIG_KASAN_GENERIC
>  	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
>  		kasan_report_invalid_free(object, ip);
>  		return true;
>  	}
> +#else
> +	if (tag != (u8)shadow_byte) {
> +		kasan_report_invalid_free(set_tag(object, tag), ip);
> +		return true;
> +	}
> +#endif

This #ifdef could be avoided with a small helper:

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	else
		return tag != (u8)shadow_byte;
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
	...
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

>
>  	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
>  	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
>
> -	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
> +	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
> +			unlikely(!(cache->flags & SLAB_KASAN)))
>  		return false;
>
>  	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
>  	quarantine_put(get_free_info(cache, object), cache);
> -	return true;
> +
> +	return IS_ENABLED(CONFIG_KASAN_GENERIC);
>  }
>
>  bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
> @@ -352,6 +381,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  {
>  	unsigned long redzone_start;
>  	unsigned long redzone_end;
> +	u8 tag;
>
>  	if (gfpflags_allow_blocking(flags))
>  		quarantine_reduce();
>
> @@ -364,14 +394,19 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
>  	redzone_end = round_up((unsigned long)object + cache->object_size,
>  			KASAN_SHADOW_SCALE_SIZE);
>
> +#ifdef CONFIG_KASAN_GENERIC
>  	kasan_unpoison_shadow(object, size);
> +#else
> +	tag = random_tag();
> +	kasan_poison_shadow(object, redzone_start - (unsigned long)object, tag);
> +#endif

The same here; the #ifdef isn't needed, unpoisoning through the tagged
pointer works for both modes:

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7cd4a4e8c3be..f11d6059fc06 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -404,12 +404,9 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 			KASAN_SHADOW_SCALE_SIZE);
 
-#ifdef CONFIG_KASAN_GENERIC
-	kasan_unpoison_shadow(object, size);
-#else
 	tag = random_tag();
-	kasan_poison_shadow(object, redzone_start - (unsigned long)object, tag);
-#endif
+	kasan_unpoison_shadow(set_tag(object, tag), size);
+
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 			KASAN_KMALLOC_REDZONE);

>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  			KASAN_KMALLOC_REDZONE);
>
>  	if (cache->flags & SLAB_KASAN)
>  		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
>
> -	return (void *)object;
> +	return set_tag(object, tag);
>  }
>  EXPORT_SYMBOL(kasan_kmalloc);
>
> @@ -380,6 +415,7 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  	struct page *page;
>  	unsigned long redzone_start;
>  	unsigned long redzone_end;
> +	u8 tag;
>
>  	if (gfpflags_allow_blocking(flags))
>  		quarantine_reduce();
>
> @@ -392,11 +428,16 @@ void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
>  			KASAN_SHADOW_SCALE_SIZE);
>  	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
>
> +#ifdef CONFIG_KASAN_GENERIC
>  	kasan_unpoison_shadow(ptr, size);
> +#else
> +	tag = random_tag();
> +	kasan_poison_shadow(ptr, redzone_start - (unsigned long)ptr, tag);
> +#endif
>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
>  			KASAN_PAGE_REDZONE);
>
> -	return (void *)ptr;
> +	return set_tag(ptr, tag);
>  }

kasan_kmalloc_large() should be left untouched. It works correctly as is
in both cases: ptr comes from the page allocator already tagged at this
point.
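For readers not following the whole series: the helpers used above all
manipulate the tag stored in the top byte of a 64-bit pointer, which the
arm64 MMU ignores for translation thanks to Top Byte Ignore. A rough
sketch of the idea (the names match the patch, but the definitions and
constants below are illustrative, not quoted from it):

/* KHWASAN keeps an 8-bit tag in the top byte of each pointer. */
#define KHWASAN_TAG_SHIFT	56
#define KHWASAN_TAG_MASK	(0xffUL << KHWASAN_TAG_SHIFT)
#define KHWASAN_TAG_KERNEL	0xff	/* tag of native, untagged pointers */

static inline u8 get_tag(const void *addr)
{
	return (u8)((u64)addr >> KHWASAN_TAG_SHIFT);
}

static inline void *set_tag(const void *addr, u8 tag)
{
	return (void *)(((u64)addr & ~KHWASAN_TAG_MASK) |
			((u64)tag << KHWASAN_TAG_SHIFT));
}

/* Restore the canonical kernel tag so the address can be compared
 * against untagged pointers. */
static inline void *reset_tag(const void *addr)
{
	return set_tag(addr, KHWASAN_TAG_KERNEL);
}

So on allocation random_tag() picks a fresh tag, the shadow is poisoned
with it, and set_tag() embeds it in the returned pointer; on free,
get_tag()/reset_tag() split the pointer back apart, and a stale pointer
whose tag no longer matches the shadow triggers an invalid-free report.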