From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mail-pa0-f52.google.com (mail-pa0-f52.google.com [209.85.220.52])
	by kanga.kvack.org (Postfix) with ESMTP id 641006B0032
	for ; Mon, 8 Jun 2015 01:15:05 -0400 (EDT)
Received: by padev16 with SMTP id ev16so31000304pad.0
	for ; Sun, 07 Jun 2015 22:15:05 -0700 (PDT)
Received: from mga02.intel.com (mga02.intel.com. [134.134.136.20])
	by mx.google.com with ESMTP id pp3si2269842pac.191.2015.06.07.22.15.04
	for ; Sun, 07 Jun 2015 22:15:04 -0700 (PDT)
From: "Liu, XinwuX" 
Subject: [PATCH] slub/slab: fix kmemleak missing leaks in some cases
Date: Mon, 8 Jun 2015 05:14:32 +0000
Message-ID: <99C214DF91337140A8D774E25DF6CD5FC89DA2@shsmsx102.ccr.corp.intel.com>
Content-Language: en-US
MIME-Version: 1.0
Sender: owner-linux-mm@kvack.org
List-ID: 
To: "catalin.marinas@arm.com", "cl@linux-foundation.org",
	"penberg@kernel.org", "mpm@selenic.com"
Cc: "linux-mm@kvack.org", "linux-kernel@vger.kernel.org",
	"yanmin_zhang@linux.intel.com", "He, Bo", "Chen, Lin Z"

When the kernel uses kmalloc() to allocate memory, slub/slab picks a
suitable kmem_cache. The cache's object size is usually greater than the
requested size, so the tail of the object is unused and still holds
stale data from its previous use. That stale data may contain pointers
to a block of memory that has actually leaked; when kmemleak scans the
object it treats those pointers as live references, so the leaked block
is never reported.

Fix this by clearing the unused tail of the object.

Signed-off-by: Liu, XinwuX <xinwux.liu@intel.com>
Signed-off-by: Chen Lin Z <lin.z.chen@intel.com>
---
 mm/slab.c | 22 +++++++++++++++++++++-
 mm/slub.c | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..ef25e7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3423,6 +3423,12 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = cachep->object_size - size;
+
+	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3476,11 +3482,19 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = cachep->object_size - size;
+
+	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
+	return ret;
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3513,6 +3527,12 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = cachep->object_size - size;
+
+	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..b53d9af 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,6 +2530,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+
+	if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	kasan_kmalloc(s, ret, size);
 	return ret;
@@ -2556,6 +2562,12 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 {
 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+
+	if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
 
@@ -3316,6 +3328,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 		return s;
 
 	ret = slab_alloc(s, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+
+	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3361,6 +3379,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 		return s;
 
 	ret = slab_alloc_node(s, flags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+
+	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -3819,7 +3843,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 		return s;
 
 	ret = slab_alloc(s, gfpflags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+	if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
@@ -3849,6 +3878,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 		return s;
 
 	ret = slab_alloc_node(s, gfpflags, node, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+	int delta = s->object_size - size;
+
+	if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+		memset((void *)((char *)ret + size), 0, delta);
+#endif
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
-- 
1.9.1
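
For reference, below is a minimal sketch of the scenario the commit message
describes. It is not part of the patch: leak_demo() and the specific sizes are
hypothetical, it assumes CONFIG_DEBUG_KMEMLEAK=y, it relies on kmemleak
scanning the cache's full object_size (the premise of this patch), and whether
the second allocation actually reuses the freed slot depends on allocator
state.

/*
 * Hypothetical illustration, not part of the patch: stale data in the
 * unused tail of a kmalloc object can hide a real leak from kmemleak.
 */
#include <linux/slab.h>

static noinline void leak_demo(void)
{
	void **holder;
	void *leaked;

	/* Both objects are tracked by kmemleak. */
	holder = kmalloc(128, GFP_KERNEL);	/* kmalloc-128 cache */
	leaked = kmalloc(32, GFP_KERNEL);
	if (!holder || !leaked)
		return;

	/* Park a pointer to 'leaked' in the last word of the 128-byte slot. */
	holder[15] = leaked;
	kfree(holder);

	/*
	 * Ask for fewer bytes than the cache's object size.  If the same
	 * 128-byte slot is handed back and __GFP_ZERO is not used, bytes
	 * [100, 128) still hold the stale pointer.  kmemleak scans the whole
	 * object, sees that pointer and keeps treating 'leaked' as
	 * referenced, so the leak below is never reported.  With the tail
	 * cleared by this patch, it shows up in /sys/kernel/debug/kmemleak.
	 */
	holder = kmalloc(100, GFP_KERNEL);
	(void)holder;	/* stays allocated, so its tail keeps being scanned */

	/* 'leaked' is intentionally never freed. */
}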

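
The #ifdef CONFIG_DEBUG_KMEMLEAK block above is repeated at every touched
kmalloc entry point. Read as one piece, it is equivalent to the helper below.
This is only a restatement for clarity, not part of the patch; the name
kmemleak_clear_slack() is made up, and it would sit next to the allocators in
mm/slab.c and mm/slub.c.

/*
 * Hypothetical restatement of the repeated block, not part of the patch.
 * 'size' is what the caller asked for; anything beyond it, up to the
 * cache's object_size, is cleared so a later kmemleak scan cannot pick up
 * stale pointers there.
 */
static __always_inline void kmemleak_clear_slack(struct kmem_cache *s,
						 void *ret, size_t size,
						 gfp_t flags)
{
#ifdef CONFIG_DEBUG_KMEMLEAK
	int delta = s->object_size - size;

	if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
		memset((char *)ret + size, 0, delta);
#endif
}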