When the kernel uses kmalloc to allocate memory, slub/slab finds
a suitable kmem_cache. Usually the cache's object size is greater
than the requested size, leaving unused space that still contains
stale data. That stale data may include pointers into a block of
leaked memory, so kmemleak sees a false reference and fails to
report the block as leaked when scanning its objects.
This patch fixes the issue by zeroing the unused tail of each
allocated object when CONFIG_DEBUG_KMEMLEAK is enabled.
Signed-off-by: Liu, XinwuX <xinwux.liu@intel.com>
Signed-off-by: Chen Lin Z <lin.z.chen@intel.com>
---
mm/slab.c | 22 +++++++++++++++++++++-
mm/slub.c | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7eb38dd..ef25e7d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3423,6 +3423,12 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@@ -3476,11 +3482,19 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
+ return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3513,6 +3527,12 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = slab_alloc(cachep, flags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = cachep->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(caller, ret,
size, cachep->size, flags);
diff --git a/mm/slub.c b/mm/slub.c
index 54c0876..b53d9af 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2530,6 +2530,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
kasan_kmalloc(s, ret, size);
return ret;
@@ -2556,6 +2562,12 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
@@ -3316,6 +3328,12 @@ void *__kmalloc(size_t size, gfp_t flags)
return s;
ret = slab_alloc(s, flags, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3361,6 +3379,12 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
return s;
ret = slab_alloc_node(s, flags, node, _RET_IP_);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(flags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
@@ -3819,7 +3843,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
return s;
ret = slab_alloc(s, gfpflags, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3849,6 +3878,12 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return s;
ret = slab_alloc_node(s, gfpflags, node, caller);
+#ifdef CONFIG_DEBUG_KMEMLEAK
+ int delta = s->object_size - size;
+
+ if (ret && likely(!(gfpflags & __GFP_ZERO)) && (delta > 0))
+ memset((void *)((char *)ret + size), 0, delta);
+#endif
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
--
1.9.1