* [3.12 1/5] Move kmalloc_node functions to common code
[not found] <20130731171257.629155011@linux.com>
@ 2013-07-31 17:12 ` Christoph Lameter
2013-07-31 17:12 ` [3.12 2/5] Move kmalloc definitions to slab.h Christoph Lameter
` (3 subsequent siblings)
4 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2013-07-31 17:12 UTC (permalink / raw)
To: Pekka Enberg; +Cc: Joonsoo Kim, Glauber Costa, linux-mm, David Rientjes
The kmalloc_node functions of all slab allocators are similar now, so
let's move them into slab.h. This requires some function naming changes
in slob.
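For illustration only (not part of the patch): with the common definition
in slab.h, a node-aware caller looks the same no matter which allocator is
configured. "struct foo" and "nid" below are hypothetical names used only
for this sketch.

  #include <linux/slab.h>

  struct foo { int a; };	/* hypothetical */

  static struct foo *alloc_foo_on_node(int nid)
  {
          /*
           * Constant size: SLAB/SLUB pick the kmalloc cache at compile time;
           * otherwise (and always on SLOB) this falls back to __kmalloc_node().
           */
          return kmalloc_node(sizeof(struct foo), GFP_KERNEL, nid);
  }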
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h 2013-07-15 09:25:30.055679733 -0500
+++ linux/include/linux/slab.h 2013-07-15 09:38:34.633019260 -0500
@@ -289,6 +289,50 @@ static __always_inline int kmalloc_index
}
#endif /* !CONFIG_SLOB */
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+ return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif
@@ -321,6 +365,23 @@ static __always_inline int kmalloc_size(
return 0;
}
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+	size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ int i = kmalloc_index(size);
+
+ if (!i)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ flags, node, size);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h 2013-07-15 09:25:30.055679733 -0500
+++ linux/include/linux/slub_def.h 2013-07-15 09:37:59.000000000 -0500
@@ -104,9 +104,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
@@ -174,38 +171,4 @@ static __always_inline void *kmalloc(siz
return __kmalloc(size, flags);
}
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(kmalloc_caches[index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#endif
-
#endif /* _LINUX_SLUB_DEF_H */
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h 2013-07-15 09:25:30.055679733 -0500
+++ linux/include/linux/slab_def.h 2013-07-15 09:37:59.000000000 -0500
@@ -102,9 +102,6 @@ struct kmem_cache {
*/
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
@@ -145,53 +142,4 @@ static __always_inline void *kmalloc(siz
return __kmalloc(size, flags);
}
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size)
-{
- return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- struct kmem_cache *cachep;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
#endif /* _LINUX_SLAB_DEF_H */
Index: linux/include/linux/slob_def.h
===================================================================
--- linux.orig/include/linux/slob_def.h 2013-07-15 09:25:30.055679733 -0500
+++ linux/include/linux/slob_def.h 2013-07-15 09:37:59.000000000 -0500
@@ -1,31 +1,9 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
- gfp_t flags)
-{
- return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc_node(size, flags, node);
-}
-
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __kmalloc_node(size, flags, NUMA_NO_NODE);
}
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
- return kmalloc(size, flags);
-}
-
#endif /* __LINUX_SLOB_DEF_H */
Index: linux/mm/slob.c
===================================================================
--- linux.orig/mm/slob.c 2013-07-15 09:25:30.055679733 -0500
+++ linux/mm/slob.c 2013-07-15 09:25:30.051679666 -0500
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp
return ret;
}
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+void *__kmalloc(size_t size, gfp_t gfp)
{
- return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+ return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_TRACING
void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cach
return 0;
}
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_
kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
return b;
}
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+ return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+ return slob_alloc_node(cachep, gfp, node);
+}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
static void __kmem_cache_free(void *b, int size)
{
--
* [3.12 2/5] Move kmalloc definitions to slab.h
[not found] <20130731171257.629155011@linux.com>
2013-07-31 17:12 ` [3.12 1/5] Move kmalloc_node functions to common code Christoph Lameter
@ 2013-07-31 17:12 ` Christoph Lameter
2013-07-31 17:22 ` [3.12 3/5] slabs: Remove unnecessary #includes Christoph Lameter
` (2 subsequent siblings)
4 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2013-07-31 17:12 UTC (permalink / raw)
To: Pekka Enberg; +Cc: Joonsoo Kim, Glauber Costa, linux-mm, David Rientjes
All the kmalloc implementations now do mostly the same thing, so unify them.
This leaves slob_def.h empty, so remove it.
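For illustration only (not part of the patch): with the unified kmalloc(),
dispatch happens at compile time whenever the size is a constant. "len"
below is a hypothetical variable used only for this sketch.

  /* Sketch; ordinary GFP_KERNEL allocations. */
  void *a = kmalloc(64, GFP_KERNEL);   /* constant size that fits a kmalloc cache:
                                          SLAB/SLUB resolve kmalloc_caches[index] at compile time */
  void *b = kmalloc(len, GFP_KERNEL);  /* size not known at compile time: __kmalloc() */
  /* A constant size above KMALLOC_MAX_CACHE_SIZE goes through kmalloc_large() instead. */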
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h 2013-07-15 09:38:40.853127741 -0500
+++ linux/include/linux/slab.h 2013-07-15 09:39:49.026313739 -0500
@@ -4,6 +4,8 @@
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
+#include <linux/kmemleak.h>
struct mem_cgroup;
/*
@@ -308,6 +311,7 @@ static __always_inline void *kmem_cache_
#endif
#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -316,14 +320,20 @@ extern void *kmem_cache_alloc_node_trace
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
+ gfp_t gfpflags,
+ int node, size_t size)
{
return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */
-#else
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
+{
+ return kmem_cache_alloc(s, flags);
+}
+
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
@@ -341,10 +351,61 @@ kmem_cache_alloc_node_trace(struct kmem_
#include <linux/slub_def.h>
#endif
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
+ kmemleak_alloc(ret, size, 1, flags);
+ return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
#endif
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
+#endif
+ }
+ return __kmalloc(size, flags);
+}
+
/*
* Determine size used for the nth kmalloc cache.
* return size or 0 if a kmalloc cache for that
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h 2013-07-15 09:38:40.853127741 -0500
+++ linux/include/linux/slab_def.h 2013-07-15 09:38:40.853127741 -0500
@@ -102,44 +102,4 @@ struct kmem_cache {
*/
};
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- struct kmem_cache *cachep;
- void *ret;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- ret = kmem_cache_alloc_trace(cachep, flags, size);
-
- return ret;
- }
- return __kmalloc(size, flags);
-}
-
#endif /* _LINUX_SLAB_DEF_H */
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h 2013-07-15 09:38:40.853127741 -0500
+++ linux/include/linux/slub_def.h 2013-07-15 09:38:40.853127741 -0500
@@ -12,8 +12,6 @@
#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <linux/kmemleak.h>
-
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
@@ -104,17 +102,6 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret;
-
- flags |= (__GFP_COMP | __GFP_KMEMCG);
- ret = (void *) __get_free_pages(flags, order);
- kmemleak_alloc(ret, size, 1, flags);
- return ret;
-}
-
/**
* Calling this on allocated memory will check that the memory
* is expected to be in use, and print warnings if not.
@@ -128,47 +115,4 @@ static inline bool verify_mem_not_delete
}
#endif
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-
- if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
- }
- return __kmalloc(size, flags);
-}
-
#endif /* _LINUX_SLUB_DEF_H */
Index: linux/include/linux/slob_def.h
===================================================================
--- linux.orig/include/linux/slob_def.h 2013-07-15 09:38:40.853127741 -0500
+++ /dev/null 1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
--
* [3.12 3/5] slabs: Remove unnecessary #includes
[not found] <20130731171257.629155011@linux.com>
2013-07-31 17:12 ` [3.12 1/5] Move kmalloc_node functions to common code Christoph Lameter
2013-07-31 17:12 ` [3.12 2/5] Move kmalloc definitions to slab.h Christoph Lameter
@ 2013-07-31 17:22 ` Christoph Lameter
2013-07-31 17:22 ` [3.12 5/5] seq_file: Use kmalloc_large for page sized allocation Christoph Lameter
2013-07-31 17:22 ` [3.12 4/5] slub: remove verify_mem_not_deleted() Christoph Lameter
4 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2013-07-31 17:22 UTC (permalink / raw)
To: Pekka Enberg; +Cc: Joonsoo Kim, Glauber Costa, linux-mm, David Rientjes
Now that only some struct definitions are left in sl?b_def.h, we can
remove most of the #include statements.
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h 2013-07-18 11:11:59.757679886 -0500
+++ linux/include/linux/slab_def.h 2013-07-18 11:11:59.753679819 -0500
@@ -3,20 +3,6 @@
/*
* Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
*/
struct kmem_cache {
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h 2013-07-18 11:11:59.757679886 -0500
+++ linux/include/linux/slub_def.h 2013-07-18 11:13:32.000000000 -0500
@@ -6,10 +6,6 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
#include <linux/kobject.h>
enum stat_item {
--
* [3.12 5/5] seq_file: Use kmalloc_large for page sized allocation
[not found] <20130731171257.629155011@linux.com>
` (2 preceding siblings ...)
2013-07-31 17:22 ` [3.12 3/5] slabs: Remove unnecessary #includes Christoph Lameter
@ 2013-07-31 17:22 ` Christoph Lameter
2013-07-31 17:22 ` [3.12 4/5] slub: remove verify_mem_not_deleted() Christoph Lameter
4 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2013-07-31 17:22 UTC (permalink / raw)
To: Pekka Enberg
Cc: Joonsoo Kim, Wladislav Wiebe, Glauber Costa, linux-mm, David Rientjes
There is no point in using the slab allocation functions for large,
page-order allocations. Use kmalloc_large() instead.
This fixes the warning about large allocations, but it still results in
large contiguous allocations that can fail because of memory fragmentation.
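For reference (not part of the patch): kmalloc_large(), made available to
all allocators in slab.h by patch 2/5 of this series, reduces to an order
computation plus a direct page allocator call, roughly:

  /* Sketch of what kmalloc_large(size, GFP_KERNEL) boils down to (tracing omitted). */
  unsigned int order = get_order(size);
  void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_KMEMCG, order);
  kmemleak_alloc(buf, size, 1, GFP_KERNEL | __GFP_COMP | __GFP_KMEMCG);

so the doubled seq_file buffer never goes through a kmalloc slab cache.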
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/fs/seq_file.c
===================================================================
--- linux.orig/fs/seq_file.c 2013-07-31 10:39:03.050472030 -0500
+++ linux/fs/seq_file.c 2013-07-31 10:39:03.050472030 -0500
@@ -136,7 +136,7 @@ static int traverse(struct seq_file *m,
Eoverflow:
m->op->stop(m, p);
kfree(m->buf);
- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+ m->buf = kmalloc_large(m->size <<= 1, GFP_KERNEL);
return !m->buf ? -ENOMEM : -EAGAIN;
}
@@ -232,7 +232,7 @@ ssize_t seq_read(struct file *file, char
goto Fill;
m->op->stop(m, p);
kfree(m->buf);
- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+ m->buf = kmalloc_large(m->size <<= 1, GFP_KERNEL);
if (!m->buf)
goto Enomem;
m->count = 0;
--
* [3.12 4/5] slub: remove verify_mem_not_deleted()
[not found] <20130731171257.629155011@linux.com>
` (3 preceding siblings ...)
2013-07-31 17:22 ` [3.12 5/5] seq_file: Use kmalloc_large for page sized allocation Christoph Lameter
@ 2013-07-31 17:22 ` Christoph Lameter
4 siblings, 0 replies; 5+ messages in thread
From: Christoph Lameter @ 2013-07-31 17:22 UTC (permalink / raw)
To: Pekka Enberg; +Cc: Joonsoo Kim, Glauber Costa, linux-mm, David Rientjes
I do not see any users of this code in the tree.
Signed-off-by: Christoph Lameter <cl@linux.com>
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h 2013-07-19 09:08:35.184029519 -0500
+++ linux/include/linux/slub_def.h 2013-07-19 09:08:35.180029449 -0500
@@ -98,17 +98,4 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
- return true;
-}
-#endif
-
#endif /* _LINUX_SLUB_DEF_H */
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c 2013-07-19 09:06:36.000000000 -0500
+++ linux/mm/slub.c 2013-07-19 09:09:02.492486583 -0500
@@ -3319,42 +3319,6 @@ size_t ksize(const void *object)
}
EXPORT_SYMBOL(ksize);
-#ifdef CONFIG_SLUB_DEBUG
-bool verify_mem_not_deleted(const void *x)
-{
- struct page *page;
- void *object = (void *)x;
- unsigned long flags;
- bool rv;
-
- if (unlikely(ZERO_OR_NULL_PTR(x)))
- return false;
-
- local_irq_save(flags);
-
- page = virt_to_head_page(x);
- if (unlikely(!PageSlab(page))) {
- /* maybe it was from stack? */
- rv = true;
- goto out_unlock;
- }
-
- slab_lock(page);
- if (on_freelist(page->slab_cache, page, object)) {
- object_err(page->slab_cache, page, object, "Object is on free-list");
- rv = false;
- } else {
- rv = true;
- }
- slab_unlock(page);
-
-out_unlock:
- local_irq_restore(flags);
- return rv;
-}
-EXPORT_SYMBOL(verify_mem_not_deleted);
-#endif
-
void kfree(const void *x)
{
struct page *page;
--