linux-mm.kvack.org archive mirror
* [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
@ 2013-07-12  1:43 Chen Gang
  2013-07-12  3:27 ` [PATCH] mm/slub.c: beautify code of this file Chen Gang
  2013-07-12 13:53 ` [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Christoph Lameter
  0 siblings, 2 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-12  1:43 UTC (permalink / raw)
  To: cl, Pekka Enberg, mpm; +Cc: linux-mm

Since all values which can be assigned to 'slub_debug' are 'unsigned
long', recommend also to define 'slub_debug' as 'unsigned long' to
match the type precisely

e.g. DEBUG_DEFAULT_FLAGS, SLAB_TRACE, SLAB_FAILSLAB, ...


Signed-off-by: Chen Gang <gang.chen@asianux.com>
---
 mm/slub.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 2b02d66..4fd8c50 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -452,9 +452,9 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
  * Debug settings:
  */
 #ifdef CONFIG_SLUB_DEBUG_ON
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static unsigned long slub_debug = DEBUG_DEFAULT_FLAGS;
 #else
-static int slub_debug;
+static unsigned long slub_debug;
 #endif
 
 static char *slub_debug_slabs;
-- 
1.7.7.6


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH] mm/slub.c: beautify code of this file
  2013-07-12  1:43 [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Chen Gang
@ 2013-07-12  3:27 ` Chen Gang
  2013-07-12 13:58   ` Christoph Lameter
  2013-07-12 13:53 ` [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Christoph Lameter
  1 sibling, 1 reply; 22+ messages in thread
From: Chen Gang @ 2013-07-12  3:27 UTC (permalink / raw)
  To: cl, Pekka Enberg, mpm; +Cc: linux-mm

Be sure of 80 column limitation for both code and comments.

Correct tab alignment for 'if-else' statement.

Remove redundancy 'break' statement.

Remove useless BUG_ON(), since it can never happen.


Signed-off-by: Chen Gang <gang.chen@asianux.com>
---
 mm/slub.c |   94 ++++++++++++++++++++++++++++++++++++------------------------
 1 files changed, 56 insertions(+), 38 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 2b02d66..2b02d64 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -881,7 +886,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 				slab_fix(s, "Freelist cleared");
 				return 0;
 			}
-			break;
 		}
 		object = fp;
 		fp = get_freepointer(s, object);
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2002,7 +2010,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2172,8 +2181,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2317,7 +2326,8 @@ new_slab:
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2385,13 +2395,15 @@ redo:
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2423,7 +2435,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2515,8 +2528,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
-				 * Slab was on no list before and will be partially empty
-				 * We can defer the list move and instead freeze it.
+				 * Slab was on no list before and will be
+				 * partially empty
+				 * We can defer the list move and instead
+				 * freeze it.
 				 */
 				new.frozen = 1;
 
@@ -3074,8 +3089,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3102,8 +3117,8 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3341,7 +3356,8 @@ bool verify_mem_not_deleted(const void *x)
 
 	slab_lock(page);
 	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
+		object_err(page->slab_cache, page, object,
+				"Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -4165,15 +4181,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4282,7 +4300,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4318,12 +4337,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5139,10 +5157,10 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
-	BUG_ON(p > name + ID_STR_LENGTH - 1);
 	return name;
 }
 
-- 
1.7.7.6


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-12  1:43 [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Chen Gang
  2013-07-12  3:27 ` [PATCH] mm/slub.c: beautify code of this file Chen Gang
@ 2013-07-12 13:53 ` Christoph Lameter
  2013-07-15  0:19   ` Chen Gang
  1 sibling, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-12 13:53 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Fri, 12 Jul 2013, Chen Gang wrote:

> Since all values which can be assigned to 'slub_debug' are 'unsigned
> long', recommend also to define 'slub_debug' as 'unsigned long' to
> match the type precisely

The bit definitions in slab.h as well as slub.c all assume that these are
32 bit entities. See f.e. the definition of the internal slub flags:

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */
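
(For illustration, a minimal user-space sketch of those values, using
hypothetical stand-in names rather than the real kernel macros: bit 31
still fits an unsigned 32-bit type, it just does not fit the positive
range of a signed int.)

#include <assert.h>
#include <limits.h>

#define EX_OBJECT_POISON 0x80000000UL   /* stand-in for __OBJECT_POISON */

int main(void)
{
        assert(EX_OBJECT_POISON <= 0xffffffffUL); /* fits in 32 bits */
        assert(EX_OBJECT_POISON > INT_MAX);       /* but not in a plain 'int' */
        return 0;
}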



^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: beautify code of this file
  2013-07-12  3:27 ` [PATCH] mm/slub.c: beautify code of this file Chen Gang
@ 2013-07-12 13:58   ` Christoph Lameter
  2013-07-15  0:31     ` Chen Gang
                       ` (3 more replies)
  0 siblings, 4 replies; 22+ messages in thread
From: Christoph Lameter @ 2013-07-12 13:58 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Fri, 12 Jul 2013, Chen Gang wrote:

> Be sure of 80 column limitation for both code and comments.
> Correct tab alignment for 'if-else' statement.

Thanks.

> Remove redundancy 'break' statement.

Hmm... I'd rather have the first break removed.
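
(An illustrative sketch of the control flow in question, simplified and
hypothetical rather than the real on_freelist() code: the if-branch ends
in 'break' and the else-branch ends in 'return', so a 'break' placed
after the if/else can never run, which is why one of the two breaks is
redundant.)

#include <stdio.h>

static int scan(const int *p, int n)
{
        int i = 0;

        while (i < n) {
                if (p[i] < 0) {           /* a "corrupt" entry */
                        if (i > 0) {
                                break;    /* leaves the loop ... */
                        } else {
                                return 0; /* ... or leaves the function */
                        }
                        break;            /* never reached */
                }
                i++;
        }
        return i;
}

int main(void)
{
        int v[] = { 1, 2, -1, 4 };

        printf("%d\n", scan(v, 4));       /* prints 2 */
        return 0;
}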

> Remove useless BUG_ON(), since it can never happen.

It may happen if more code is added to that function. Recently the cgroups
thing was added f.e.

Could you separate this out into multiple patches that each do one thing
only?


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-12 13:53 ` [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Christoph Lameter
@ 2013-07-15  0:19   ` Chen Gang
  2013-07-15 14:17     ` Christoph Lameter
  0 siblings, 1 reply; 22+ messages in thread
From: Chen Gang @ 2013-07-15  0:19 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/12/2013 09:53 PM, Christoph Lameter wrote:
> On Fri, 12 Jul 2013, Chen Gang wrote:
> 
>> Since all values which can be assigned to 'slub_debug' are 'unsigned
>> long', recommend also to define 'slub_debug' as 'unsigned long' to
>> match the type precisely
> 
> The bit definitions in slab.h as well as slub.c all assume that these are
> 32 bit entities. See f.e. the defition of the internal slub flags:
> 
> /* Internal SLUB flags */
> #define __OBJECT_POISON         0x80000000UL /* Poison object */
> #define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */
> 

As far as I know, 'UL' means "unsigned long", is it correct ?

Thanks.
-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: beautify code of this file
  2013-07-12 13:58   ` Christoph Lameter
@ 2013-07-15  0:31     ` Chen Gang
  2013-07-15  1:04     ` [PATCH 0/2] " Chen Gang
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-15  0:31 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/12/2013 09:58 PM, Christoph Lameter wrote:
> On Fri, 12 Jul 2013, Chen Gang wrote:
> 
>> Be sure of 80 column limitation for both code and comments.
>> Correct tab alignment for 'if-else' statement.
> 
> Thanks.
> 

Thank you too.

>> Remove redundancy 'break' statement.
> 
> Hmm... I'd rather have the first break removed.
> 

Yeah, that will be better for coding and understanding. I will send
patch v2 for it.

>> Remove useless BUG_ON(), since it can never happen.
> 
> It may happen if more code is added to that function. Recently the cgroups
> thing was added f.e.
> 

Hmm... at least, what you said is reasonable. I need to respect the
related maintainers' wishes and opinions (e.g. you, for "slub.c").

> Could you separate this out into multiple patches that each do one thing
> only?
> 

OK, thanks. I will do.

Originally, I thought these changes were minor and could be handled in
one patch, to save the maintainers' time (now I know that was incorrect).

:-)


Thanks.
-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 0/2] mm/slub.c: beautify code of this file
  2013-07-12 13:58   ` Christoph Lameter
  2013-07-15  0:31     ` Chen Gang
@ 2013-07-15  1:04     ` Chen Gang
  2013-07-15  1:05     ` [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment Chen Gang
  2013-07-15  1:06     ` [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement Chen Gang
  3 siblings, 0 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-15  1:04 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

Beautify code for 80 column limitation and tab alignment (patch 1/2).

Also remove redundancy 'break' statement (patch 2/2).

---
 mm/slub.c |   93 ++++++++++++++++++++++++++++++++++++------------------------
 1 files changed, 56 insertions(+), 37 deletions(-)


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment.
  2013-07-12 13:58   ` Christoph Lameter
  2013-07-15  0:31     ` Chen Gang
  2013-07-15  1:04     ` [PATCH 0/2] " Chen Gang
@ 2013-07-15  1:05     ` Chen Gang
  2013-07-15 14:19       ` Christoph Lameter
  2013-07-15  1:06     ` [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement Chen Gang
  3 siblings, 1 reply; 22+ messages in thread
From: Chen Gang @ 2013-07-15  1:05 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

Be sure of 80 column limitation for both code and comments.

Correct tab alignment for 'if-else' statement.

Signed-off-by: Chen Gang <gang.chen@asianux.com>
---
 mm/slub.c |   92 +++++++++++++++++++++++++++++++++++++------------------------
 1 files changed, 56 insertions(+), 36 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 2b02d66..05ab2d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -918,7 +923,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +943,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1046,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1751,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2002,7 +2011,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2172,8 +2182,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
  *
@@ -2317,7 +2327,8 @@ new_slab:
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2385,13 +2396,15 @@ redo:
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
 		 */
 		if (unlikely(!this_cpu_cmpxchg_double(
 				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2423,7 +2436,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2515,8 +2529,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
-				 * Slab was on no list before and will be partially empty
-				 * We can defer the list move and instead freeze it.
+				 * Slab was on no list before and will be
+				 * partially empty
+				 * We can defer the list move and instead
+				 * freeze it.
 				 */
 				new.frozen = 1;
 
@@ -3074,8 +3090,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3102,8 +3118,8 @@ error:
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3341,7 +3357,8 @@ bool verify_mem_not_deleted(const void *x)
 
 	slab_lock(page);
 	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
+		object_err(page->slab_cache, page, object,
+				"Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -4165,15 +4182,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4282,7 +4301,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4318,12 +4338,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5139,7 +5158,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);
-- 
1.7.7.6


^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement.
  2013-07-12 13:58   ` Christoph Lameter
                       ` (2 preceding siblings ...)
  2013-07-15  1:05     ` [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment Chen Gang
@ 2013-07-15  1:06     ` Chen Gang
  2013-07-17  7:08       ` Pekka Enberg
  2013-07-17 14:47       ` Christoph Lameter
  3 siblings, 2 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-15  1:06 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

Remove redundancy 'break' statement.

Signed-off-by: Chen Gang <gang.chen@asianux.com>
---
 mm/slub.c |    1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 05ab2d5..db93fa4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -878,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 				object_err(s, page, object,
 					"Freechain corrupt");
 				set_freepointer(s, object, NULL);
-				break;
 			} else {
 				slab_err(s, page, "Freepointer corrupt");
 				page->freelist = NULL;
-- 
1.7.7.6


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-15  0:19   ` Chen Gang
@ 2013-07-15 14:17     ` Christoph Lameter
  2013-07-16  0:53       ` Chen Gang
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-15 14:17 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Mon, 15 Jul 2013, Chen Gang wrote:

> On 07/12/2013 09:53 PM, Christoph Lameter wrote:
> > On Fri, 12 Jul 2013, Chen Gang wrote:
> >
> >> Since all values which can be assigned to 'slub_debug' are 'unsigned
> >> long', recommend also to define 'slub_debug' as 'unsigned long' to
> >> match the type precisely
> >
> > The bit definitions in slab.h as well as slub.c all assume that these are
> > 32 bit entities. See f.e. the defition of the internal slub flags:
> >
> > /* Internal SLUB flags */
> > #define __OBJECT_POISON         0x80000000UL /* Poison object */
> > #define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */
> >
>
> As far as I know, 'UL' means "unsigned long", is it correct ?

This is the way hex constants are generally specified.
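
(The suffix just pins down the constant's type; a quick hypothetical
user-space sketch, not kernel code: without a suffix, 0x80000000 is
already 'unsigned int' on a 32-bit-int target because it does not fit
'int', while the 'UL' form is 'unsigned long'.)

#include <stdio.h>

int main(void)
{
        /* with a 32-bit int, the first prints 4; the second matches
         * sizeof(unsigned long) on the target */
        printf("%zu %zu\n", sizeof(0x80000000), sizeof(0x80000000UL));
        return 0;
}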


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment.
  2013-07-15  1:05     ` [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment Chen Gang
@ 2013-07-15 14:19       ` Christoph Lameter
  2013-07-16  0:55         ` Chen Gang
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-15 14:19 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Mon, 15 Jul 2013, Chen Gang wrote:

> Be sure of 80 column limitation for both code and comments.

Acked-by: Christoph Lameter <cl@linux.com>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-15 14:17     ` Christoph Lameter
@ 2013-07-16  0:53       ` Chen Gang
  2013-07-17 14:46         ` Christoph Lameter
  0 siblings, 1 reply; 22+ messages in thread
From: Chen Gang @ 2013-07-16  0:53 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/15/2013 10:17 PM, Christoph Lameter wrote:
> On Mon, 15 Jul 2013, Chen Gang wrote:
> 
>> > On 07/12/2013 09:53 PM, Christoph Lameter wrote:
>>> > > On Fri, 12 Jul 2013, Chen Gang wrote:
>>> > >
>>>> > >> Since all values which can be assigned to 'slub_debug' are 'unsigned
>>>> > >> long', recommend also to define 'slub_debug' as 'unsigned long' to
>>>> > >> match the type precisely
>>> > >
>>> > > The bit definitions in slab.h as well as slub.c all assume that these are
>>> > > 32 bit entities. See f.e. the defition of the internal slub flags:
>>> > >
>>> > > /* Internal SLUB flags */
>>> > > #define __OBJECT_POISON         0x80000000UL /* Poison object */
>>> > > #define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */
>>> > >
>> >
>> > As far as I know, 'UL' means "unsigned long", is it correct ?
> This is the way hex constants are generally specified.
> 
> 

The C compiler will treat 'UL' as "unsigned long".

If we really use 32-bit as unsigned number, better to use 'U' instead of
'UL' (e.g. 0x80000000U instead of 0x80000000UL).

Since it is unsigned 32-bit number, it is better to use 'unsigned int'
instead of 'int', which can avoid related warnings if "EXTRA_CFLAGS=-W".
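
For example, a minimal user-space sketch of the mismatch (hypothetical
code; the exact warning option, e.g. gcc's -Wconversion, is an
assumption):

#include <stdio.h>

#define EX_FLAG 0x80000000U     /* a 'U' constant with bit 31 set */

static int ex_as_int = EX_FLAG;           /* e.g. -Wconversion: value changes sign */
static unsigned int ex_as_uint = EX_FLAG; /* exact fit, no warning */

int main(void)
{
        printf("int: %d, unsigned int: %u\n", ex_as_int, ex_as_uint);
        return 0;
}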


Thanks.
-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment.
  2013-07-15 14:19       ` Christoph Lameter
@ 2013-07-16  0:55         ` Chen Gang
  0 siblings, 0 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-16  0:55 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/15/2013 10:19 PM, Christoph Lameter wrote:
> On Mon, 15 Jul 2013, Chen Gang wrote:
> 
>> Be sure of 80 column limitation for both code and comments.
> 
> Acked-by: Christoph Lameter <cl@linux.com>
> 
> 
> 

Thanks.  :-)

-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement.
  2013-07-15  1:06     ` [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement Chen Gang
@ 2013-07-17  7:08       ` Pekka Enberg
  2013-07-17 14:47       ` Christoph Lameter
  1 sibling, 0 replies; 22+ messages in thread
From: Pekka Enberg @ 2013-07-17  7:08 UTC (permalink / raw)
  To: Chen Gang; +Cc: Christoph Lameter, Matt Mackall, linux-mm

On Mon, Jul 15, 2013 at 4:06 AM, Chen Gang <gang.chen@asianux.com> wrote:
> Remove redundancy 'break' statement.
>
> Signed-off-by: Chen Gang <gang.chen@asianux.com>

Christoph?

> ---
>  mm/slub.c |    1 -
>  1 files changed, 0 insertions(+), 1 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 05ab2d5..db93fa4 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -878,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
>                                 object_err(s, page, object,
>                                         "Freechain corrupt");
>                                 set_freepointer(s, object, NULL);
> -                               break;
>                         } else {
>                                 slab_err(s, page, "Freepointer corrupt");
>                                 page->freelist = NULL;
> --
> 1.7.7.6
>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-16  0:53       ` Chen Gang
@ 2013-07-17 14:46         ` Christoph Lameter
  2013-07-18  0:13           ` Chen Gang
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-17 14:46 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Tue, 16 Jul 2013, Chen Gang wrote:

> If we really use 32-bit as unsigned number, better to use 'U' instead of
> 'UL' (e.g. 0x80000000U instead of 0x80000000UL).
>
> Since it is unsigned 32-bit number, it is better to use 'unsigned int'
> instead of 'int', which can avoid related warnings if "EXTRA_CFLAGS=-W".

Ok could you go through the kernel source and change that?


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement.
  2013-07-15  1:06     ` [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement Chen Gang
  2013-07-17  7:08       ` Pekka Enberg
@ 2013-07-17 14:47       ` Christoph Lameter
  2013-07-18  0:02         ` Chen Gang
  1 sibling, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-17 14:47 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Mon, 15 Jul 2013, Chen Gang wrote:

> Remove redundancy 'break' statement.

Acked-by: Christoph Lameter <cl@linux.com>


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement.
  2013-07-17 14:47       ` Christoph Lameter
@ 2013-07-18  0:02         ` Chen Gang
  0 siblings, 0 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-18  0:02 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/17/2013 10:47 PM, Christoph Lameter wrote:
> On Mon, 15 Jul 2013, Chen Gang wrote:
> 
>> Remove redundancy 'break' statement.
> 
> Acked-by: Christoph Lameter <cl@linux.com>
> 
> 

Thanks.

-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-17 14:46         ` Christoph Lameter
@ 2013-07-18  0:13           ` Chen Gang
  2013-07-18 13:42             ` Christoph Lameter
  0 siblings, 1 reply; 22+ messages in thread
From: Chen Gang @ 2013-07-18  0:13 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Pekka Enberg, mpm, linux-mm

On 07/17/2013 10:46 PM, Christoph Lameter wrote:
> On Tue, 16 Jul 2013, Chen Gang wrote:
> 
>> If we really use 32-bit as unsigned number, better to use 'U' instead of
>> 'UL' (e.g. 0x80000000U instead of 0x80000000UL).
>>
>> Since it is unsigned 32-bit number, it is better to use 'unsigned int'
>> instead of 'int', which can avoid related warnings if "EXTRA_CFLAGS=-W".
> 
> Ok could you go through the kernel source and change that?
> 

Yeah, thanks, I should do it.

Hmm... each case of this issue needs to be discussed with (reviewed by)
the various related maintainers.

So, I think one patch for one variable (and related macro contents) is
enough.

Is it OK ?

Thanks.
-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-18  0:13           ` Chen Gang
@ 2013-07-18 13:42             ` Christoph Lameter
  2013-07-19  0:50               ` Chen Gang F T
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-18 13:42 UTC (permalink / raw)
  To: Chen Gang; +Cc: Pekka Enberg, mpm, linux-mm

On Thu, 18 Jul 2013, Chen Gang wrote:

> On 07/17/2013 10:46 PM, Christoph Lameter wrote:
> > On Tue, 16 Jul 2013, Chen Gang wrote:
> >
> >> If we really use 32-bit as unsigned number, better to use 'U' instead of
> >> 'UL' (e.g. 0x80000000U instead of 0x80000000UL).
> >>
> >> Since it is unsigned 32-bit number, it is better to use 'unsigned int'
> >> instead of 'int', which can avoid related warnings if "EXTRA_CFLAGS=-W".
> >
> > Ok could you go through the kernel source and change that?
> >
>
> Yeah, thanks, I should do it.
>
> Hmm... for each case of this issue, it need communicate with (review by)
> various related maintainers.
>
> So, I think one patch for one variable (and related macro contents) is
> enough.
>
> Is it OK ?

The fundamental issue is that typically ints are used for flags and I
would like to keep it that way. Changing the constants in slab.h and the
allocator code to be unsigned int instead of unsigned long won't be that
much of a deal.

Will the code then be clean enough for you?


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-18 13:42             ` Christoph Lameter
@ 2013-07-19  0:50               ` Chen Gang F T
  2013-07-19 14:00                 ` Christoph Lameter
  0 siblings, 1 reply; 22+ messages in thread
From: Chen Gang F T @ 2013-07-19  0:50 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Chen Gang, Pekka Enberg, mpm, linux-mm

On 07/18/2013 09:42 PM, Christoph Lameter wrote:
> On Thu, 18 Jul 2013, Chen Gang wrote:
> 
>> On 07/17/2013 10:46 PM, Christoph Lameter wrote:
>>> On Tue, 16 Jul 2013, Chen Gang wrote:
>>>
>>>> If we really use 32-bit as unsigned number, better to use 'U' instead of
>>>> 'UL' (e.g. 0x80000000U instead of 0x80000000UL).
>>>>
>>>> Since it is unsigned 32-bit number, it is better to use 'unsigned int'
>>>> instead of 'int', which can avoid related warnings if "EXTRA_CFLAGS=-W".
>>>
>>> Ok could you go through the kernel source and change that?
>>>
>>
>> Yeah, thanks, I should do it.
>>
>> Hmm... for each case of this issue, it need communicate with (review by)
>> various related maintainers.
>>
>> So, I think one patch for one variable (and related macro contents) is
>> enough.
>>
>> Is it OK ?
> 
> The fundamental issue is that typically ints are used for flags and I
> would like to keep it that way. Changing the constants in slab.h and the
> allocator code to be unsigned int instead of unsigned long wont be that
> much of a deal.
>

At least, we need to use 'unsigned' instead of 'signed'.

e.g.
----------------------------diff begin---------------------------------

diff --git a/mm/slub.c b/mm/slub.c
index 2b02d66..7111d7a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -452,9 +452,9 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
  * Debug settings:
  */
 #ifdef CONFIG_SLUB_DEBUG_ON
-static int slub_debug = DEBUG_DEFAULT_FLAGS;
+static unsigned int slub_debug = DEBUG_DEFAULT_FLAGS;
 #else
-static int slub_debug;
+static unsigned int slub_debug;
 #endif
 
 static char *slub_debug_slabs;

----------------------------diff end-----------------------------------
 
> Will the code then be clean enough for you?
> 

Hmm... Things may be more complex, please see below:

'SLAB_RED_ZONE' (and the other constants) can also be assigned to the
"struct kmem_cache" member variable 'flags'.

But "struct kmem_cache" has 2 different definitions, and both of them
share 'SLAB_RED_ZONE' (and the other constants).

One defines 'flags' as 'unsigned int' in "include/linux/slab_def.h"

 16 /*
 17  * struct kmem_cache
 18  *
 19  * manages a cache.
 20  */
 21 
 22 struct kmem_cache {
 23 /* 1) Cache tunables. Protected by cache_chain_mutex */
 24         unsigned int batchcount;
 25         unsigned int limit;
 26         unsigned int shared;
 27 
 28         unsigned int size;
 29         u32 reciprocal_buffer_size;
 30 /* 2) touched by every alloc & free from the backend */
 31 
 32         unsigned int flags;             /* constant flags */
 33         unsigned int num;               /* # of objs per slab */
...

The other defines 'flags' as 'unsigned long' in "include/linux/slub_def.h"
(but from its comments, it even says it is for 'Slab' cache management !!)

 65 /*
 66  * Slab cache management.
 67  */
 68 struct kmem_cache {
 69         struct kmem_cache_cpu __percpu *cpu_slab;
 70         /* Used for retriving partial slabs etc */
 71         unsigned long flags;
 72         unsigned long min_partial;
 73         int size;               /* The size of an object including meta data */
 74         int object_size;        /* The size of an object without meta data */
 75         int offset;             /* Free pointer offset. */
 76         int cpu_partial;        /* Number of per cpu partial objects to keep around */
 77         struct kmem_cache_order_objects oo;
...


Maybe it is also related with our discussion ('unsigned int' or 'unsigned long') ?





-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-19  0:50               ` Chen Gang F T
@ 2013-07-19 14:00                 ` Christoph Lameter
  2013-07-22  1:19                   ` Chen Gang
  0 siblings, 1 reply; 22+ messages in thread
From: Christoph Lameter @ 2013-07-19 14:00 UTC (permalink / raw)
  To: Chen Gang F T; +Cc: Chen Gang, Pekka Enberg, mpm, linux-mm

On Fri, 19 Jul 2013, Chen Gang F T wrote:

> > The fundamental issue is that typically ints are used for flags and I
> > would like to keep it that way. Changing the constants in slab.h and the
> > allocator code to be unsigned int instead of unsigned long wont be that
> > much of a deal.
> >
>
> At least, we need use 'unsigned' instead of 'signed'.

Ok.

> Hmm... Things maybe seem more complex, please see bellow:
>
> For 'SLAB_RED_ZONE' (or the other constants), they also can be assigned
> to "struct kmem_cache" member variable 'flags'.
>
> But for "struct kmem_cache", it has 2 different definitions, they share
> with the 'SLAB_RED_ZONE' (or the other constants).
>
> One defines 'flags' as 'unsigned int' in "include/linux/slab_def.h"
>
>  16 /*
>  17  * struct kmem_cache
>  18  *
>  19  * manages a cache.
>  20  */
>  21
>  22 struct kmem_cache {
>  23 /* 1) Cache tunables. Protected by cache_chain_mutex */
>  24         unsigned int batchcount;
>  25         unsigned int limit;
>  26         unsigned int shared;
>  27
>  28         unsigned int size;
>  29         u32 reciprocal_buffer_size;
>  30 /* 2) touched by every alloc & free from the backend */
>  31
>  32         unsigned int flags;             /* constant flags */
>  33         unsigned int num;               /* # of objs per slab */
> ...
>
> The other defines 'flags' as 'unsigned long' in "include/linux/slub_def.h"
> (but from its comments, it even says it is for 'Slab' cache management !!)

SLUB is a slab allocator so there is nothing wrong with that.

> Maybe it is also related with our discussion ('unsigned int' or 'unsigned long') ?

Well we can make this uniformly unsigned int or long I guess. What would
be the benefits of one vs the other?


^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug'
  2013-07-19 14:00                 ` Christoph Lameter
@ 2013-07-22  1:19                   ` Chen Gang
  0 siblings, 0 replies; 22+ messages in thread
From: Chen Gang @ 2013-07-22  1:19 UTC (permalink / raw)
  To: Christoph Lameter; +Cc: Chen Gang F T, Pekka Enberg, mpm, linux-mm

On 07/19/2013 10:00 PM, Christoph Lameter wrote:
> On Fri, 19 Jul 2013, Chen Gang F T wrote:
> 
>>> The fundamental issue is that typically ints are used for flags and I
>>> would like to keep it that way. Changing the constants in slab.h and the
>>> allocator code to be unsigned int instead of unsigned long wont be that
>>> much of a deal.
>>>
>>
>> At least, we need use 'unsigned' instead of 'signed'.
> 
> Ok.
> 
>> Hmm... Things maybe seem more complex, please see bellow:
>>
>> For 'SLAB_RED_ZONE' (or the other constants), they also can be assigned
>> to "struct kmem_cache" member variable 'flags'.
>>
>> But for "struct kmem_cache", it has 2 different definitions, they share
>> with the 'SLAB_RED_ZONE' (or the other constants).
>>
>> One defines 'flags' as 'unsigned int' in "include/linux/slab_def.h"
>>
>>  16 /*
>>  17  * struct kmem_cache
>>  18  *
>>  19  * manages a cache.
>>  20  */
>>  21
>>  22 struct kmem_cache {
>>  23 /* 1) Cache tunables. Protected by cache_chain_mutex */
>>  24         unsigned int batchcount;
>>  25         unsigned int limit;
>>  26         unsigned int shared;
>>  27
>>  28         unsigned int size;
>>  29         u32 reciprocal_buffer_size;
>>  30 /* 2) touched by every alloc & free from the backend */
>>  31
>>  32         unsigned int flags;             /* constant flags */
>>  33         unsigned int num;               /* # of objs per slab */
>> ...
>>
>> The other defines 'flags' as 'unsigned long' in "include/linux/slub_def.h"
>> (but from its comments, it even says it is for 'Slab' cache management !!)
> 
> SLUB is slab allocator so there is nothing wrong with that.
> 

OK, thanks.

>> Maybe it is also related with our discussion ('unsigned int' or 'unsigned long') ?
> 
> Well we can make this uniformly unsigned int or long I guess. What would
> be the benefits of one vs the other?
> 

Yeah, we need to give the 2 'flags' members the same type: "make this
uniformly unsigned int or long".


Hmm... a flags variable should always have a fixed width, so it is not
suitable to use 'unsigned long', whose width depends on whether the
machine is 32-bit or 64-bit.

If 'unsigned int' is not enough, we need to use 'unsigned long long'
instead. Otherwise (if it is enough), it is better to keep 'unsigned int'
to save memory.

'unsigned long' is useful (necessary) for some variables (e.g. address
related variables), but it is not suitable for a variable that needs a
fixed width.
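
For example, a trivial user-space check (a hypothetical illustration, not
kernel code):

#include <stdio.h>

int main(void)
{
        /* on common 32-bit targets both print 4; on LP64 targets
         * 'unsigned long' grows to 8 while 'unsigned int' stays 4 */
        printf("unsigned int : %zu bytes\n", sizeof(unsigned int));
        printf("unsigned long: %zu bytes\n", sizeof(unsigned long));
        return 0;
}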


Thanks.
-- 
Chen Gang


^ permalink raw reply	[flat|nested] 22+ messages in thread

end of thread

Thread overview: 22+ messages
2013-07-12  1:43 [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Chen Gang
2013-07-12  3:27 ` [PATCH] mm/slub.c: beautify code of this file Chen Gang
2013-07-12 13:58   ` Christoph Lameter
2013-07-15  0:31     ` Chen Gang
2013-07-15  1:04     ` [PATCH 0/2] " Chen Gang
2013-07-15  1:05     ` [PATCH 1/2] mm/slub.c: beautify code for 80 column limitation and tab alignment Chen Gang
2013-07-15 14:19       ` Christoph Lameter
2013-07-16  0:55         ` Chen Gang
2013-07-15  1:06     ` [PATCH 2/2] mm/slub.c: beautify code for removing redundancy 'break' statement Chen Gang
2013-07-17  7:08       ` Pekka Enberg
2013-07-17 14:47       ` Christoph Lameter
2013-07-18  0:02         ` Chen Gang
2013-07-12 13:53 ` [PATCH] mm/slub.c: use 'unsigned long' instead of 'int' for variable 'slub_debug' Christoph Lameter
2013-07-15  0:19   ` Chen Gang
2013-07-15 14:17     ` Christoph Lameter
2013-07-16  0:53       ` Chen Gang
2013-07-17 14:46         ` Christoph Lameter
2013-07-18  0:13           ` Chen Gang
2013-07-18 13:42             ` Christoph Lameter
2013-07-19  0:50               ` Chen Gang F T
2013-07-19 14:00                 ` Christoph Lameter
2013-07-22  1:19                   ` Chen Gang
