From: Alexey Dobriyan <adobriyan@gmail.com>
To: akpm@linux-foundation.org
Cc: cl@linux.com, penberg@kernel.org, rientjes@google.com,
iamjoonsoo.kim@lge.com, linux-mm@kvack.org, adobriyan@gmail.com
Subject: [PATCH 23/25] slub: make struct kmem_cache_order_objects::x unsigned int
Date: Mon, 5 Mar 2018 23:07:28 +0300 [thread overview]
Message-ID: <20180305200730.15812-23-adobriyan@gmail.com> (raw)
In-Reply-To: <20180305200730.15812-1-adobriyan@gmail.com>
struct kmem_cache_order_objects is for mixing the allocation order and the
number of objects, and orders aren't big enough to warrant 64-bit width.
Propagate unsignedness down so that everything fits.
!!! Patch assumes that "PAGE_SIZE << order" doesn't overflow. !!!
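For reference, a minimal userspace sketch of the packing scheme (not part
of the patch; OO_SHIFT and OO_MASK are assumed to match their mm/slub.c
values, and oo_make() is simplified to take the object count directly
instead of deriving it from size and reserved). The order lives in the
high bits and the object count in the low 16 bits, so both fields fit
comfortably in an unsigned int:

/* Self-contained sketch; compile with: cc -o oo oo.c */
#include <assert.h>

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

struct kmem_cache_order_objects {
	unsigned int x;
};

/* simplified: the real oo_make() computes "objects" via order_objects() */
static struct kmem_cache_order_objects oo_make(unsigned int order,
					       unsigned int objects)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + objects
	};

	return x;
}

static unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

int main(void)
{
	/* an order-3 slab holding 512 objects round-trips losslessly */
	struct kmem_cache_order_objects oo = oo_make(3, 512);

	assert(oo_order(oo) == 3);
	assert(oo_objects(oo) == 512);
	return 0;
}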
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
---
include/linux/slub_def.h | 2 +-
mm/slub.c | 74 +++++++++++++++++++++++++-----------------------
2 files changed, 40 insertions(+), 36 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 623d6ba92036..3773e26c08c1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
* given order would contain.
*/
struct kmem_cache_order_objects {
- unsigned long x;
+ unsigned int x;
};
/*
diff --git a/mm/slub.c b/mm/slub.c
index 5d367e0a64ca..9df658ee83fe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -316,13 +316,13 @@ static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
return (p - addr) / s->size;
}
-static inline int order_objects(int order, unsigned long size, int reserved)
+static inline unsigned int order_objects(unsigned int order, unsigned int size, unsigned int reserved)
{
- return ((PAGE_SIZE << order) - reserved) / size;
+ return (((unsigned int)PAGE_SIZE << order) - reserved) / size;
}
-static inline struct kmem_cache_order_objects oo_make(int order,
- unsigned long size, int reserved)
+static inline struct kmem_cache_order_objects oo_make(unsigned int order,
+ unsigned int size, unsigned int reserved)
{
struct kmem_cache_order_objects x = {
(order << OO_SHIFT) + order_objects(order, size, reserved)
@@ -331,12 +331,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
return x;
}
-static inline int oo_order(struct kmem_cache_order_objects x)
+static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
return x.x >> OO_SHIFT;
}
-static inline int oo_objects(struct kmem_cache_order_objects x)
+static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
return x.x & OO_MASK;
}
@@ -1435,7 +1435,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
struct page *page;
- int order = oo_order(oo);
+ unsigned int order = oo_order(oo);
if (node == NUMA_NO_NODE)
page = alloc_pages(flags, order);
@@ -1454,8 +1454,8 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
+ unsigned int count = oo_objects(s->oo);
int err;
- unsigned long i, count = oo_objects(s->oo);
/* Bailout if already initialised */
if (s->random_seq)
@@ -1470,6 +1470,8 @@ static int init_cache_random_seq(struct kmem_cache *s)
/* Transform to an offset on the set of pages */
if (s->random_seq) {
+ unsigned int i;
+
for (i = 0; i < count; i++)
s->random_seq[i] *= s->size;
}
@@ -2398,7 +2400,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
nid, gfpflags, &gfpflags);
- pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %d, min order: %d\n",
+ pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
s->name, s->object_size, s->size, oo_order(s->oo),
oo_order(s->min));
@@ -3181,9 +3183,9 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
* and increases the number of allocations possible without having to
* take the list_lock.
*/
-static int slub_min_order;
-static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
-static int slub_min_objects;
+static unsigned int slub_min_order;
+static unsigned int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static unsigned int slub_min_objects;
/*
* Calculate the order of allocation given an slab object size.
@@ -3210,20 +3212,21 @@ static int slub_min_objects;
* requested a higher mininum order then we start with that one instead of
* the smallest order which will fit the object.
*/
-static inline int slab_order(int size, int min_objects,
- int max_order, int fract_leftover, int reserved)
+static inline unsigned int slab_order(unsigned int size,
+ unsigned int min_objects, unsigned int max_order,
+ unsigned int fract_leftover, unsigned int reserved)
{
- int order;
- int rem;
- int min_order = slub_min_order;
+ unsigned int min_order = slub_min_order;
+ unsigned int order;
if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
- for (order = max(min_order, get_order(min_objects * size + reserved));
+ for (order = max(min_order, (unsigned int)get_order(min_objects * size + reserved));
order <= max_order; order++) {
- unsigned long slab_size = PAGE_SIZE << order;
+ unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
+ unsigned int rem;
rem = (slab_size - reserved) % size;
@@ -3234,12 +3237,11 @@ static inline int slab_order(int size, int min_objects,
return order;
}
-static inline int calculate_order(int size, int reserved)
+static inline int calculate_order(unsigned int size, unsigned int reserved)
{
- int order;
- int min_objects;
- int fraction;
- int max_objects;
+ unsigned int order;
+ unsigned int min_objects;
+ unsigned int max_objects;
/*
* Attempt to find best configuration for a slab. This
@@ -3256,6 +3258,8 @@ static inline int calculate_order(int size, int reserved)
min_objects = min(min_objects, max_objects);
while (min_objects > 1) {
+ unsigned int fraction;
+
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
@@ -3458,7 +3462,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
slab_flags_t flags = s->flags;
unsigned int size = s->object_size;
- int order;
+ unsigned int order;
/*
* Round up object size to the next word boundary. We can only
@@ -3548,7 +3552,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
else
order = calculate_order(size, s->reserved);
- if (order < 0)
+ if ((int)order < 0)
return 0;
s->allocflags = 0;
@@ -3716,7 +3720,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
static int __init setup_slub_min_order(char *str)
{
- get_option(&str, &slub_min_order);
+ get_option(&str, (int *)&slub_min_order);
return 1;
}
@@ -3725,8 +3729,8 @@ __setup("slub_min_order=", setup_slub_min_order);
static int __init setup_slub_max_order(char *str)
{
- get_option(&str, &slub_max_order);
- slub_max_order = min(slub_max_order, MAX_ORDER - 1);
+ get_option(&str, (int *)&slub_max_order);
+ slub_max_order = min(slub_max_order, (unsigned int)MAX_ORDER - 1);
return 1;
}
@@ -3735,7 +3739,7 @@ __setup("slub_max_order=", setup_slub_max_order);
static int __init setup_slub_min_objects(char *str)
{
- get_option(&str, &slub_min_objects);
+ get_option(&str, (int *)&slub_min_objects);
return 1;
}
@@ -4230,7 +4234,7 @@ void __init kmem_cache_init(void)
cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
slub_cpu_dead);
- pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%u, Nodes=%d\n",
+ pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%d\n",
cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
nr_cpu_ids, nr_node_ids);
@@ -4906,17 +4910,17 @@ SLAB_ATTR_RO(object_size);
static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", oo_objects(s->oo));
+ return sprintf(buf, "%u\n", oo_objects(s->oo));
}
SLAB_ATTR_RO(objs_per_slab);
static ssize_t order_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- unsigned long order;
+ unsigned int order;
int err;
- err = kstrtoul(buf, 10, &order);
+ err = kstrtouint(buf, 10, &order);
if (err)
return err;
@@ -4929,7 +4933,7 @@ static ssize_t order_store(struct kmem_cache *s,
static ssize_t order_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", oo_order(s->oo));
+ return sprintf(buf, "%u\n", oo_order(s->oo));
}
SLAB_ATTR(order);
--
2.16.1
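A hedged userspace illustration (not from the patch) of the
"(int)order < 0" check in calculate_sizes() above: calculate_order()
keeps its int return type and still reports failure as a negative errno,
while the caller's "order" variable is now unsigned, so the cast back to
int is what restores the error test:

#include <errno.h>
#include <stdio.h>

/* stand-in for calculate_order(): pretend no suitable order exists */
static int calculate_order_stub(unsigned int size, unsigned int reserved)
{
	(void)size;
	(void)reserved;
	return -ENOSYS;
}

int main(void)
{
	unsigned int order = calculate_order_stub(4096, 0);

	/*
	 * "order < 0" would be always-false for an unsigned variable;
	 * casting to int first recovers the negative errno, which is
	 * exactly what the patch's "(int)order < 0" does.
	 */
	if ((int)order < 0) {
		printf("no suitable order: %d\n", (int)order);
		return 1;
	}
	printf("order=%u\n", order);
	return 0;
}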