From: Vlastimil Babka <vbabka@suse.cz>
To: David Rientjes <rientjes@google.com>,
Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Alexander Potapenko <glider@google.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Dmitry Vyukov <dvyukov@google.com>,
Vincenzo Frascino <vincenzo.frascino@arm.com>,
Marco Elver <elver@google.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@kernel.org>,
Shakeel Butt <shakeelb@google.com>,
Muchun Song <muchun.song@linux.dev>,
Kees Cook <keescook@chromium.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
kasan-dev@googlegroups.com, cgroups@vger.kernel.org,
linux-hardening@vger.kernel.org,
Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v2 11/21] mm/slab: move the rest of slub_def.h to mm/slab.h
Date: Mon, 20 Nov 2023 19:34:22 +0100
Message-ID: <20231120-slab-remove-slab-v2-11-9c9c70177183@suse.cz>
In-Reply-To: <20231120-slab-remove-slab-v2-0-9c9c70177183@suse.cz>
mm/slab.h is the only place that includes include/linux/slub_def.h;
that indirection is what allowed switching between SLAB and SLUB. Now
we can simply move the contents of slub_def.h over to mm/slab.h and
remove the header.
Use this opportunity to fix up some whitespace (alignment) issues.
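
As a note for reviewers less familiar with the moved helpers:
__obj_to_index() avoids a per-lookup integer division by using a
precomputed reciprocal of the object size. A rough sketch of the idea
(illustrative only, not part of this change):

	/* done once per cache, when the object size is final */
	s->reciprocal_size = reciprocal_value(s->size);

	/* per lookup: (obj - addr) / s->size without a hw divide */
	idx = reciprocal_divide(kasan_reset_tag(obj) - addr,
				s->reciprocal_size);
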
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
include/linux/slub_def.h | 150 -----------------------------------------------
mm/slab.h | 138 ++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 137 insertions(+), 151 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index a0229ea42977..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-#include <linux/local_lock.h>
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
- struct kmem_cache_cpu __percpu *cpu_slab;
-#endif
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
- /* Number of per cpu partial slabs to keep around */
- unsigned int cpu_partial_slabs;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x) {
- void *object = x - (x - slab_address(slab)) % cache->size;
- void *last_object = slab_address(slab) +
- (slab->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- return slab->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slab.h b/mm/slab.h
index 014c36ea51fa..3a8d13c099fa 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -209,7 +209,143 @@ static inline size_t slab_size(const struct slab *slab)
return PAGE_SIZE << slab_order(slab);
}
-#include <linux/slub_def.h>
+#include <linux/kfence.h>
+#include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
+#include <linux/local_lock.h>
+
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c) ((c)->partial)
+
+#define slub_set_percpu_partial(c, p) \
+({ \
+ slub_percpu_partial(c) = (p)->next; \
+})
+
+#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c) NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c) NULL
+#endif // CONFIG_SLUB_CPU_PARTIAL
+
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+ unsigned int x;
+};
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+#ifndef CONFIG_SLUB_TINY
+ struct kmem_cache_cpu __percpu *cpu_slab;
+#endif
+ /* Used for retrieving partial slabs, etc. */
+ slab_flags_t flags;
+ unsigned long min_partial;
+ unsigned int size; /* Object size including metadata */
+ unsigned int object_size; /* Object size without metadata */
+ struct reciprocal_value reciprocal_size;
+ unsigned int offset; /* Free pointer offset */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
+ /* Number of per cpu partial slabs to keep around */
+ unsigned int cpu_partial_slabs;
+#endif
+ struct kmem_cache_order_objects oo;
+
+ /* Allocation and freeing of slabs */
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+ int refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *object); /* Object constructor */
+ unsigned int inuse; /* Offset to metadata */
+ unsigned int align; /* Alignment */
+ unsigned int red_left_pad; /* Left redzone padding size */
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ unsigned long random;
+#endif
+
+#ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+ */
+ unsigned int remote_node_defrag_ratio;
+#endif
+
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ unsigned int *random_seq;
+#endif
+
+#ifdef CONFIG_KASAN_GENERIC
+ struct kasan_cache kasan_info;
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+#endif
+
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
+
+#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *s);
+void sysfs_slab_release(struct kmem_cache *s);
+#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
+static inline void sysfs_slab_release(struct kmem_cache *s) { }
+#endif
+
+void *fixup_red_left(struct kmem_cache *s, void *p);
+
+static inline void *nearest_obj(struct kmem_cache *cache,
+ const struct slab *slab, void *x)
+{
+ void *object = x - (x - slab_address(slab)) % cache->size;
+ void *last_object = slab_address(slab) +
+ (slab->objects - 1) * cache->size;
+ void *result = (unlikely(object > last_object)) ? last_object : object;
+
+ result = fixup_red_left(cache, result);
+ return result;
+}
+
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+ void *addr, void *obj)
+{
+ return reciprocal_divide(kasan_reset_tag(obj) - addr,
+ cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
+{
+ if (is_kfence_address(obj))
+ return 0;
+ return __obj_to_index(cache, slab_address(slab), obj);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ return slab->objects;
+}
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
--
2.42.1
Thread overview: 56+ messages
2023-11-20 18:34 [PATCH v2 00/21] remove the SLAB allocator Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 01/21] mm/slab, docs: switch mm-api docs generation from slab.c to slub.c Vlastimil Babka
2023-11-24 0:46 ` David Rientjes
2023-12-05 3:53 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 02/21] mm/slab: remove CONFIG_SLAB from all Kconfig and Makefile Vlastimil Babka
2023-12-05 4:15 ` Hyeonggon Yoo
2023-12-05 10:14 ` Vlastimil Babka
2023-12-06 0:08 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 03/21] KASAN: remove code paths guarded by CONFIG_SLAB Vlastimil Babka
2023-11-21 8:23 ` Hyeonggon Yoo
2023-11-21 16:47 ` Andrey Konovalov
2023-12-05 4:26 ` Hyeonggon Yoo
2023-12-05 4:48 ` Hyeonggon Yoo
2023-12-05 10:16 ` Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 04/21] KFENCE: cleanup kfence_guarded_alloc() after CONFIG_SLAB removal Vlastimil Babka
2023-12-06 8:01 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 05/21] mm/memcontrol: remove CONFIG_SLAB #ifdef guards Vlastimil Babka
2023-12-06 8:12 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 06/21] cpu/hotplug: remove CPUHP_SLAB_PREPARE hooks Vlastimil Babka
2023-12-01 11:28 ` Thomas Gleixner
2023-12-06 8:28 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 07/21] mm/slab: remove CONFIG_SLAB code from slab common code Vlastimil Babka
2023-12-06 9:05 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 08/21] mm/mempool/dmapool: remove CONFIG_DEBUG_SLAB ifdefs Vlastimil Babka
2023-12-06 9:10 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 09/21] mm/slab: remove mm/slab.c and slab_def.h Vlastimil Babka
2023-11-22 20:07 ` Christoph Lameter
2023-12-06 9:31 ` Hyeonggon Yoo
2023-12-06 9:37 ` Vlastimil Babka
2023-11-20 18:34 ` [PATCH v2 10/21] mm/slab: move struct kmem_cache_cpu declaration to slub.c Vlastimil Babka
2023-12-06 9:35 ` Hyeonggon Yoo
2023-11-20 18:34 ` Vlastimil Babka [this message]
2023-12-06 9:45 ` [PATCH v2 11/21] mm/slab: move the rest of slub_def.h to mm/slab.h Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 12/21] mm/slab: consolidate includes in the internal mm/slab.h Vlastimil Babka
2023-12-07 0:30 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 13/21] mm/slab: move pre/post-alloc hooks from slab.h to slub.c Vlastimil Babka
2023-12-07 0:43 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 14/21] mm/slab: move memcg related functions " Vlastimil Babka
2023-12-07 0:59 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 15/21] mm/slab: move struct kmem_cache_node " Vlastimil Babka
2023-12-07 1:11 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 16/21] mm/slab: move kfree() from slab_common.c " Vlastimil Babka
2023-12-05 4:38 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 17/21] mm/slab: move kmalloc_slab() to mm/slab.h Vlastimil Babka
2023-12-07 1:28 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 18/21] mm/slab: move kmalloc() functions from slab_common.c to slub.c Vlastimil Babka
2023-12-07 1:30 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 19/21] mm/slub: remove slab_alloc() and __kmem_cache_alloc_lru() wrappers Vlastimil Babka
2023-12-07 1:35 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 20/21] mm/slub: optimize alloc fastpath code layout Vlastimil Babka
2023-12-07 2:32 ` Hyeonggon Yoo
2023-11-20 18:34 ` [PATCH v2 21/21] mm/slub: optimize free fast path " Vlastimil Babka
2023-12-07 2:40 ` Hyeonggon Yoo
2023-11-24 0:45 ` [PATCH v2 00/21] remove the SLAB allocator David Rientjes
2023-11-24 9:26 ` Vlastimil Babka
2023-12-07 2:45 ` Hyeonggon Yoo