* [PATCH] mm: Functions used internally should not be put into slub_def.h
@ 2023-01-16  8:50 Rong Tao
  2023-01-16 11:59 ` Hyeonggon Yoo
  0 siblings, 1 reply; 6+ messages in thread
From: Rong Tao @ 2023-01-16  8:50 UTC
  To: cl
  Cc: sdf, yhs, Rong Tao, Pekka Enberg, David Rientjes, Joonsoo Kim,
	Andrew Morton, Vlastimil Babka, Roman Gushchin, Hyeonggon Yoo,
	open list:SLAB ALLOCATOR, open list

From: Rong Tao <rongtao@cestc.cn>

Commit 40f3bf0cb04c ("mm: Convert struct page to struct slab in functions
used by other subsystems") introduced 'slab_address()' and 'struct slab'
in slab_def.h (CONFIG_SLAB) and slub_def.h (CONFIG_SLUB). When the header
<linux/slub_def.h> is included from a module or from BPF code,
'slab_address()' and 'struct slab' are not recognized, resulting in
incomplete-type and undefined-symbol errors (see the bcc slabratetop.py
error [0]).
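
For illustration, a minimal sketch of a failing translation unit
(hypothetical module-/BPF-side file; only the include line matters):

  /*
   * Before this patch, merely including the header fails to compile:
   * the static inline helpers in slub_def.h dereference 'struct slab',
   * an incomplete type outside mm/, and nearest_obj() calls
   * slab_address(), which is declared only in mm/slab.h.
   */
  #include <linux/slub_def.h>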

Moving the helper functions that reference 'struct slab' and
'slab_address()', namely nearest_obj(), obj_to_index(), and
objs_per_slab(), to the internal header file mm/slab.h fixes this
build failure.
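
After the move, the layering looks like this (the caller below is a
hypothetical sketch, not code from this patch):

  /* mm-internal code, e.g. mm/slub.c, already includes mm/slab.h
   * and keeps the helpers: */
  #include "slab.h"

  static int objs_in(const struct kmem_cache *c, const struct slab *s)
  {
          return objs_per_slab(c, s); /* 'struct slab' is complete here */
  }

Module and BPF code, meanwhile, can include <linux/slub_def.h> again
without tripping over types it cannot see.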

[0] https://github.com/iovisor/bcc/issues/4438
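
For background on the moved obj_to_index(): as its comment says, it
avoids a per-call division via reciprocal_divide(). A standalone
userspace sketch of that trick (modelled on the kernel's
include/linux/reciprocal_div.h; this copy is illustrative only, and
the test divisor is an arbitrary assumption):

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  struct reciprocal_value {
          uint32_t m;
          uint8_t sh1, sh2;
  };

  /* Precomputed once per divisor; the slab allocators store this per
   * cache as reciprocal_buffer_size (SLAB) / reciprocal_size (SLUB). */
  static struct reciprocal_value reciprocal_value(uint32_t d)
  {
          int l = 32 - __builtin_clz(d - 1);  /* fls(d - 1), d > 1 */
          uint64_t m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;

          return (struct reciprocal_value){
                  .m   = (uint32_t)m,
                  .sh1 = l > 1 ? 1 : l,       /* min(l, 1) */
                  .sh2 = l > 0 ? l - 1 : 0,   /* max(l - 1, 0) */
          };
  }

  /* a / d computed as one multiply plus shifts, no hardware divide */
  static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
  {
          uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);

          return (t + ((a - t) >> R.sh1)) >> R.sh2;
  }

  int main(void)
  {
          /* 192 stands in for cache->size */
          struct reciprocal_value R = reciprocal_value(192);

          for (uint32_t off = 0; off < (1u << 20); off++)
                  assert(reciprocal_divide(off, R) == off / 192);
          puts("reciprocal_divide() matches plain division");
          return 0;
  }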

Signed-off-by: Rong Tao <rongtao@cestc.cn>
---
 include/linux/slab_def.h | 33 --------------------
 include/linux/slub_def.h | 32 -------------------
 mm/slab.h                | 66 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 65 deletions(-)

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 5834bad8ad78..5658b5fddf9b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -88,37 +88,4 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
-				void *x)
-{
-	void *object = x - (x - slab->s_mem) % cache->size;
-	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
-	if (unlikely(object > last_object))
-		return last_object;
-	else
-		return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- *   Using the fact that size is a constant for a particular cache,
- *   we can replace (offset / cache->size) by
- *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
-{
-	u32 offset = (obj - slab->s_mem);
-	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
-				     const struct slab *slab)
-{
-	if (is_kfence_address(slab_address(slab)))
-		return 1;
-	return cache->num;
-}
-
 #endif	/* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index aa0ee1678d29..660fd6b2a748 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -163,36 +163,4 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
 
 void *fixup_red_left(struct kmem_cache *s, void *p);
 
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
-				void *x) {
-	void *object = x - (x - slab_address(slab)) % cache->size;
-	void *last_object = slab_address(slab) +
-		(slab->objects - 1) * cache->size;
-	void *result = (unlikely(object > last_object)) ? last_object : object;
-
-	result = fixup_red_left(cache, result);
-	return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
-					  void *addr, void *obj)
-{
-	return reciprocal_divide(kasan_reset_tag(obj) - addr,
-				 cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
-					const struct slab *slab, void *obj)
-{
-	if (is_kfence_address(obj))
-		return 0;
-	return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
-				     const struct slab *slab)
-{
-	return slab->objects;
-}
 #endif /* _LINUX_SLUB_DEF_H */
diff --git a/mm/slab.h b/mm/slab.h
index 7cc432969945..38350a0efa91 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -227,10 +227,76 @@ struct kmem_cache {
 
 #ifdef CONFIG_SLAB
 #include <linux/slab_def.h>
+
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
+				void *x)
+{
+	void *object = x - (x - slab->s_mem) % cache->size;
+	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
+
+	if (unlikely(object > last_object))
+		return last_object;
+	else
+		return object;
+}
+
+/*
+ * We want to avoid an expensive divide : (offset / cache->size)
+ *   Using the fact that size is a constant for a particular cache,
+ *   we can replace (offset / cache->size) by
+ *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
+{
+	u32 offset = (obj - slab->s_mem);
+	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				     const struct slab *slab)
+{
+	if (is_kfence_address(slab_address(slab)))
+		return 1;
+	return cache->num;
+}
 #endif
 
 #ifdef CONFIG_SLUB
 #include <linux/slub_def.h>
+
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
+				void *x) {
+	void *object = x - (x - slab_address(slab)) % cache->size;
+	void *last_object = slab_address(slab) +
+		(slab->objects - 1) * cache->size;
+	void *result = (unlikely(object > last_object)) ? last_object : object;
+
+	result = fixup_red_left(cache, result);
+	return result;
+}
+
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct slab *slab, void *obj)
+{
+	if (is_kfence_address(obj))
+		return 0;
+	return __obj_to_index(cache, slab_address(slab), obj);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+				     const struct slab *slab)
+{
+	return slab->objects;
+}
 #endif
 
 #include <linux/memcontrol.h>
-- 
2.39.0


