linux-mm.kvack.org archive mirror
From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Rong Tao <rtoax@foxmail.com>
Cc: cl@linux.com, sdf@google.com, yhs@fb.com,
	Rong Tao <rongtao@cestc.cn>, Pekka Enberg <penberg@kernel.org>,
	David Rientjes <rientjes@google.com>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Vlastimil Babka <vbabka@suse.cz>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	"open list:SLAB ALLOCATOR" <linux-mm@kvack.org>,
	open list <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH] mm: Functions used internally should not be put into slub_def.h
Date: Mon, 16 Jan 2023 11:59:53 +0000	[thread overview]
Message-ID: <Y8U8OfoVwkJPdJcv@localhost> (raw)
In-Reply-To: <tencent_ABA832E296819D1053D6C625ADCAF76BC706@qq.com>

On Mon, Jan 16, 2023 at 04:50:05PM +0800, Rong Tao wrote:
> From: Rong Tao <rongtao@cestc.cn>
>
> commit 40f3bf0cb04c ("mm: Convert struct page to struct slab in functions
> used by other subsystems") introduced 'slab_address()' and 'struct slab'
> in slab_def.h (CONFIG_SLAB) and slub_def.h (CONFIG_SLUB). When
> <linux/slub_def.h> is included from a module or from BPF code,
> 'slab_address()' and 'struct slab' are not recognized, resulting in
> 'incomplete type' and 'undefined' errors (see the bcc slabratetop.py
> error [0]).
> 

Hello Rong,

IMO sl*b_def.h is not intended to be used outside the slab allocator
itself, and I'm not sure it's worth a -stable release either.

IIUC, the reason slabratetop.py relies on sl*b_def.h is to read
cachep->name and cachep->size.

I think this can be solved if you use a tool that supports BPF Type
Format (BTF), so the struct layout is taken from the running kernel
instead of from kernel headers?
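
For example, a minimal CO-RE sketch along these lines (hypothetical and
untested; it redeclares only the two fields, with names assumed from the
current kernel's struct kmem_cache) would avoid including sl*b_def.h
entirely:

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

/* Redeclare only the fields the tool needs; libbpf's CO-RE
 * relocations match them against the running kernel's BTF at
 * load time, for both the CONFIG_SLAB and CONFIG_SLUB layouts.
 */
struct kmem_cache {
	unsigned int size;
	const char *name;
} __attribute__((preserve_access_index));

static __always_inline unsigned int cache_size(struct kmem_cache *cachep)
{
	/* the field offset is relocated via BTF, not hardcoded */
	return BPF_CORE_READ(cachep, size);
}

That way the tool keeps working across kernel versions even when the
struct layout or the headers change, as they did in 40f3bf0cb04c.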

> Moving the function definitions that reference 'struct slab' and
> slab_address(), namely nearest_obj(), obj_to_index(), and
> objs_per_slab(), to the internal header file mm/slab.h solves this
> problem.
>
> [0] https://github.com/iovisor/bcc/issues/4438
>
> Signed-off-by: Rong Tao <rongtao@cestc.cn>
> ---
>  include/linux/slab_def.h | 33 --------------------
>  include/linux/slub_def.h | 32 -------------------
>  mm/slab.h                | 66 ++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 66 insertions(+), 65 deletions(-)
> 
> diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
> index 5834bad8ad78..5658b5fddf9b 100644
> --- a/include/linux/slab_def.h
> +++ b/include/linux/slab_def.h
> @@ -88,37 +88,4 @@ struct kmem_cache {
>  	struct kmem_cache_node *node[MAX_NUMNODES];
>  };
>  
> -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> -				void *x)
> -{
> -	void *object = x - (x - slab->s_mem) % cache->size;
> -	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
> -
> -	if (unlikely(object > last_object))
> -		return last_object;
> -	else
> -		return object;
> -}
> -
> -/*
> - * We want to avoid an expensive divide : (offset / cache->size)
> - *   Using the fact that size is a constant for a particular cache,
> - *   we can replace (offset / cache->size) by
> - *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
> - */
> -static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -					const struct slab *slab, void *obj)
> -{
> -	u32 offset = (obj - slab->s_mem);
> -	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
> -}
> -
> -static inline int objs_per_slab(const struct kmem_cache *cache,
> -				     const struct slab *slab)
> -{
> -	if (is_kfence_address(slab_address(slab)))
> -		return 1;
> -	return cache->num;
> -}
> -
>  #endif	/* _LINUX_SLAB_DEF_H */
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> index aa0ee1678d29..660fd6b2a748 100644
> --- a/include/linux/slub_def.h
> +++ b/include/linux/slub_def.h
> @@ -163,36 +163,4 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
>  
>  void *fixup_red_left(struct kmem_cache *s, void *p);
>  
> -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> -				void *x) {
> -	void *object = x - (x - slab_address(slab)) % cache->size;
> -	void *last_object = slab_address(slab) +
> -		(slab->objects - 1) * cache->size;
> -	void *result = (unlikely(object > last_object)) ? last_object : object;
> -
> -	result = fixup_red_left(cache, result);
> -	return result;
> -}
> -
> -/* Determine object index from a given position */
> -static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> -					  void *addr, void *obj)
> -{
> -	return reciprocal_divide(kasan_reset_tag(obj) - addr,
> -				 cache->reciprocal_size);
> -}
> -
> -static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -					const struct slab *slab, void *obj)
> -{
> -	if (is_kfence_address(obj))
> -		return 0;
> -	return __obj_to_index(cache, slab_address(slab), obj);
> -}
> -
> -static inline int objs_per_slab(const struct kmem_cache *cache,
> -				     const struct slab *slab)
> -{
> -	return slab->objects;
> -}
>  #endif /* _LINUX_SLUB_DEF_H */
> diff --git a/mm/slab.h b/mm/slab.h
> index 7cc432969945..38350a0efa91 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -227,10 +227,76 @@ struct kmem_cache {
>  
>  #ifdef CONFIG_SLAB
>  #include <linux/slab_def.h>
> +
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> +				void *x)
> +{
> +	void *object = x - (x - slab->s_mem) % cache->size;
> +	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
> +
> +	if (unlikely(object > last_object))
> +		return last_object;
> +	else
> +		return object;
> +}
> +
> +/*
> + * We want to avoid an expensive divide : (offset / cache->size)
> + *   Using the fact that size is a constant for a particular cache,
> + *   we can replace (offset / cache->size) by
> + *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
> + */
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +					const struct slab *slab, void *obj)
> +{
> +	u32 offset = (obj - slab->s_mem);
> +	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
> +}
> +
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +				     const struct slab *slab)
> +{
> +	if (is_kfence_address(slab_address(slab)))
> +		return 1;
> +	return cache->num;
> +}
>  #endif
>  
>  #ifdef CONFIG_SLUB
>  #include <linux/slub_def.h>
> +
> +static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> +				void *x) {
> +	void *object = x - (x - slab_address(slab)) % cache->size;
> +	void *last_object = slab_address(slab) +
> +		(slab->objects - 1) * cache->size;
> +	void *result = (unlikely(object > last_object)) ? last_object : object;
> +
> +	result = fixup_red_left(cache, result);
> +	return result;
> +}
> +
> +/* Determine object index from a given position */
> +static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> +					  void *addr, void *obj)
> +{
> +	return reciprocal_divide(kasan_reset_tag(obj) - addr,
> +				 cache->reciprocal_size);
> +}
> +
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +					const struct slab *slab, void *obj)
> +{
> +	if (is_kfence_address(obj))
> +		return 0;
> +	return __obj_to_index(cache, slab_address(slab), obj);
> +}
> +
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +				     const struct slab *slab)
> +{
> +	return slab->objects;
> +}
>  #endif
>  
>  #include <linux/memcontrol.h>
> -- 
> 2.39.0
> 


Thread overview: 6+ messages
2023-01-16  8:50 Rong Tao
2023-01-16 11:59 ` Hyeonggon Yoo [this message]
2023-01-17  2:01   ` [PATCH] mm: Functions used internally should not be put into Rong Tao
2023-01-17 12:57     ` Vlastimil Babka
2023-01-18  7:23       ` Rong Tao
2023-01-21 20:34         ` Yonghong Song
