From: Yu Kuai <yukuai1@huaweicloud.com>
To: stable@vger.kernel.org, gregkh@linuxfoundation.org,
	harry.wentland@amd.com, sunpeng.li@amd.com,
	Rodrigo.Siqueira@amd.com, alexander.deucher@amd.com,
	christian.koenig@amd.com, Xinhui.Pan@amd.com, airlied@gmail.com,
	daniel@ffwll.ch, viro@zeniv.linux.org.uk, brauner@kernel.org,
	Liam.Howlett@oracle.com, akpm@linux-foundation.org,
	hughd@google.com, willy@infradead.org, sashal@kernel.org,
	srinivasan.shanmugam@amd.com, chiahsuan.chung@amd.com,
	mingo@kernel.org, mgorman@techsingularity.net,
	yukuai3@huawei.com, chengming.zhou@linux.dev,
	zhangpeng.00@bytedance.com, chuck.lever@oracle.com
Cc: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	maple-tree@lists.infradead.org, linux-mm@kvack.org,
	yukuai1@huaweicloud.com, yi.zhang@huawei.com,
	yangerkun@huawei.com
Subject: [PATCH 6.6 12/28] maple_tree: clean up inlines for some functions
Date: Thu, 24 Oct 2024 21:19:53 +0800	[thread overview]
Message-ID: <20241024132009.2267260-13-yukuai1@huaweicloud.com> (raw)
In-Reply-To: <20241024132009.2267260-1-yukuai1@huaweicloud.com>

From: "Liam R. Howlett" <Liam.Howlett@oracle.com>

commit 271f61a8b41dcd86e1ecc2e0455bcc071bc7dde4 upstream.

There are a few functions which are marked inline but are somewhat too large
to inline, so remove the inline keyword.

There are also several very small functions used in critical code sections
which gcc was not inlining, so be stricter and use __always_inline for these
functions.
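
For illustration only (not part of this patch), a minimal sketch of the two
annotations the change moves between: plain "inline" is only a hint that the
compiler may ignore, while the kernel's __always_inline (which expands to
__attribute__((__always_inline__))) forces inlining at every call site. The
helper names below are hypothetical and do not exist in lib/maple_tree.c:

	/* Hypothetical example helpers, illustration only. */
	static inline int big_helper(int x)	/* hint; gcc may still emit an out-of-line copy */
	{
		return x * x + x;
	}

	static __always_inline int tiny_hot_helper(int x)	/* always inlined at call sites */
	{
		return x + 1;
	}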

Link: https://lkml.kernel.org/r/20231101171629.3612299-8-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Peng Zhang <zhangpeng.00@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 lib/maple_tree.c | 78 ++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3df7e3456205..d1416276f1ef 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -217,23 +217,24 @@ static inline unsigned int mt_attr(struct maple_tree *mt)
 	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
 }
 
-static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+static __always_inline enum maple_type mte_node_type(
+		const struct maple_enode *entry)
 {
 	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
 		MAPLE_NODE_TYPE_MASK;
 }
 
-static inline bool ma_is_dense(const enum maple_type type)
+static __always_inline bool ma_is_dense(const enum maple_type type)
 {
 	return type < maple_leaf_64;
 }
 
-static inline bool ma_is_leaf(const enum maple_type type)
+static __always_inline bool ma_is_leaf(const enum maple_type type)
 {
 	return type < maple_range_64;
 }
 
-static inline bool mte_is_leaf(const struct maple_enode *entry)
+static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
 {
 	return ma_is_leaf(mte_node_type(entry));
 }
@@ -242,7 +243,7 @@ static inline bool mte_is_leaf(const struct maple_enode *entry)
  * We also reserve values with the bottom two bits set to '10' which are
  * below 4096
  */
-static inline bool mt_is_reserved(const void *entry)
+static __always_inline bool mt_is_reserved(const void *entry)
 {
 	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
 		xa_is_internal(entry);
@@ -295,7 +296,8 @@ static inline bool mas_searchable(struct ma_state *mas)
 	return true;
 }
 
-static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+static __always_inline struct maple_node *mte_to_node(
+		const struct maple_enode *entry)
 {
 	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
 }
@@ -372,12 +374,12 @@ static inline bool mte_has_null(const struct maple_enode *node)
 	return (unsigned long)node & MAPLE_ENODE_NULL;
 }
 
-static inline bool ma_is_root(struct maple_node *node)
+static __always_inline bool ma_is_root(struct maple_node *node)
 {
 	return ((unsigned long)node->parent & MA_ROOT_PARENT);
 }
 
-static inline bool mte_is_root(const struct maple_enode *node)
+static __always_inline bool mte_is_root(const struct maple_enode *node)
 {
 	return ma_is_root(mte_to_node(node));
 }
@@ -387,7 +389,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas)
 	return !mas->min && mas->max == ULONG_MAX;
 }
 
-static inline bool mt_is_alloc(struct maple_tree *mt)
+static __always_inline bool mt_is_alloc(struct maple_tree *mt)
 {
 	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
 }
@@ -526,11 +528,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
  *
  * Return: The slot in the parent node where @enode resides.
  */
-static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+static __always_inline
+unsigned int mte_parent_slot(const struct maple_enode *enode)
 {
 	unsigned long val = (unsigned long)mte_to_node(enode)->parent;
 
-	if (val & MA_ROOT_PARENT)
+	if (unlikely(val & MA_ROOT_PARENT))
 		return 0;
 
 	/*
@@ -546,7 +549,8 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
  *
  * Return: The parent maple node.
  */
-static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+static __always_inline
+struct maple_node *mte_parent(const struct maple_enode *enode)
 {
 	return (void *)((unsigned long)
 			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
@@ -558,7 +562,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
  *
  * Return: true if dead, false otherwise.
  */
-static inline bool ma_dead_node(const struct maple_node *node)
+static __always_inline bool ma_dead_node(const struct maple_node *node)
 {
 	struct maple_node *parent;
 
@@ -574,7 +578,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
  *
  * Return: true if dead, false otherwise.
  */
-static inline bool mte_dead_node(const struct maple_enode *enode)
+static __always_inline bool mte_dead_node(const struct maple_enode *enode)
 {
 	struct maple_node *parent, *node;
 
@@ -730,7 +734,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
  * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
  * otherwise.
  */
-static inline unsigned long
+static __always_inline unsigned long
 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
 	       unsigned char piv, enum maple_type type)
 {
@@ -812,20 +816,20 @@ static inline bool mt_write_locked(const struct maple_tree *mt)
 		lockdep_is_held(&mt->ma_lock);
 }
 
-static inline bool mt_locked(const struct maple_tree *mt)
+static __always_inline bool mt_locked(const struct maple_tree *mt)
 {
 	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
 		lockdep_is_held(&mt->ma_lock);
 }
 
-static inline void *mt_slot(const struct maple_tree *mt,
+static __always_inline void *mt_slot(const struct maple_tree *mt,
 		void __rcu **slots, unsigned char offset)
 {
 	return rcu_dereference_check(slots[offset], mt_locked(mt));
 }
 
-static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
-				   unsigned char offset)
+static __always_inline void *mt_slot_locked(struct maple_tree *mt,
+		void __rcu **slots, unsigned char offset)
 {
 	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
 }
@@ -837,8 +841,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
  *
  * Return: The entry stored in @slots at the @offset.
  */
-static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
-				       unsigned char offset)
+static __always_inline void *mas_slot_locked(struct ma_state *mas,
+		void __rcu **slots, unsigned char offset)
 {
 	return mt_slot_locked(mas->tree, slots, offset);
 }
@@ -851,8 +855,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
  *
  * Return: The entry stored in @slots at the @offset
  */
-static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
-			     unsigned char offset)
+static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+		unsigned char offset)
 {
 	return mt_slot(mas->tree, slots, offset);
 }
@@ -863,7 +867,7 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
  *
  * Return: The pointer to the root of the tree
  */
-static inline void *mas_root(struct ma_state *mas)
+static __always_inline void *mas_root(struct ma_state *mas)
 {
 	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
 }
@@ -1437,10 +1441,8 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
  * Uses metadata to find the end of the data when possible.
  * Return: The zero indexed last slot with data (may be null).
  */
-static inline unsigned char ma_data_end(struct maple_node *node,
-					enum maple_type type,
-					unsigned long *pivots,
-					unsigned long max)
+static __always_inline unsigned char ma_data_end(struct maple_node *node,
+		enum maple_type type, unsigned long *pivots, unsigned long max)
 {
 	unsigned char offset;
 
@@ -4344,7 +4346,7 @@ static inline void *mas_insert(struct ma_state *mas, void *entry)
 
 }
 
-static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
 {
 retry:
 	mas_set(mas, index);
@@ -4353,7 +4355,7 @@ static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
 		goto retry;
 }
 
-static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
 		struct maple_node *node, const unsigned long index)
 {
 	if (unlikely(ma_dead_node(node))) {
@@ -4372,7 +4374,7 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas,
  * The prev node value will be mas->node[mas->offset] or MAS_NONE.
  * Return: 1 if the node is dead, 0 otherwise.
  */
-static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+static int mas_prev_node(struct ma_state *mas, unsigned long min)
 {
 	enum maple_type mt;
 	int offset, level;
@@ -4533,8 +4535,8 @@ static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
  * The next value will be mas->node[mas->offset] or MAS_NONE.
  * Return: 1 on dead node, 0 otherwise.
  */
-static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
-				unsigned long max)
+static int mas_next_node(struct ma_state *mas, struct maple_node *node,
+		unsigned long max)
 {
 	unsigned long min;
 	unsigned long *pivots;
@@ -5675,7 +5677,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
 }
 EXPORT_SYMBOL_GPL(mas_expected_entries);
 
-static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+static bool mas_next_setup(struct ma_state *mas, unsigned long max,
 		void **entry)
 {
 	bool was_none = mas_is_none(mas);
@@ -5791,8 +5793,7 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
 }
 EXPORT_SYMBOL_GPL(mt_next);
 
-static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
-		void **entry)
+static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
 {
 	if (unlikely(mas->index <= min)) {
 		mas->node = MAS_UNDERFLOW;
@@ -5941,8 +5942,7 @@ EXPORT_SYMBOL_GPL(mas_pause);
  *
  * Returns: True if entry is the answer, false otherwise.
  */
-static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
-		void **entry)
+static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
 {
 	if (mas_is_active(mas)) {
 		if (mas->last < max)
@@ -6058,7 +6058,7 @@ EXPORT_SYMBOL_GPL(mas_find_range);
  *
  * Returns: True if entry is the answer, false otherwise.
  */
-static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
 		void **entry)
 {
 	if (mas_is_active(mas)) {
-- 
2.39.2



Thread overview: 47+ messages
2024-10-24 13:19 [PATCH 6.6 00/28] fix CVE-2024-46701 Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 01/28] maple_tree: add mt_free_one() and mt_attr() helpers Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 02/28] maple_tree: introduce {mtree,mas}_lock_nested() Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 03/28] maple_tree: introduce interfaces __mt_dup() and mtree_dup() Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 04/28] maple_tree: skip other tests when BENCH is enabled Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 05/28] maple_tree: preserve the tree attributes when destroying maple tree Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 06/28] maple_tree: remove unnecessary default labels from switch statements Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 07/28] maple_tree: make mas_erase() more robust Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 08/28] maple_tree: move debug check to __mas_set_range() Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 09/28] maple_tree: add end of node tracking to the maple state Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 10/28] maple_tree: use cached node end in mas_next() Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 11/28] maple_tree: use cached node end in mas_destroy() Yu Kuai
2024-10-24 13:19 ` Yu Kuai [this message]
2024-10-24 13:19 ` [PATCH 6.6 13/28] maple_tree: add test for mtree_dup() Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 14/28] maple_tree: separate ma_state node from status Yu Kuai
2024-10-24 13:19 ` [PATCH 6.6 15/28] maple_tree: remove mas_searchable() Yu Kuai
2024-10-24 13:22 ` [PATCH 6.6 16/28] Revert "maple_tree: correct tree corruption on spanning store" Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 17/28] maple_tree: use maple state end for write operations Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 18/28] maple_tree: don't find node end in mtree_lookup_walk() Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 19/28] maple_tree: mtree_range_walk() clean up Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 20/28] lib/maple_tree.c: fix build error due to hotfix alteration Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 21/28] maple_tree: avoid checking other gaps after getting the largest gap Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 22/28] libfs: Re-arrange locking in offset_iterate_dir() Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 23/28] libfs: Define a minimum directory offset Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 24/28] libfs: Add simple_offset_empty() Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 25/28] maple_tree: Add mtree_alloc_cyclic() Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 26/28] libfs: Convert simple directory offsets to use a Maple Tree Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 27/28] libfs: fix infinite directory reads for offset dir Yu Kuai
2024-10-24 13:22   ` [PATCH 6.6 28/28] maple_tree: correct tree corruption on spanning store Yu Kuai
2024-11-06 15:02     ` Lorenzo Stoakes
2024-11-07  1:22       ` Yu Kuai
2024-11-06  6:16 ` [PATCH 6.6 00/28] fix CVE-2024-46701 Greg KH
2024-11-06 14:44   ` Liam R. Howlett
2024-11-06 15:19   ` Chuck Lever III
2024-11-06 16:21     ` James Bottomley
2024-11-07  0:57     ` Yu Kuai
2024-11-07 14:41       ` Chuck Lever
2024-11-08  1:19         ` Yu Kuai
2024-11-08 13:23           ` Chuck Lever III
2024-11-08 17:03             ` Liam R. Howlett
2024-11-09  1:38               ` Yu Kuai
2024-11-09  1:30             ` Yu Kuai
2024-11-09 16:58               ` Chuck Lever III
2024-11-11  0:56                 ` Yu Kuai
2024-11-07 14:44       ` Liam R. Howlett
2024-11-06 14:43 ` Lorenzo Stoakes
2024-11-07  1:43   ` Yu Kuai
