From: Peng Zhang <zhangpeng.00@bytedance.com>
To: Liam.Howlett@oracle.com
Cc: akpm@linux-foundation.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, maple-tree@lists.infradead.org,
Peng Zhang <zhangpeng.00@bytedance.com>
Subject: [PATCH 10/10] maple_tree: Simplify and clean up mas_wr_node_store()
Date: Mon, 15 May 2023 21:17:57 +0800
Message-ID: <20230515131757.60035-11-zhangpeng.00@bytedance.com>
In-Reply-To: <20230515131757.60035-1-zhangpeng.00@bytedance.com>
Simplify and clean up mas_wr_node_store(): the new node end is now calculated by the caller (via mas_wr_new_end()) and passed in, which allows the open-coded calculation and other unnecessary code to be removed.
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
---
lib/maple_tree.c | 75 +++++++++++++-----------------------------------
1 file changed, 20 insertions(+), 55 deletions(-)
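
An aside for reviewers (below the "---" cut line, so not part of the commit):
the rewritten function boils down to a prefix copy, an optional left piece of
a split range, the new entry, and a single tail copy. The standalone sketch
below shows that pattern on plain arrays; it is only an analogy, not kernel
code, and every name in it (splice_range, new_pivots, and so on) is invented
for the example.

#include <stdio.h>
#include <string.h>

#define NSLOTS 16

/*
 * Sketch only: splice a new range [index, last] = entry into a pivots/slots
 * pair the way the rewritten mas_wr_node_store() does -- prefix copy,
 * optional left piece of a split range, the new entry, then one tail copy.
 * Every name here is invented for the example.
 */
static int splice_range(unsigned long *pivots, const char **slots, int end,
			int offset, int offset_end, unsigned long r_min,
			unsigned long index, unsigned long last,
			const char *content, const char *entry)
{
	unsigned long new_pivots[NSLOTS];
	const char *new_slots[NSLOTS];
	int dst = offset, copy_size;

	/* Copy from start to insert point. */
	memcpy(new_pivots, pivots, sizeof(unsigned long) * offset);
	memcpy(new_slots, slots, sizeof(const char *) * offset);

	/* New range starts inside an old range: keep its left piece. */
	if (r_min < index) {
		new_slots[dst] = content;
		new_pivots[dst++] = index - 1;
	}

	/* Store the new entry and its range end. */
	new_slots[dst] = entry;
	new_pivots[dst] = last;

	/* Copy the tail that was not overwritten. */
	copy_size = end - offset_end + 1;
	memcpy(new_slots + dst + 1, slots + offset_end,
	       sizeof(const char *) * copy_size);
	memcpy(new_pivots + dst + 1, pivots + offset_end,
	       sizeof(unsigned long) * copy_size);

	memcpy(pivots, new_pivots, sizeof(new_pivots));
	memcpy(slots, new_slots, sizeof(new_slots));
	return dst + copy_size;		/* offset of the last used slot */
}

int main(void)
{
	unsigned long pivots[NSLOTS] = { 9, 19, 29, 39 };
	const char *slots[NSLOTS] = { "a", "b", "c", "d" };
	int i, end;

	/* Overwrite [15, 24]: trims "b" on the left and "c" on the right. */
	end = splice_range(pivots, slots, 3, 1, 2, 10, 15, 24, "b", "X");
	for (i = 0; i <= end; i++)
		printf("[..%lu] = %s\n", pivots[i], slots[i]);
	return 0;
}

With the input above it prints a, b, X, c, d with pivots 9, 14, 24, 29, 39:
the untouched left piece of "b" and the trimmed right piece of "c" survive
around the new entry, much as the untouched slots do in the node.
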
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d558e7bcb6da8..ff4aa01cf88b6 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4066,46 +4066,21 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
*
* Return: True if stored, false otherwise
*/
-static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **dst_slots;
unsigned long *dst_pivots;
unsigned char dst_offset;
- unsigned char new_end = wr_mas->node_end;
- unsigned char offset;
- unsigned char node_slots = mt_slots[wr_mas->type];
struct maple_node reuse, *newnode;
- unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
+ unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
- offset = mas->offset;
- if (mas->last == wr_mas->r_max) {
- /* runs right to the end of the node */
- if (mas->last == mas->max)
- new_end = offset;
- /* don't copy this offset */
+ if (mas->last == wr_mas->end_piv)
wr_mas->offset_end++;
- } else if (mas->last < wr_mas->r_max) {
- /* new range ends in this range */
- if (unlikely(wr_mas->r_max == ULONG_MAX))
- mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
-
- new_end++;
- } else {
- if (wr_mas->end_piv == mas->last)
- wr_mas->offset_end++;
-
- new_end -= wr_mas->offset_end - offset - 1;
- }
-
- /* new range starts within a range */
- if (wr_mas->r_min < mas->index)
- new_end++;
-
- /* Not enough room */
- if (new_end >= node_slots)
- return false;
+ else if (unlikely(wr_mas->r_max == ULONG_MAX))
+ mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
/* Not enough data. */
if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
@@ -4128,47 +4103,36 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
dst_pivots = ma_pivots(newnode, wr_mas->type);
dst_slots = ma_slots(newnode, wr_mas->type);
/* Copy from start to insert point */
- memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
- memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
- dst_offset = offset;
+ memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
+ memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
/* Handle insert of new range starting after old range */
if (wr_mas->r_min < mas->index) {
- mas->offset++;
- rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
- dst_pivots[dst_offset++] = mas->index - 1;
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
+ dst_pivots[mas->offset++] = mas->index - 1;
}
/* Store the new entry and range end. */
- if (dst_offset < max_piv)
- dst_pivots[dst_offset] = mas->last;
- mas->offset = dst_offset;
- rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
+ if (mas->offset < node_pivots)
+ dst_pivots[mas->offset] = mas->last;
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
/*
* this range wrote to the end of the node or it overwrote the rest of
* the data
*/
- if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
- new_end = dst_offset;
+ if (wr_mas->offset_end > wr_mas->node_end)
goto done;
- }
- dst_offset++;
+ dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
sizeof(void *) * copy_size);
- if (dst_offset < max_piv) {
- if (copy_size > max_piv - dst_offset)
- copy_size = max_piv - dst_offset;
+ memcpy(dst_pivots + dst_offset, wr_mas->pivots + wr_mas->offset_end,
+ sizeof(unsigned long) * (copy_size - 1));
- memcpy(dst_pivots + dst_offset,
- wr_mas->pivots + wr_mas->offset_end,
- sizeof(unsigned long) * copy_size);
- }
-
- if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
+ if (new_end < node_pivots)
dst_pivots[new_end] = mas->max;
done:
@@ -4429,7 +4393,8 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
return;
- else if (mas_wr_node_store(wr_mas))
+
+ if (mas_wr_node_store(wr_mas, new_end))
return;
if (mas_is_err(mas))
--
2.20.1
Thread overview: 36+ messages
2023-05-15 13:17 [PATCH 00/10] Clean ups for maple tree Peng Zhang
2023-05-15 13:17 ` [PATCH 01/10] maple_tree: Drop the test code for mtree_alloc_{range,rrange}() Peng Zhang
2023-05-15 16:52 ` Liam R. Howlett
2023-05-15 13:17 ` [PATCH 02/10] maple_tree: Drop mtree_alloc_{range,rrange}() and related functions Peng Zhang
2023-05-15 16:52 ` Liam R. Howlett
2023-05-15 17:27 ` Matthew Wilcox
2023-05-15 17:35 ` Liam R. Howlett
2023-05-16 0:39 ` Peng Zhang
2023-05-15 13:17 ` [PATCH 03/10] maple_tree: Remove __must_hold() which does not work Peng Zhang
2023-05-15 14:55 ` Matthew Wilcox
2023-05-16 0:42 ` Peng Zhang
2023-05-15 15:00 ` Liam R. Howlett
2023-05-15 13:17 ` [PATCH 04/10] maple_tree: Simplify mas_is_span_wr() Peng Zhang
2023-05-15 16:06 ` Liam R. Howlett
2023-05-15 13:17 ` [PATCH 05/10] maple_tree: Make the code symmetrical in mas_wr_extend_null() Peng Zhang
2023-05-15 16:54 ` Liam R. Howlett
2023-05-15 13:17 ` [PATCH 06/10] maple_tree: Wrap the replace operation with an inline function Peng Zhang
2023-05-15 17:07 ` Liam R. Howlett
2023-05-16 0:46 ` Peng Zhang
2023-05-16 14:16 ` Liam R. Howlett
2023-05-16 14:22 ` Peng Zhang
2023-05-15 13:17 ` [PATCH 07/10] maple_tree: Add mas_wr_new_end() to calculate new_end accurately Peng Zhang
2023-05-15 13:17 ` [PATCH 08/10] maple_tree: Add comments and some minor cleanups to mas_wr_append() Peng Zhang
2023-05-15 17:29 ` Liam R. Howlett
2023-05-16 10:06 ` Peng Zhang
2023-05-15 13:17 ` [PATCH 09/10] maple_tree: Rework mas_wr_slot_store() to be cleaner and more efficient Peng Zhang
2023-05-15 18:01 ` Liam R. Howlett
2023-05-16 7:27 ` Peng Zhang
2023-05-16 14:17 ` Liam R. Howlett
2023-05-15 13:17 ` [PATCH 10/10] maple_tree: Simplify and clean up mas_wr_node_store() Peng Zhang [this message]
2023-05-15 18:58 ` [PATCH 10/10] maple_tree: Simplify and clean up mas_wr_node_store() Liam R. Howlett
2023-05-16 0:36 ` Peng Zhang
2023-05-16 10:53 ` Peng Zhang
2023-05-16 15:52 ` Liam R. Howlett
2023-05-16 23:53 ` Peng Zhang
2023-05-17 3:10 ` Peng Zhang