linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: maple-tree@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	Suren Baghdasaryan <surenb@google.com>,
	Matthew Wilcox <willy@infradead.org>,
	Sidhartha Kumar <sidhartha.kumar@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	Alice Ryhl <aliceryhl@google.com>,
	Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Arnd Bergmann <arnd@arndb.de>,
	Christian Kujau <lists@nerdbynature.de>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: [PATCH 11/28] maple_tree: Testing update for spanning store
Date: Thu, 15 Jan 2026 14:36:30 -0500	[thread overview]
Message-ID: <20260115193647.1695937-12-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20260115193647.1695937-1-Liam.Howlett@oracle.com>

Spanning store had some corner cases which showed up during rcu stress
testing.  Add explicit tests for those cases.

At the same time, add locking so that failures during rcu stress testing
are easier to inspect.  Only a single dump of the tree is made on the
first detected issue, instead of flooding the console with output.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 tools/testing/radix-tree/maple.c | 172 +++++++++++++++++++++++++++++--
 1 file changed, 163 insertions(+), 9 deletions(-)

diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 5c1b18e3ed210..85fb5616c133c 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -38,6 +38,7 @@ struct rcu_test_struct2 {
 
 	unsigned long index[RCU_RANGE_COUNT];
 	unsigned long last[RCU_RANGE_COUNT];
+	pthread_mutex_t dump;
 };
 
 struct rcu_test_struct3 {
@@ -33997,8 +33998,25 @@ static void *rcu_reader_fwd(void *ptr)
 				}
 			}
 
-			RCU_MT_BUG_ON(test, mas.index != r_start);
-			RCU_MT_BUG_ON(test, mas.last != r_end);
+			if (mas.index != r_start) {
+				if (pthread_mutex_trylock(&test->dump) != 0) {
+					rcu_read_unlock();
+					goto quit;
+				}
+				printk("start is wrong: %lx (%lu) vs expected %lx (%lu)\n",
+				       mas.index, mas.index, r_start, r_start);
+				RCU_MT_BUG_ON(test, mas.index != r_start);
+			}
+
+			if (mas.last != r_end) {
+				if (pthread_mutex_trylock(&test->dump) != 0) {
+					rcu_read_unlock();
+					goto quit;
+				}
+				printk("last is wrong: %lx (%lu) vs expected %lx (%lu)\n",
+				       mas.last, mas.last, r_end, r_end);
+				RCU_MT_BUG_ON(test, mas.last != r_end);
+			}
 
 			if (i == reader->flip) {
 				alt = xa_mk_value(index + i + RCU_RANGE_COUNT);
@@ -34014,7 +34032,8 @@ static void *rcu_reader_fwd(void *ptr)
 				else if (entry == alt)
 					toggled  = true;
 				else {
-					printk("!!%lu-%lu -> %p not %p or %p\n", mas.index, mas.last, entry, expected, alt);
+					printk("!!%lu-%lu -> %p not %p or %p\n",
+					       mas.index, mas.last, entry, expected, alt);
 					RCU_MT_BUG_ON(test, 1);
 				}
 
@@ -34047,9 +34066,11 @@ static void *rcu_reader_fwd(void *ptr)
 		usleep(test->pause);
 	}
 
+quit:
 	rcu_unregister_thread();
 	return NULL;
 }
+
 /* RCU reader in decreasing index */
 static void *rcu_reader_rev(void *ptr)
 {
@@ -34119,13 +34140,17 @@ static void *rcu_reader_rev(void *ptr)
 				line = __LINE__;
 
 			if (mas.index != r_start) {
+				if (pthread_mutex_trylock(&test->dump) != 0) {
+					rcu_read_unlock();
+					goto quit;
+				}
+
 				alt = xa_mk_value(index + i * 2 + 1 +
 						  RCU_RANGE_COUNT);
 				mt_dump(test->mt, mt_dump_dec);
-				printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
-				       mas.index, mas.last, entry,
-				       r_start, r_end, expected, alt,
-				       line, i);
+				printk("Error: %p %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
+				       mas.node, mas.index, mas.last, entry,
+				       r_start, r_end, expected, alt, line, i);
 			}
 			RCU_MT_BUG_ON(test, mas.index != r_start);
 			RCU_MT_BUG_ON(test, mas.last != r_end);
@@ -34180,6 +34205,7 @@ static void *rcu_reader_rev(void *ptr)
 		usleep(test->pause);
 	}
 
+quit:
 	rcu_unregister_thread();
 	return NULL;
 }
@@ -34329,6 +34355,7 @@ static void rcu_stress(struct maple_tree *mt, bool forward)
 	test.seen_modified = 0;
 	test.thread_count = 0;
 	test.start = test.stop = false;
+	pthread_mutex_init(&test.dump, NULL);
 	seed = time(NULL);
 	srand(seed);
 	for (i = 0; i < RCU_RANGE_COUNT; i++) {
@@ -34414,6 +34441,7 @@ struct rcu_test_struct {
 	unsigned long removed;		/* The index of the removed entry */
 	unsigned long added;		/* The index of the removed entry */
 	unsigned long toggle;		/* The index of the removed entry */
+	pthread_mutex_t dump;
 };
 
 static inline
@@ -34506,7 +34534,9 @@ static void *rcu_loop(void *ptr)
 			/* Out of the interesting range */
 			if (mas.index < test->index || mas.index > test->last) {
 				if (entry != expected) {
-					printk("%lx - %lx = %p not %p\n",
+					if (pthread_mutex_trylock(&test->dump) != 0)
+						break;
+					printk("\nERROR: %lx - %lx = %p not %p\n",
 					       mas.index, mas.last, entry, expected);
 				}
 				MT_BUG_ON(test->mt, entry != expected);
@@ -34854,6 +34884,7 @@ static noinline void __init check_rcu_threaded(struct maple_tree *mt)
 	vals.range_end = ULONG_MAX;
 	vals.seen_entry2 = 0;
 	vals.seen_entry3 = 0;
+	pthread_mutex_init(&vals.dump, NULL);
 
 	run_check_rcu(mt, &vals);
 	mtree_destroy(mt);
@@ -35250,6 +35281,8 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
 {
 	unsigned long i, max = 5000;
 	MA_STATE(mas, mt, 1200, 2380);
+	struct maple_enode *enode;
+	struct maple_node *pnode;
 
 	for (i = 0; i <= max; i++)
 		mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
@@ -35410,6 +35443,128 @@ static noinline void __init check_spanning_write(struct maple_tree *mt)
 	mas_set_range(&mas, 76, 875);
 	mas_store_gfp(&mas, NULL, GFP_KERNEL);
 	mtree_unlock(mt);
+	mtree_destroy(mt);
+
+	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+	for (i = 0; i <= max; i++)
+		mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+	if (MAPLE_32BIT)
+		i = 49750; /* 0xC256 */
+	else
+		i = 49835; /* 0xC2AB */
+
+	mtree_lock(mt);
+	/* Store a null across a boundary that ends in a null */
+	mas_set(&mas, i); /* 0xC2AB on 64-bit, 0xC256 on 32-bit */
+	MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+	MT_BUG_ON(mt, mas.end != mas.offset);
+	MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+	mas_set_range(&mas, i, mas.last - 1);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+	mt_validate(mt);
+
+	/* Store a null across a boundary that starts and ends in a null */
+	mas_set(&mas, 49849);
+	MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+	MT_BUG_ON(mt, mas.index != 49846);
+	mas_set(&mas, 49876);
+	MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+	MT_BUG_ON(mt, mas.last != 49879);
+	mas_set_range(&mas, 49849, 49876);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+	/* Results in 49846-49879: (nil) */
+	MT_BUG_ON(mt, mas.index != 49846);
+	MT_BUG_ON(mt, mas.last != 49879);
+	mt_validate(mt);
+
+	/* Store a null across a boundary that starts and ends next to nulls */
+	mas_set(&mas, 49800);
+	MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+	MT_BUG_ON(mt, mas.index != 49800);
+	mas_set(&mas, 49815);
+	MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+	MT_BUG_ON(mt, mas.last != 49815);
+	mas_set_range(&mas, 49800, 49815);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+	/* Results in 49796-49819: (nil) */
+	MT_BUG_ON(mt, mas.index != 49796);
+	MT_BUG_ON(mt, mas.last != 49819);
+	mt_validate(mt);
+
+	/* Store a value across a boundary that starts and ends in a null */
+	mas_set(&mas, 49907);
+	MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+	MT_BUG_ON(mt, mas.index != 49906);
+	mas_set(&mas, 49928);
+	MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+	MT_BUG_ON(mt, mas.last != 49929);
+	mas_set_range(&mas, 49907, 49928);
+	mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+	MT_BUG_ON(mt, mas.index != 49907);
+	MT_BUG_ON(mt, mas.last != 49928);
+	mt_validate(mt);
+
+	/* Store a value across a node boundary that causes a 3 way split */
+
+	if (MAPLE_32BIT)
+		i = 49590; /* 0xc1b6 */
+	else
+		i = 49670; /* 0xC206 */
+
+	mas_set(&mas, i);
+	MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+	MT_BUG_ON(mt, mas.index != i);
+	MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+	enode = mas.node;
+	MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+	MT_BUG_ON(mt, mas.index != i + 6);
+	MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+	MT_BUG_ON(mt, enode == mas.node);
+	mas_set_range(&mas, i + 2, i + 7);
+	mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+	MT_BUG_ON(mt, mas.index != i + 2);
+	MT_BUG_ON(mt, mas.last != i + 7);
+	mt_validate(mt);
+
+	/* 2 levels of basically the same testing */
+
+	if (MAPLE_32BIT) {
+		/* 32bit needs a bit more work to fill the nodes.
+		 * The two parent nodes need to be filled (they have one space
+		 * vacant) without causing a split at the store locations (or
+		 * the siblings).
+		 */
+		i = 44426;
+		mas_set(&mas, i);
+		mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+		i = 45126;
+		mas_set(&mas, i);
+		mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+		i = 44790;
+	} else {
+		/* 48950 - 48955 => ptr, 48956 - 48959 => NULL */
+		i = 48950;
+
+	}
+	mas_set(&mas, i);
+	MT_BUG_ON(mt, mas_walk(&mas) == NULL);
+	MT_BUG_ON(mt, mas.index != i);
+	MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+	enode = mas.node;
+	pnode = mte_parent(enode);
+	MT_BUG_ON(mt, mas_next_range(&mas, ULONG_MAX) != NULL);
+	MT_BUG_ON(mt, mas.index != i + 6);
+	MT_BUG_ON(mt, mas.end != mt_slot_count(mas.node) - 1);
+	MT_BUG_ON(mt, enode == mas.node);
+	MT_BUG_ON(mt, pnode == mte_parent(mas.node));
+	mas_set_range(&mas, i + 2, i + 8);
+	mas_store_gfp(&mas, NULL, GFP_KERNEL);
+	mt_validate(mt);
+
+	mtree_unlock(mt);
+	mtree_destroy(mt);
+	rcu_barrier();
 }
 /* End of spanning write testing */
 
@@ -36029,7 +36184,6 @@ static inline int check_vma_modification(struct maple_tree *mt)
 	return 0;
 }
 
-
 void farmer_tests(void)
 {
 	struct maple_node *node;
-- 
2.47.3



  parent reply	other threads:[~2026-01-15 19:37 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-01-15 19:36 [PATCH 00/28] maple_tree: Replace big node with maple copy Liam R. Howlett
2026-01-15 19:36 ` [PATCH 01/28] maple_tree: Move mas_spanning_rebalance loop to function Liam R. Howlett
2026-01-15 19:36 ` [PATCH 02/28] maple_tree: Extract use of big node from mas_wr_spanning_store() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 03/28] maple_tree: Remove unnecessary assignment of orig_l index Liam R. Howlett
2026-01-15 19:36 ` [PATCH 04/28] maple_tree: inline mas_spanning_rebalance() into mas_wr_spanning_rebalance() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 05/28] maple_tree: Make ma_wr_states reliable for reuse in spanning store Liam R. Howlett
2026-01-15 19:36 ` [PATCH 06/28] maple_tree: Remove l_wr_mas from mas_wr_spanning_rebalance Liam R. Howlett
2026-01-15 19:36 ` [PATCH 07/28] maple_tree: Don't pass through height in mas_wr_spanning_store Liam R. Howlett
2026-01-15 19:36 ` [PATCH 08/28] maple_tree: Move maple_subtree_state from mas_wr_spanning_store to mas_wr_spanning_rebalance Liam R. Howlett
2026-01-15 19:36 ` [PATCH 09/28] maple_tree: Correct right ma_wr_state end pivot in mas_wr_spanning_store() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 10/28] maple_tree: Introduce maple_copy node and use it in mas_spanning_rebalance() Liam R. Howlett
2026-01-16  7:45   ` kernel test robot
2026-01-16 19:46     ` Liam R. Howlett
2026-01-15 19:36 ` Liam R. Howlett [this message]
2026-01-15 19:36 ` [PATCH 12/28] maple_tree: Inline mas_spanning_rebalance_loop() into mas_wr_spanning_rebalance() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 13/28] maple_tree: Change initial big node setup in mas_wr_spanning_rebalance() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 14/28] maple_tree: Introduce ma_leaf_max_gap() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 15/28] maple_tree: Add gap support, slot and pivot sizes for maple copy Liam R. Howlett
2026-01-15 19:36 ` [PATCH 16/28] maple_tree: Start using maple copy node for destination Liam R. Howlett
2026-01-16  9:36   ` kernel test robot
2026-01-16 20:19     ` Liam R. Howlett
2026-01-16 22:44       ` Andrew Morton
2026-01-19 15:06         ` Liam R. Howlett
2026-01-15 19:36 ` [PATCH 17/28] maple_tree: inline mas_wr_spanning_rebalance() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 18/28] maple_tree: Remove unnecessary return statements Liam R. Howlett
2026-01-15 19:36 ` [PATCH 19/28] maple_tree: Separate wr_split_store and wr_rebalance store type code path Liam R. Howlett
2026-01-15 19:36 ` [PATCH 20/28] maple_tree: Add cp_is_new_root() helper Liam R. Howlett
2026-01-15 19:36 ` [PATCH 21/28] maple_tree: Use maple copy node for mas_wr_rebalance() operation Liam R. Howlett
2026-01-15 19:36 ` [PATCH 22/28] maple_tree: Add copy_tree_location() helper Liam R. Howlett
2026-01-15 19:36 ` [PATCH 23/28] maple_tree: Add cp_converged() helper Liam R. Howlett
2026-01-15 19:36 ` [PATCH 24/28] maple_tree: Use maple copy node for mas_wr_split() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 25/28] maple_tree: Remove maple big node and subtree structs Liam R. Howlett
2026-01-15 19:36 ` [PATCH 26/28] maple_tree: Pass maple copy node to mas_wmb_replace() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 27/28] maple_tree: Don't pass end to mas_wr_append() Liam R. Howlett
2026-01-15 19:36 ` [PATCH 28/28] maple_tree: Clean up mas_wr_node_store() Liam R. Howlett

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260115193647.1695937-12-Liam.Howlett@oracle.com \
    --to=liam.howlett@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=aliceryhl@google.com \
    --cc=arnd@arndb.de \
    --cc=geert@linux-m68k.org \
    --cc=kuninori.morimoto.gx@renesas.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=lists@nerdbynature.de \
    --cc=maple-tree@lists.infradead.org \
    --cc=sidhartha.kumar@oracle.com \
    --cc=surenb@google.com \
    --cc=vbabka@suse.cz \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox