From: andrey.konovalov@linux.dev
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Konovalov <andreyknvl@gmail.com>,
Marco Elver <elver@google.com>,
Alexander Potapenko <glider@google.com>,
Dmitry Vyukov <dvyukov@google.com>,
Vlastimil Babka <vbabka@suse.cz>,
kasan-dev@googlegroups.com, Evgenii Stepanov <eugenis@google.com>,
Oscar Salvador <osalvador@suse.de>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH v4 11/22] lib/stackdepot: store free stack records in a freelist
Date: Mon, 20 Nov 2023 18:47:09 +0100
Message-ID: <b9e4c79955c2121b69301778643b203d3fb09ccc.1700502145.git.andreyknvl@google.com>
In-Reply-To: <cover.1700502145.git.andreyknvl@google.com>
From: Andrey Konovalov <andreyknvl@google.com>
Instead of using the global pool_offset variable to find a free slot
when storing a new stack record, maintain a freelist of free slots
within the allocated stack pools.

A global next_stack variable is used as the head of the freelist, and
the next field in the stack_record struct is reused as the freelist
link (when the record is not in the freelist, this field is used as a
link in the hash table).

This is a preparatory patch for implementing the eviction of stack
records from the stack depot.
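
To illustrate the idea, here is a minimal userspace sketch of the
freelist scheme described above (names like init_pool() and
alloc_record() are invented for the example; the real code in
lib/stackdepot.c additionally manages handles, hashing, pool
bookkeeping, and locking):

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for struct stack_record: only the link field. */
struct record {
	struct record *next;	/* Link in hash table or freelist */
};

#define RECORDS_PER_POOL 4

/* Head of the freelist, analogous to next_stack. */
static struct record *free_head;

/* Link all records of a freshly allocated pool into the freelist. */
static void init_pool(struct record *pool)
{
	int i;

	for (i = 0; i < RECORDS_PER_POOL; i++)
		pool[i].next = (i < RECORDS_PER_POOL - 1) ? &pool[i + 1] : NULL;

	/* Mirrors the WARN_ON(next_stack) check in depot_init_pool(). */
	assert(!free_head);
	free_head = pool;
}

/* Pop a free record off the freelist; return NULL if it is empty. */
static struct record *alloc_record(void)
{
	struct record *r = free_head;

	if (!r)
		return NULL;
	free_head = r->next;
	r->next = NULL;	/* From now on, next is the hash-table link. */
	return r;
}

int main(void)
{
	static struct record pool[RECORDS_PER_POOL];
	int i;

	init_pool(pool);
	/* The fifth allocation fails: the freelist is exhausted. */
	for (i = 0; i < RECORDS_PER_POOL + 1; i++)
		printf("alloc %d -> %p\n", i, (void *)alloc_record());
	return 0;
}

As in depot_alloc_stack() below, popping a record clears its next
pointer, since from that point on the field serves as the hash-table
link.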
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
Changes v2->v3:
- Add parentheses when referring to function calls in comments.
Changes v1->v2:
- Fix out-of-bounds when initializing a pool.
---
lib/stackdepot.c | 131 +++++++++++++++++++++++++++++------------------
1 file changed, 82 insertions(+), 49 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 68c1ac9aa916..a5eff165c0d5 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -54,8 +54,8 @@ union handle_parts {
};
struct stack_record {
- struct stack_record *next; /* Link in the hash table */
- u32 hash; /* Hash in the hash table */
+ struct stack_record *next; /* Link in hash table or freelist */
+ u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
@@ -87,10 +87,10 @@ static unsigned int stack_hash_mask;
static void *stack_pools[DEPOT_MAX_POOLS];
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Next stack in the freelist of stack records within stack_pools. */
+static struct stack_record *next_stack;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
@@ -226,6 +226,42 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
+{
+ const int records_in_pool = DEPOT_POOL_SIZE / DEPOT_STACK_RECORD_SIZE;
+ int i, offset;
+
+ /* Initialize handles and link stack records to each other. */
+ for (i = 0, offset = 0;
+ offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+ i++, offset += DEPOT_STACK_RECORD_SIZE) {
+ struct stack_record *stack = pool + offset;
+
+ stack->handle.pool_index = pools_num;
+ stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+
+ if (i < records_in_pool - 1)
+ stack->next = (void *)stack + DEPOT_STACK_RECORD_SIZE;
+ else
+ stack->next = NULL;
+ }
+
+ /* Link stack records into the freelist. */
+ WARN_ON(next_stack);
+ next_stack = pool;
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = pool;
+
+ /*
+ * WRITE_ONCE() pairs with potential concurrent read in
+ * depot_fetch_stack().
+ */
+ WRITE_ONCE(pools_num, pools_num + 1);
+}
+
/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
{
@@ -242,7 +278,7 @@ static void depot_keep_new_pool(void **prealloc)
* Use the preallocated memory for the new pool
* as long as we do not exceed the maximum number of pools.
*/
- if (pool_index + 1 < DEPOT_MAX_POOLS) {
+ if (pools_num < DEPOT_MAX_POOLS) {
new_pool = *prealloc;
*prealloc = NULL;
}
@@ -258,45 +294,42 @@ static void depot_keep_new_pool(void **prealloc)
}
/* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(size_t required_size, void **prealloc)
+static bool depot_update_pools(void **prealloc)
{
- /* Check if there is not enough space in the current pool. */
- if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
- /* Bail out if we reached the pool limit. */
- if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return false;
- }
+ /* Check if we still have objects in the freelist. */
+ if (next_stack)
+ goto out_keep_prealloc;
- /*
- * Move on to the new pool.
- * WRITE_ONCE() pairs with potential concurrent read in
- * stack_depot_fetch().
- */
- WRITE_ONCE(pool_index, pool_index + 1);
- stack_pools[pool_index] = new_pool;
+ /* Check if we have a new pool saved and use it. */
+ if (new_pool) {
+ depot_init_pool(new_pool);
new_pool = NULL;
- pool_offset = 0;
- /*
- * If the maximum number of pools is not reached, take note
- * that yet another new pool needs to be allocated.
- * smp_store_release() pairs with smp_load_acquire() in
- * stack_depot_save().
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS)
+ /* Take note that we might need a new new_pool. */
+ if (pools_num < DEPOT_MAX_POOLS)
smp_store_release(&new_pool_required, 1);
+
+ /* Try keeping the preallocated memory for new_pool. */
+ goto out_keep_prealloc;
+ }
+
+ /* Bail out if we reached the pool limit. */
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
}
- /* Check if the current pool is not yet allocated. */
- if (*prealloc && stack_pools[pool_index] == NULL) {
- /* Use the preallocated memory for the current pool. */
- stack_pools[pool_index] = *prealloc;
+ /* Check if we have preallocated memory and use it. */
+ if (*prealloc) {
+ depot_init_pool(*prealloc);
*prealloc = NULL;
return true;
}
- /* Otherwise, try using the preallocated memory for a new pool. */
+ return false;
+
+out_keep_prealloc:
+ /* Keep the preallocated memory for a new pool if required. */
if (*prealloc)
depot_keep_new_pool(prealloc);
return true;
@@ -307,35 +340,35 @@ static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
- size_t required_size = DEPOT_STACK_RECORD_SIZE;
/* Update current and new pools if required and possible. */
- if (!depot_update_pools(required_size, prealloc))
+ if (!depot_update_pools(prealloc))
return NULL;
- /* Check if we have a pool to save the stack trace. */
- if (stack_pools[pool_index] == NULL)
+ /* Check if we have a stack record to save the stack trace. */
+ stack = next_stack;
+ if (!stack)
return NULL;
+ /* Advance the freelist. */
+ next_stack = stack->next;
+
/* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
size = CONFIG_STACKDEPOT_MAX_FRAMES;
/* Save the stack trace. */
- stack = stack_pools[pool_index] + pool_offset;
+ stack->next = NULL;
stack->hash = hash;
stack->size = size;
- stack->handle.pool_index = pool_index;
- stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
- stack->handle.extra = 0;
+ /* stack->handle is already filled in by depot_init_pool(). */
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- pool_offset += required_size;
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, required_size);
+ kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
return stack;
}
@@ -345,16 +378,16 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
union handle_parts parts = { .handle = handle };
/*
* READ_ONCE() pairs with potential concurrent write in
- * depot_update_pools().
+ * depot_init_pool().
*/
- int pool_index_cached = READ_ONCE(pool_index);
+ int pools_num_cached = READ_ONCE(pools_num);
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
- if (parts.pool_index > pool_index_cached) {
+ if (parts.pool_index > pools_num_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pool_index_cached, handle);
+ parts.pool_index, pools_num_cached, handle);
return NULL;
}
--
2.25.1