From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 12/62] mm/slub: Convert __slab_free() to take a struct slab
Date: Mon, 4 Oct 2021 14:46:00 +0100
Message-ID: <20211004134650.4031813-13-willy@infradead.org>
In-Reply-To: <20211004134650.4031813-1-willy@infradead.org>

Provide a little more type safety, and also convert free_debug_processing()
to take a struct slab.
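
For reference, the struct slab helpers this conversion leans on
(slab_page(), slab_nid(), introduced earlier in the series) are thin,
type-safe wrappers over the slab's underlying struct page.  A minimal
sketch, assuming struct slab overlays the same memory as struct page;
the bodies shown here are illustrative only, not the exact definitions
from patch 03/62:

	/*
	 * Illustrative sketch: a struct slab aliases the slab's struct
	 * page, so conversion is a cast plus the existing page helpers.
	 */
	static inline struct page *slab_page(const struct slab *slab)
	{
		return (struct page *)slab;
	}

	static inline int slab_nid(const struct slab *slab)
	{
		return page_to_nid(slab_page(slab));
	}
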
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/slub.c | 52 ++++++++++++++++++++++++++--------------------------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 15996ea165ac..0a566a03d424 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1342,21 +1342,21 @@ static inline int free_consistency_checks(struct kmem_cache *s,
/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
- struct kmem_cache *s, struct page *page,
+ struct kmem_cache *s, struct slab *slab,
void *head, void *tail, int bulk_cnt,
unsigned long addr)
{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ struct kmem_cache_node *n = get_node(s, slab_nid(slab));
void *object = head;
int cnt = 0;
unsigned long flags, flags2;
int ret = 0;
spin_lock_irqsave(&n->list_lock, flags);
- slab_lock(page, &flags2);
+ slab_lock(slab_page(slab), &flags2);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!check_slab(s, page))
+ if (!check_slab(s, slab_page(slab)))
goto out;
}
@@ -1364,13 +1364,13 @@ static noinline int free_debug_processing(
cnt++;
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!free_consistency_checks(s, page, object, addr))
+ if (!free_consistency_checks(s, slab_page(slab), object, addr))
goto out;
}
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
- trace(s, page, object, 0);
+ trace(s, slab_page(slab), object, 0);
/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
init_object(s, object, SLUB_RED_INACTIVE);
@@ -1383,10 +1383,10 @@ static noinline int free_debug_processing(
out:
if (cnt != bulk_cnt)
- slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
+ slab_err(s, slab_page(slab), "Bulk freelist count(%d) invalid(%d)\n",
bulk_cnt, cnt);
- slab_unlock(page, &flags2);
+ slab_unlock(slab_page(slab), &flags2);
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
@@ -1609,7 +1609,7 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
static inline int free_debug_processing(
- struct kmem_cache *s, struct page *page,
+ struct kmem_cache *s, struct slab *slab,
void *head, void *tail, int bulk_cnt,
unsigned long addr) { return 0; }
@@ -3270,17 +3270,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
* have a longer lifetime than the cpu slabs in most processing loads.
*
* So we still attempt to reduce cache line usage. Just take the slab
- * lock and free the item. If there is no additional partial page
+ * lock and free the item. If there is no additional partial slab
* handling required then we can return immediately.
*/
-static void __slab_free(struct kmem_cache *s, struct page *page,
+static void __slab_free(struct kmem_cache *s, struct slab *slab,
void *head, void *tail, int cnt,
unsigned long addr)
{
void *prior;
int was_frozen;
- struct page new;
+ struct slab new;
unsigned long counters;
struct kmem_cache_node *n = NULL;
unsigned long flags;
@@ -3291,7 +3291,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
return;
if (kmem_cache_debug(s) &&
- !free_debug_processing(s, page, head, tail, cnt, addr))
+ !free_debug_processing(s, slab, head, tail, cnt, addr))
return;
do {
@@ -3299,8 +3299,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
spin_unlock_irqrestore(&n->list_lock, flags);
n = NULL;
}
- prior = page->freelist;
- counters = page->counters;
+ prior = slab->freelist;
+ counters = slab->counters;
set_freepointer(s, tail, prior);
new.counters = counters;
was_frozen = new.frozen;
@@ -3319,7 +3319,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
} else { /* Needs to be taken off a list */
- n = get_node(s, page_to_nid(page));
+ n = get_node(s, slab_nid(slab));
/*
* Speculatively acquire the list_lock.
* If the cmpxchg does not succeed then we may
@@ -3333,7 +3333,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
}
}
- } while (!cmpxchg_double_slab(s, page,
+ } while (!cmpxchg_double_slab(s, slab_page(slab),
prior, counters,
head, new.counters,
"__slab_free"));
@@ -3348,10 +3348,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
stat(s, FREE_FROZEN);
} else if (new.frozen) {
/*
- * If we just froze the page then put it onto the
+ * If we just froze the slab then put it onto the
* per cpu partial list.
*/
- put_cpu_partial(s, page, 1);
+ put_cpu_partial(s, slab_page(slab), 1);
stat(s, CPU_PARTIAL_FREE);
}
@@ -3366,8 +3366,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* then add it.
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
- remove_full(s, n, page);
- add_partial(n, page, DEACTIVATE_TO_TAIL);
+ remove_full(s, n, slab_page(slab));
+ add_partial(n, slab_page(slab), DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
spin_unlock_irqrestore(&n->list_lock, flags);
@@ -3378,16 +3378,16 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
/*
* Slab on the partial list.
*/
- remove_partial(n, page);
+ remove_partial(n, slab_page(slab));
stat(s, FREE_REMOVE_PARTIAL);
} else {
/* Slab must be on the full list */
- remove_full(s, n, page);
+ remove_full(s, n, slab_page(slab));
}
spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
- discard_slab(s, page);
+ discard_slab(s, slab_page(slab));
}
/*
@@ -3468,7 +3468,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
#endif
stat(s, FREE_FASTPATH);
} else
- __slab_free(s, slab_page(slab), head, tail_obj, cnt, addr);
+ __slab_free(s, slab, head, tail_obj, cnt, addr);
}
@@ -4536,7 +4536,7 @@ void kfree(const void *x)
return;
slab = virt_to_slab(x);
- if (unlikely(!SlabAllocation(slab))) {
+ if (unlikely(!slab_test_cache(slab))) {
free_nonslab_page(slab_page(slab), object);
return;
}
--
2.32.0