From: Vlastimil Babka <vbabka@suse.cz>
To: Matthew Wilcox <willy@infradead.org>,
Christoph Lameter <cl@linux.com>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Roman Gushchin <guro@fb.com>, Hyeonggon Yoo <42.hyeyoo@gmail.com>,
patches@lists.linux.dev, Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH v4 18/32] mm/slub: Finish struct page to struct slab conversion
Date: Tue, 4 Jan 2022 01:10:32 +0100
Message-ID: <20220104001046.12263-19-vbabka@suse.cz>
In-Reply-To: <20220104001046.12263-1-vbabka@suse.cz>
Update comments mentioning pages to mention slabs where appropriate.
Also rename some goto labels accordingly.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
---
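Note for reviewers (not part of the commit): the sizing heuristic that the
reworded comment in slub_set_cpu_partial() refers to can be sketched
stand-alone as below. DIV_ROUND_UP mirrors the kernel macro and
objs_per_slab stands in for oo_objects(s->oo); both are used here purely
for illustration.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Two half-full slabs hold roughly objs_per_slab objects in total. */
static unsigned int cpu_partial_slabs(unsigned int nr_objects,
				      unsigned int objs_per_slab)
{
	return DIV_ROUND_UP(nr_objects * 2, objs_per_slab);
}

int main(void)
{
	/* e.g. 30 objects requested with 16 objects per slab -> 4 slabs */
	printf("%u\n", cpu_partial_slabs(30, 16));
	return 0;
}
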
include/linux/slub_def.h |   2 +-
mm/slub.c                | 105 +++++++++++++++++++--------------------
2 files changed, 53 insertions(+), 54 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 00d99afe1c0e..8a9c2876ca89 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -99,7 +99,7 @@ struct kmem_cache {
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
unsigned int cpu_partial;
- /* Number of per cpu partial pages to keep around */
+ /* Number of per cpu partial slabs to keep around */
unsigned int cpu_partial_slabs;
#endif
struct kmem_cache_order_objects oo;
diff --git a/mm/slub.c b/mm/slub.c
index e89208f3197a..cc64ba9d9963 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -48,7 +48,7 @@
* 1. slab_mutex (Global Mutex)
* 2. node->list_lock (Spinlock)
* 3. kmem_cache->cpu_slab->lock (Local lock)
- * 4. slab_lock(page) (Only on some arches or for debugging)
+ * 4. slab_lock(slab) (Only on some arches or for debugging)
* 5. object_map_lock (Only for debugging)
*
* slab_mutex
@@ -64,19 +64,19 @@
*
* The slab_lock is only used for debugging and on arches that do not
* have the ability to do a cmpxchg_double. It only protects:
- * A. page->freelist -> List of object free in a page
- * B. page->inuse -> Number of objects in use
- * C. page->objects -> Number of objects in page
- * D. page->frozen -> frozen state
+ * A. slab->freelist -> List of free objects in a slab
+ * B. slab->inuse -> Number of objects in use
+ * C. slab->objects -> Number of objects in slab
+ * D. slab->frozen -> frozen state
*
* Frozen slabs
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list except per cpu partial list. The processor that froze the
- * slab is the one who can perform list operations on the page. Other
+ * slab is the one who can perform list operations on the slab. Other
* processors may put objects onto the freelist but the processor that
* froze the slab is the only one that can retrieve the objects from the
- * page's freelist.
+ * slab's freelist.
*
* list_lock
*
@@ -135,7 +135,7 @@
* minimal so we rely on the page allocators per cpu caches for
* fast frees and allocs.
*
- * page->frozen The slab is frozen and exempt from list processing.
+ * slab->frozen The slab is frozen and exempt from list processing.
* This means that the slab is dedicated to a purpose
* such as satisfying allocations for a specific
* processor. Objects may be freed in the slab while
@@ -250,7 +250,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
+#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
/* Internal SLUB flags */
/* Poison object */
@@ -423,8 +423,8 @@ static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
/*
* We take the number of objects but actually limit the number of
- * pages on the per cpu partial list, in order to limit excessive
- * growth of the list. For simplicity we assume that the pages will
+ * slabs on the per cpu partial list, in order to limit excessive
+ * growth of the list. For simplicity we assume that the slabs will
* be half-full.
*/
nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
@@ -594,9 +594,9 @@ static inline bool slab_add_kunit_errors(void) { return false; }
#endif
/*
- * Determine a map of object in use on a page.
+ * Determine a map of objects in use in a slab.
*
- * Node listlock must be held to guarantee that the page does
+ * Node listlock must be held to guarantee that the slab does
* not vanish from under us.
*/
static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
@@ -1139,7 +1139,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
}
/*
- * Determine if a certain object on a page is on the freelist. Must hold the
+ * Determine if a certain object in a slab is on the freelist. Must hold the
* slab lock to guarantee that the chains are in a consistent state.
*/
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
@@ -2184,7 +2184,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
}
/*
- * Get a page from somewhere. Search in increasing NUMA distances.
+ * Get a slab from somewhere. Search in increasing NUMA distances.
*/
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
struct slab **ret_slab)
@@ -2248,7 +2248,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
}
/*
- * Get a partial page, lock it and return it.
+ * Get a partial slab, lock it and return it.
*/
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
struct slab **ret_slab)
@@ -2340,7 +2340,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
}
/*
- * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
* unfreezes the slabs and puts it on the proper list.
* Assumes the slab has been already safely taken away from kmem_cache_cpu
* by the caller.
@@ -2387,18 +2387,18 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
}
/*
- * Stage two: Unfreeze the page while splicing the per-cpu
- * freelist to the head of page's freelist.
+ * Stage two: Unfreeze the slab while splicing the per-cpu
+ * freelist to the head of slab's freelist.
*
- * Ensure that the page is unfrozen while the list presence
+ * Ensure that the slab is unfrozen while the list presence
* reflects the actual number of objects during unfreeze.
*
* We setup the list membership and then perform a cmpxchg
- * with the count. If there is a mismatch then the page
- * is not unfrozen but the page is on the wrong list.
+ * with the count. If there is a mismatch then the slab
+ * is not unfrozen but the slab is on the wrong list.
*
* Then we restart the process which may have to remove
- * the page from the list that we just put it on again
+ * the slab from the list that we just put it on again
* because the number of objects in the slab may have
* changed.
*/
@@ -2426,9 +2426,8 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
if (!lock) {
lock = 1;
/*
- * Taking the spinlock removes the possibility
- * that acquire_slab() will see a slab page that
- * is frozen
+ * Taking the spinlock removes the possibility that
+ * acquire_slab() will see a slab that is frozen
*/
spin_lock_irqsave(&n->list_lock, flags);
}
@@ -2569,8 +2568,8 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
}
/*
- * Put a page that was just frozen (in __slab_free|get_partial_node) into a
- * partial page slot if available.
+ * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
+ * partial slab slot if available.
*
* If we did not find a slot then simply move all the partials to the
* per node partial list.
@@ -2841,12 +2840,12 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
}
/*
- * Check the page->freelist of a page and either transfer the freelist to the
- * per cpu freelist or deactivate the page.
+ * Check the slab->freelist and either transfer the freelist to the
+ * per cpu freelist or deactivate the slab.
*
- * The page is still frozen if the return value is not NULL.
+ * The slab is still frozen if the return value is not NULL.
*
- * If this function returns NULL then the page has been unfrozen.
+ * If this function returns NULL then the slab has been unfrozen.
*/
static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
{
@@ -2902,7 +2901,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
stat(s, ALLOC_SLOWPATH);
-reread_page:
+reread_slab:
slab = READ_ONCE(c->slab);
if (!slab) {
@@ -2939,11 +2938,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (unlikely(!pfmemalloc_match(slab, gfpflags)))
goto deactivate_slab;
- /* must check again c->page in case we got preempted and it changed */
+ /* must check again c->slab in case we got preempted and it changed */
local_lock_irqsave(&s->cpu_slab->lock, flags);
if (unlikely(slab != c->slab)) {
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- goto reread_page;
+ goto reread_slab;
}
freelist = c->freelist;
if (freelist)
@@ -2966,8 +2965,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
/*
* freelist is pointing to the list of objects to be used.
- * page is pointing to the page from which the objects are obtained.
- * That page must be frozen for per cpu allocations to work.
+ * slab is pointing to the slab from which the objects are obtained.
+ * That slab must be frozen for per cpu allocations to work.
*/
VM_BUG_ON(!c->slab->frozen);
c->freelist = get_freepointer(s, freelist);
@@ -2980,7 +2979,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
local_lock_irqsave(&s->cpu_slab->lock, flags);
if (slab != c->slab) {
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- goto reread_page;
+ goto reread_slab;
}
freelist = c->freelist;
c->slab = NULL;
@@ -2994,7 +2993,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
local_lock_irqsave(&s->cpu_slab->lock, flags);
if (unlikely(c->slab)) {
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
- goto reread_page;
+ goto reread_slab;
}
if (unlikely(!slub_percpu_partial(c))) {
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -3013,7 +3012,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
freelist = get_partial(s, gfpflags, node, &slab);
if (freelist)
- goto check_new_page;
+ goto check_new_slab;
slub_put_cpu_ptr(s->cpu_slab);
slab = new_slab(s, gfpflags, node);
@@ -3025,7 +3024,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
}
/*
- * No other reference to the page yet so we can
+ * No other reference to the slab yet so we can
* muck around with it freely without cmpxchg
*/
freelist = slab->freelist;
@@ -3033,7 +3032,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
stat(s, ALLOC_SLAB);
-check_new_page:
+check_new_slab:
if (kmem_cache_debug(s)) {
if (!alloc_debug_processing(s, slab, freelist, addr)) {
@@ -3055,7 +3054,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
*/
goto return_single;
-retry_load_page:
+retry_load_slab:
local_lock_irqsave(&s->cpu_slab->lock, flags);
if (unlikely(c->slab)) {
@@ -3072,7 +3071,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
stat(s, CPUSLAB_FLUSH);
- goto retry_load_page;
+ goto retry_load_slab;
}
c->slab = slab;
@@ -3169,9 +3168,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
/*
* Irqless object alloc/free algorithm used here depends on sequence
* of fetching cpu_slab's data. tid should be fetched before anything
- * on c to guarantee that object and page associated with previous tid
+ * on c to guarantee that object and slab associated with previous tid
* won't be used with current tid. If we fetch tid first, object and
- * page could be one associated with next tid and our alloc/free
+ * slab could be one associated with next tid and our alloc/free
* request will be failed. In this case, we will retry. So, no problem.
*/
barrier();
@@ -3295,7 +3294,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
* have a longer lifetime than the cpu slabs in most processing loads.
*
* So we still attempt to reduce cache line usage. Just take the slab
- * lock and free the item. If there is no additional partial page
+ * lock and free the item. If there is no additional partial slab
* handling required then we can return immediately.
*/
static void __slab_free(struct kmem_cache *s, struct slab *slab,
@@ -3373,7 +3372,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
stat(s, FREE_FROZEN);
} else if (new.frozen) {
/*
- * If we just froze the page then put it onto the
+ * If we just froze the slab then put it onto the
* per cpu partial list.
*/
put_cpu_partial(s, slab, 1);
@@ -3427,7 +3426,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
* with all sorts of special processing.
*
* Bulk free of a freelist with several objects (all pointing to the
- * same page) possible by specifying head and tail ptr, plus objects
+ * same slab) possible by specifying head and tail ptr, plus objects
* count (cnt). Bulk free indicated by tail pointer being set.
*/
static __always_inline void do_slab_free(struct kmem_cache *s,
@@ -4213,7 +4212,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
#endif
/*
- * The larger the object size is, the more pages we want on the partial
+ * The larger the object size is, the more slabs we want on the partial
* list to avoid pounding the page allocator excessively.
*/
set_min_partial(s, ilog2(s->size) / 2);
@@ -4598,12 +4597,12 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
* Build lists of slabs to discard or promote.
*
* Note that concurrent frees may occur while we hold the
- * list_lock. page->inuse here is the upper limit.
+ * list_lock. slab->inuse here is the upper limit.
*/
list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
int free = slab->objects - slab->inuse;
- /* Do not reread page->inuse */
+ /* Do not reread slab->inuse */
barrier();
/* We do not keep full slabs on the list */
@@ -5482,7 +5481,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
slabs += slab->slabs;
}
- /* Approximate half-full pages , see slub_set_cpu_partial() */
+ /* Approximate half-full slabs, see slub_set_cpu_partial() */
objects = (slabs * oo_objects(s->oo)) / 2;
len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
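
Not part of the patch, for reference only: the "Approximate half-full
slabs" comment in the last hunk is the same heuristic in reverse. A
hypothetical helper, with objs_per_slab again standing in for
oo_objects(s->oo):

/* Given N per-cpu partial slabs assumed half-full, estimate objects. */
static unsigned int approx_partial_objects(unsigned int slabs,
					   unsigned int objs_per_slab)
{
	/* mirrors: objects = (slabs * oo_objects(s->oo)) / 2; */
	return (slabs * objs_per_slab) / 2;
}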
--
2.34.1