From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 54/62] mm: Convert slab to use struct slab
Date: Mon,  4 Oct 2021 14:46:42 +0100
Message-ID: <20211004134650.4031813-55-willy@infradead.org>
In-Reply-To: <20211004134650.4031813-1-willy@infradead.org>

Use struct slab throughout the SLAB allocator: convert mm/slab.c to work
on struct slab instead of struct page, and replace the page-based
account_slab_page()/unaccount_slab_page() helpers in mm/slab.h with the
slab-based account_slab()/unaccount_slab().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slab.c | 405 +++++++++++++++++++++++++++---------------------------
 mm/slab.h |  24 +---
 2 files changed, 208 insertions(+), 221 deletions(-)
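
For readers following the conversion: the hunks below rely on the struct slab
accessors added earlier in this series ("mm: Split slab into its own type").
While struct slab still shares its memory with struct page, those accessors
amount to little more than a cast plus the matching page query.  The sketch
below is an editorial approximation of their shape, not code from this patch;
the exact definitions live in mm/slab.h in the series.

/*
 * Illustrative sketch only (assumes <linux/mm.h>); the helper names
 * follow the series, the bodies are approximations.
 */
static inline struct page *slab_page(struct slab *slab)
{
	/* A slab currently occupies the same memory as its head page. */
	return (struct page *)slab;
}

static inline struct slab *virt_to_slab(const void *addr)
{
	/* Same lookup as virt_to_head_page(), just typed as a slab. */
	return (struct slab *)virt_to_head_page(addr);
}

static inline int slab_nid(struct slab *slab)
{
	return page_to_nid(slab_page(slab));
}

static inline void *slab_address(struct slab *slab)
{
	return page_address(slab_page(slab));
}

Because the two layouts still coincide, this patch is type conversion rather
than behavioural change; the struct page fields only go away at the end of
the series ("mm: Remove slab from struct page").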

diff --git a/mm/slab.c b/mm/slab.c
index 0d515fd697a0..29dc09e784b8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -218,7 +218,7 @@ static void cache_reap(struct work_struct *unused);
 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 						void **list);
 static inline void fixup_slab_list(struct kmem_cache *cachep,
-				struct kmem_cache_node *n, struct page *page,
+				struct kmem_cache_node *n, struct slab *slab,
 				void **list);
 static int slab_early_init = 1;
 
@@ -372,10 +372,10 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline void *index_to_obj(struct kmem_cache *cache,
-				 const struct page *page, unsigned int idx)
+static inline void *index_to_obj(const struct kmem_cache *cache,
+				 const struct slab *slab, unsigned int idx)
 {
-	return page->s_mem + cache->size * idx;
+	return slab->s_mem + cache->size * idx;
 }
 
 #define BOOT_CPUCACHE_ENTRIES	1
@@ -418,7 +418,7 @@ static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
 	 *
 	 * If the slab management structure is off the slab, then the
 	 * alignment will already be calculated into the size. Because
-	 * the slabs are all pages aligned, the objects will be at the
+	 * the slabs are all page aligned, the objects will be at the
 	 * correct alignment when allocated.
 	 */
 	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
@@ -550,17 +550,17 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
-					struct page *page, void *objp)
+					struct slab *slab, void *objp)
 {
 	struct kmem_cache_node *n;
-	int page_node;
+	int slab_node;
 	LIST_HEAD(list);
 
-	page_node = page_to_nid(page);
-	n = get_node(cachep, page_node);
+	slab_node = slab_nid(slab);
+	n = get_node(cachep, slab_node);
 
 	spin_lock(&n->list_lock);
-	free_block(cachep, &objp, 1, page_node, &list);
+	free_block(cachep, &objp, 1, slab_node, &list);
 	spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
@@ -1367,10 +1367,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
  * did not request dmaable memory, we might get it, but that
  * would be relatively rare and ignorable.
  */
-static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 								int nodeid)
 {
 	struct page *page;
+	struct slab *slab;
 
 	flags |= cachep->allocflags;
 
@@ -1380,44 +1381,42 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 		return NULL;
 	}
 
-	account_slab_page(page, cachep->gfporder, cachep, flags);
+	slab = (struct slab *)page;
+	account_slab(slab, cachep->gfporder, cachep, flags);
 	__SetPageSlab(page);
 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
 	if (sk_memalloc_socks() && page_is_pfmemalloc(page))
-		SetPageSlabPfmemalloc(page);
+		slab_set_pfmemalloc(slab);
 
-	return page;
+	return slab;
 }
 
 /*
  * Interface to system's page release.
  */
-static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
+static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	int order = cachep->gfporder;
 
-	BUG_ON(!PageSlab(page));
-	__ClearPageSlabPfmemalloc(page);
+	BUG_ON(!slab_test_cache(slab));
+	__slab_clear_pfmemalloc(slab);
 	__ClearPageSlab(page);
 	page_mapcount_reset(page);
-	/* In union with page->mapping where page allocator expects NULL */
-	page->slab_cache = NULL;
+	page->mapping = NULL;
 
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += 1 << order;
-	unaccount_slab_page(page, order, cachep);
+	unaccount_slab(slab, order, cachep);
 	__free_pages(page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
 {
-	struct kmem_cache *cachep;
-	struct page *page;
+	struct slab *slab = container_of(head, struct slab, rcu_head);
+	struct kmem_cache *cachep = slab->slab_cache;
 
-	page = container_of(head, struct page, rcu_head);
-	cachep = page->slab_cache;
-
-	kmem_freepages(cachep, page);
+	kmem_freepages(cachep, slab);
 }
 
 #if DEBUG
@@ -1553,18 +1552,18 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 		/* Print some data about the neighboring objects, if they
 		 * exist:
 		 */
-		struct page *page = virt_to_head_page(objp);
+		struct slab *slab = virt_to_slab(objp);
 		unsigned int objnr;
 
-		objnr = obj_to_index(cachep, page, objp);
+		objnr = obj_to_index(cachep, slab_page(slab), objp);
 		if (objnr) {
-			objp = index_to_obj(cachep, page, objnr - 1);
+			objp = index_to_obj(cachep, slab, objnr - 1);
 			realobj = (char *)objp + obj_offset(cachep);
 			pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
 		}
 		if (objnr + 1 < cachep->num) {
-			objp = index_to_obj(cachep, page, objnr + 1);
+			objp = index_to_obj(cachep, slab, objnr + 1);
 			realobj = (char *)objp + obj_offset(cachep);
 			pr_err("Next obj: start=%px, len=%d\n", realobj, size);
 			print_objinfo(cachep, objp, 2);
@@ -1575,17 +1574,17 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 
 #if DEBUG
 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
-						struct page *page)
+						struct slab *slab)
 {
 	int i;
 
 	if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
-		poison_obj(cachep, page->freelist - obj_offset(cachep),
+		poison_obj(cachep, slab->freelist - obj_offset(cachep),
 			POISON_FREE);
 	}
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = index_to_obj(cachep, page, i);
+		void *objp = index_to_obj(cachep, slab, i);
 
 		if (cachep->flags & SLAB_POISON) {
 			check_poison_obj(cachep, objp);
@@ -1601,7 +1600,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 }
 #else
 static void slab_destroy_debugcheck(struct kmem_cache *cachep,
-						struct page *page)
+						struct slab *slab)
 {
 }
 #endif
@@ -1609,26 +1608,26 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
- * @page: page pointer being destroyed
+ * @slab: slab being destroyed
  *
- * Destroy all the objs in a slab page, and release the mem back to the system.
- * Before calling the slab page must have been unlinked from the cache. The
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache. The
  * kmem_cache_node ->list_lock is not held/needed.
  */
-static void slab_destroy(struct kmem_cache *cachep, struct page *page)
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slab)
 {
 	void *freelist;
 
-	freelist = page->freelist;
-	slab_destroy_debugcheck(cachep, page);
+	freelist = slab->freelist;
+	slab_destroy_debugcheck(cachep, slab);
 	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
-		call_rcu(&page->rcu_head, kmem_rcu_free);
+		call_rcu(&slab->rcu_head, kmem_rcu_free);
 	else
-		kmem_freepages(cachep, page);
+		kmem_freepages(cachep, slab);
 
 	/*
 	 * From now on, we don't use freelist
-	 * although actual page can be freed in rcu context
+	 * although actual slab can be freed in rcu context
 	 */
 	if (OFF_SLAB(cachep))
 		kmem_cache_free(cachep->freelist_cache, freelist);
@@ -1640,11 +1639,11 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
  */
 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
-	struct page *page, *n;
+	struct slab *slab, *n;
 
-	list_for_each_entry_safe(page, n, list, slab_list) {
-		list_del(&page->slab_list);
-		slab_destroy(cachep, page);
+	list_for_each_entry_safe(slab, n, list, slab_list) {
+		list_del(&slab->slab_list);
+		slab_destroy(cachep, slab);
 	}
 }
 
@@ -2194,7 +2193,7 @@ static int drain_freelist(struct kmem_cache *cache,
 {
 	struct list_head *p;
 	int nr_freed;
-	struct page *page;
+	struct slab *slab;
 
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -2206,8 +2205,8 @@ static int drain_freelist(struct kmem_cache *cache,
 			goto out;
 		}
 
-		page = list_entry(p, struct page, slab_list);
-		list_del(&page->slab_list);
+		slab = list_entry(p, struct slab, slab_list);
+		list_del(&slab->slab_list);
 		n->free_slabs--;
 		n->total_slabs--;
 		/*
@@ -2216,7 +2215,7 @@ static int drain_freelist(struct kmem_cache *cache,
 		 */
 		n->free_objects -= cache->num;
 		spin_unlock_irq(&n->list_lock);
-		slab_destroy(cache, page);
+		slab_destroy(cache, slab);
 		nr_freed++;
 	}
 out:
@@ -2291,14 +2290,14 @@ void __kmem_cache_release(struct kmem_cache *cachep)
  * which are all initialized during kmem_cache_init().
  */
 static void *alloc_slabmgmt(struct kmem_cache *cachep,
-				   struct page *page, int colour_off,
+				   struct slab *slab, int colour_off,
 				   gfp_t local_flags, int nodeid)
 {
 	void *freelist;
-	void *addr = page_address(page);
+	void *addr = slab_address(slab);
 
-	page->s_mem = addr + colour_off;
-	page->active = 0;
+	slab->s_mem = addr + colour_off;
+	slab->active = 0;
 
 	if (OBJFREELIST_SLAB(cachep))
 		freelist = NULL;
@@ -2315,24 +2314,24 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	return freelist;
 }
 
-static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
+static inline freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx)
 {
-	return ((freelist_idx_t *)page->freelist)[idx];
+	return ((freelist_idx_t *)slab->freelist)[idx];
 }
 
-static inline void set_free_obj(struct page *page,
+static inline void set_free_obj(struct slab *slab,
 					unsigned int idx, freelist_idx_t val)
 {
-	((freelist_idx_t *)(page->freelist))[idx] = val;
+	((freelist_idx_t *)(slab->freelist))[idx] = val;
 }
 
-static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
+static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
 {
 #if DEBUG
 	int i;
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = index_to_obj(cachep, page, i);
+		void *objp = index_to_obj(cachep, slab, i);
 
 		if (cachep->flags & SLAB_STORE_USER)
 			*dbg_userword(cachep, objp) = NULL;
@@ -2416,17 +2415,17 @@ static freelist_idx_t next_random_slot(union freelist_init_state *state)
 }
 
 /* Swap two freelist entries */
-static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
+static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b)
 {
-	swap(((freelist_idx_t *)page->freelist)[a],
-		((freelist_idx_t *)page->freelist)[b]);
+	swap(((freelist_idx_t *)slab->freelist)[a],
+		((freelist_idx_t *)slab->freelist)[b]);
 }
 
 /*
  * Shuffle the freelist initialization state based on pre-computed lists.
  * return true if the list was successfully shuffled, false otherwise.
  */
-static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
+static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
 {
 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
 	union freelist_init_state state;
@@ -2443,7 +2442,7 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 			objfreelist = count - 1;
 		else
 			objfreelist = next_random_slot(&state);
-		page->freelist = index_to_obj(cachep, page, objfreelist) +
+		slab->freelist = index_to_obj(cachep, slab, objfreelist) +
 						obj_offset(cachep);
 		count--;
 	}
@@ -2454,51 +2453,51 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
 	 */
 	if (!precomputed) {
 		for (i = 0; i < count; i++)
-			set_free_obj(page, i, i);
+			set_free_obj(slab, i, i);
 
 		/* Fisher-Yates shuffle */
 		for (i = count - 1; i > 0; i--) {
 			rand = prandom_u32_state(&state.rnd_state);
 			rand %= (i + 1);
-			swap_free_obj(page, i, rand);
+			swap_free_obj(slab, i, rand);
 		}
 	} else {
 		for (i = 0; i < count; i++)
-			set_free_obj(page, i, next_random_slot(&state));
+			set_free_obj(slab, i, next_random_slot(&state));
 	}
 
 	if (OBJFREELIST_SLAB(cachep))
-		set_free_obj(page, cachep->num - 1, objfreelist);
+		set_free_obj(slab, cachep->num - 1, objfreelist);
 
 	return true;
 }
 #else
 static inline bool shuffle_freelist(struct kmem_cache *cachep,
-				struct page *page)
+				struct slab *slab)
 {
 	return false;
 }
 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 
 static void cache_init_objs(struct kmem_cache *cachep,
-			    struct page *page)
+			    struct slab *slab)
 {
 	int i;
 	void *objp;
 	bool shuffled;
 
-	cache_init_objs_debug(cachep, page);
+	cache_init_objs_debug(cachep, slab);
 
 	/* Try to randomize the freelist if enabled */
-	shuffled = shuffle_freelist(cachep, page);
+	shuffled = shuffle_freelist(cachep, slab);
 
 	if (!shuffled && OBJFREELIST_SLAB(cachep)) {
-		page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
+		slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) +
 						obj_offset(cachep);
 	}
 
 	for (i = 0; i < cachep->num; i++) {
-		objp = index_to_obj(cachep, page, i);
+		objp = index_to_obj(cachep, slab, i);
 		objp = kasan_init_slab_obj(cachep, objp);
 
 		/* constructor could break poison info */
@@ -2509,41 +2508,41 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		}
 
 		if (!shuffled)
-			set_free_obj(page, i, i);
+			set_free_obj(slab, i, i);
 	}
 }
 
-static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab)
 {
 	void *objp;
 
-	objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
-	page->active++;
+	objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active));
+	slab->active++;
 
 	return objp;
 }
 
 static void slab_put_obj(struct kmem_cache *cachep,
-			struct page *page, void *objp)
+			struct slab *slab, void *objp)
 {
-	unsigned int objnr = obj_to_index(cachep, page, objp);
+	unsigned int objnr = obj_to_index(cachep, slab_page(slab), objp);
 #if DEBUG
 	unsigned int i;
 
 	/* Verify double free bug */
-	for (i = page->active; i < cachep->num; i++) {
-		if (get_free_obj(page, i) == objnr) {
+	for (i = slab->active; i < cachep->num; i++) {
+		if (get_free_obj(slab, i) == objnr) {
 			pr_err("slab: double free detected in cache '%s', objp %px\n",
 			       cachep->name, objp);
 			BUG();
 		}
 	}
 #endif
-	page->active--;
-	if (!page->freelist)
-		page->freelist = objp + obj_offset(cachep);
+	slab->active--;
+	if (!slab->freelist)
+		slab->freelist = objp + obj_offset(cachep);
 
-	set_free_obj(page, page->active, objnr);
+	set_free_obj(slab, slab->active, objnr);
 }
 
 /*
@@ -2551,26 +2550,26 @@ static void slab_put_obj(struct kmem_cache *cachep,
  * for the slab allocator to be able to lookup the cache and slab of a
  * virtual address for kfree, ksize, and slab debugging.
  */
-static void slab_map_pages(struct kmem_cache *cache, struct page *page,
+static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
 			   void *freelist)
 {
-	page->slab_cache = cache;
-	page->freelist = freelist;
+	slab->slab_cache = cache;
+	slab->freelist = freelist;
 }
 
 /*
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static struct page *cache_grow_begin(struct kmem_cache *cachep,
+static struct slab *cache_grow_begin(struct kmem_cache *cachep,
 				gfp_t flags, int nodeid)
 {
 	void *freelist;
 	size_t offset;
 	gfp_t local_flags;
-	int page_node;
+	int slab_node;
 	struct kmem_cache_node *n;
-	struct page *page;
+	struct slab *slab;
 
 	/*
 	 * Be lazy and only check for valid flags here,  keeping it out of the
@@ -2590,12 +2589,12 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 	 * Get mem for the objs.  Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	page = kmem_getpages(cachep, local_flags, nodeid);
-	if (!page)
+	slab = kmem_getpages(cachep, local_flags, nodeid);
+	if (!slab)
 		goto failed;
 
-	page_node = page_to_nid(page);
-	n = get_node(cachep, page_node);
+	slab_node = slab_nid(slab);
+	n = get_node(cachep, slab_node);
 
 	/* Get colour for the slab, and cal the next value. */
 	n->colour_next++;
@@ -2610,57 +2609,57 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
 	/*
 	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
-	 * page_address() in the latter returns a non-tagged pointer,
+	 * slab_address() in the latter returns a non-tagged pointer,
 	 * as it should be for slab pages.
 	 */
-	kasan_poison_slab(page);
+	kasan_poison_slab(slab_page(slab));
 
 	/* Get slab management. */
-	freelist = alloc_slabmgmt(cachep, page, offset,
-			local_flags & ~GFP_CONSTRAINT_MASK, page_node);
+	freelist = alloc_slabmgmt(cachep, slab, offset,
+			local_flags & ~GFP_CONSTRAINT_MASK, slab_node);
 	if (OFF_SLAB(cachep) && !freelist)
 		goto opps1;
 
-	slab_map_pages(cachep, page, freelist);
+	slab_map_pages(cachep, slab, freelist);
 
-	cache_init_objs(cachep, page);
+	cache_init_objs(cachep, slab);
 
 	if (gfpflags_allow_blocking(local_flags))
 		local_irq_disable();
 
-	return page;
+	return slab;
 
 opps1:
-	kmem_freepages(cachep, page);
+	kmem_freepages(cachep, slab);
 failed:
 	if (gfpflags_allow_blocking(local_flags))
 		local_irq_disable();
 	return NULL;
 }
 
-static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab)
 {
 	struct kmem_cache_node *n;
 	void *list = NULL;
 
 	check_irq_off();
 
-	if (!page)
+	if (!slab)
 		return;
 
-	INIT_LIST_HEAD(&page->slab_list);
-	n = get_node(cachep, page_to_nid(page));
+	INIT_LIST_HEAD(&slab->slab_list);
+	n = get_node(cachep, slab_nid(slab));
 
 	spin_lock(&n->list_lock);
 	n->total_slabs++;
-	if (!page->active) {
-		list_add_tail(&page->slab_list, &n->slabs_free);
+	if (!slab->active) {
+		list_add_tail(&slab->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		fixup_slab_list(cachep, n, page, &list);
+		fixup_slab_list(cachep, n, slab, &list);
 
 	STATS_INC_GROWN(cachep);
-	n->free_objects += cachep->num - page->active;
+	n->free_objects += cachep->num - slab->active;
 	spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
@@ -2708,13 +2707,13 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 				   unsigned long caller)
 {
 	unsigned int objnr;
-	struct page *page;
+	struct slab *slab;
 
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	objp -= obj_offset(cachep);
 	kfree_debugcheck(objp);
-	page = virt_to_head_page(objp);
+	slab = virt_to_slab(objp);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
 		verify_redzone_free(cachep, objp);
@@ -2724,10 +2723,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 	if (cachep->flags & SLAB_STORE_USER)
 		*dbg_userword(cachep, objp) = (void *)caller;
 
-	objnr = obj_to_index(cachep, page, objp);
+	objnr = obj_to_index(cachep, slab_page(slab), objp);
 
 	BUG_ON(objnr >= cachep->num);
-	BUG_ON(objp != index_to_obj(cachep, page, objnr));
+	BUG_ON(objp != index_to_obj(cachep, slab, objnr));
 
 	if (cachep->flags & SLAB_POISON) {
 		poison_obj(cachep, objp, POISON_FREE);
@@ -2757,97 +2756,97 @@ static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 }
 
 static inline void fixup_slab_list(struct kmem_cache *cachep,
-				struct kmem_cache_node *n, struct page *page,
+				struct kmem_cache_node *n, struct slab *slab,
 				void **list)
 {
-	/* move slabp to correct slabp list: */
-	list_del(&page->slab_list);
-	if (page->active == cachep->num) {
-		list_add(&page->slab_list, &n->slabs_full);
+	/* move slab to correct slab list: */
+	list_del(&slab->slab_list);
+	if (slab->active == cachep->num) {
+		list_add(&slab->slab_list, &n->slabs_full);
 		if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
 			/* Poisoning will be done without holding the lock */
 			if (cachep->flags & SLAB_POISON) {
-				void **objp = page->freelist;
+				void **objp = slab->freelist;
 
 				*objp = *list;
 				*list = objp;
 			}
 #endif
-			page->freelist = NULL;
+			slab->freelist = NULL;
 		}
 	} else
-		list_add(&page->slab_list, &n->slabs_partial);
+		list_add(&slab->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
-static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
-					struct page *page, bool pfmemalloc)
+static noinline struct slab *get_valid_first_slab(struct kmem_cache_node *n,
+					struct slab *slab, bool pfmemalloc)
 {
-	if (!page)
+	if (!slab)
 		return NULL;
 
 	if (pfmemalloc)
-		return page;
+		return slab;
 
-	if (!PageSlabPfmemalloc(page))
-		return page;
+	if (!slab_test_pfmemalloc(slab))
+		return slab;
 
 	/* No need to keep pfmemalloc slab if we have enough free objects */
 	if (n->free_objects > n->free_limit) {
-		ClearPageSlabPfmemalloc(page);
-		return page;
+		slab_clear_pfmemalloc(slab);
+		return slab;
 	}
 
 	/* Move pfmemalloc slab to the end of list to speed up next search */
-	list_del(&page->slab_list);
-	if (!page->active) {
-		list_add_tail(&page->slab_list, &n->slabs_free);
+	list_del(&slab->slab_list);
+	if (!slab->active) {
+		list_add_tail(&slab->slab_list, &n->slabs_free);
 		n->free_slabs++;
 	} else
-		list_add_tail(&page->slab_list, &n->slabs_partial);
+		list_add_tail(&slab->slab_list, &n->slabs_partial);
 
-	list_for_each_entry(page, &n->slabs_partial, slab_list) {
-		if (!PageSlabPfmemalloc(page))
-			return page;
+	list_for_each_entry(slab, &n->slabs_partial, slab_list) {
+		if (!slab_test_pfmemalloc(slab))
+			return slab;
 	}
 
 	n->free_touched = 1;
-	list_for_each_entry(page, &n->slabs_free, slab_list) {
-		if (!PageSlabPfmemalloc(page)) {
+	list_for_each_entry(slab, &n->slabs_free, slab_list) {
+		if (!slab_test_pfmemalloc(slab)) {
 			n->free_slabs--;
-			return page;
+			return slab;
 		}
 	}
 
 	return NULL;
 }
 
-static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
+static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
 {
-	struct page *page;
+	struct slab *slab;
 
 	assert_spin_locked(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial, struct page,
+	slab = list_first_entry_or_null(&n->slabs_partial, struct slab,
 					slab_list);
-	if (!page) {
+	if (!slab) {
 		n->free_touched = 1;
-		page = list_first_entry_or_null(&n->slabs_free, struct page,
+		slab = list_first_entry_or_null(&n->slabs_free, struct slab,
 						slab_list);
-		if (page)
+		if (slab)
 			n->free_slabs--;
 	}
 
 	if (sk_memalloc_socks())
-		page = get_valid_first_slab(n, page, pfmemalloc);
+		slab = get_valid_first_slab(n, slab, pfmemalloc);
 
-	return page;
+	return slab;
 }
 
 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
 				struct kmem_cache_node *n, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 	void *obj;
 	void *list = NULL;
 
@@ -2855,16 +2854,16 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
 		return NULL;
 
 	spin_lock(&n->list_lock);
-	page = get_first_slab(n, true);
-	if (!page) {
+	slab = get_first_slab(n, true);
+	if (!slab) {
 		spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
-	obj = slab_get_obj(cachep, page);
+	obj = slab_get_obj(cachep, slab);
 	n->free_objects--;
 
-	fixup_slab_list(cachep, n, page, &list);
+	fixup_slab_list(cachep, n, slab, &list);
 
 	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
@@ -2877,20 +2876,20 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
  * or cache_grow_end() for new slab
  */
 static __always_inline int alloc_block(struct kmem_cache *cachep,
-		struct array_cache *ac, struct page *page, int batchcount)
+		struct array_cache *ac, struct slab *slab, int batchcount)
 {
 	/*
 	 * There must be at least one object available for
 	 * allocation.
 	 */
-	BUG_ON(page->active >= cachep->num);
+	BUG_ON(slab->active >= cachep->num);
 
-	while (page->active < cachep->num && batchcount--) {
+	while (slab->active < cachep->num && batchcount--) {
 		STATS_INC_ALLOCED(cachep);
 		STATS_INC_ACTIVE(cachep);
 		STATS_SET_HIGH(cachep);
 
-		ac->entry[ac->avail++] = slab_get_obj(cachep, page);
+		ac->entry[ac->avail++] = slab_get_obj(cachep, slab);
 	}
 
 	return batchcount;
@@ -2903,7 +2902,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac, *shared;
 	int node;
 	void *list = NULL;
-	struct page *page;
+	struct slab *slab;
 
 	check_irq_off();
 	node = numa_mem_id();
@@ -2936,14 +2935,14 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 
 	while (batchcount > 0) {
 		/* Get slab alloc is to come from. */
-		page = get_first_slab(n, false);
-		if (!page)
+		slab = get_first_slab(n, false);
+		if (!slab)
 			goto must_grow;
 
 		check_spinlock_acquired(cachep);
 
-		batchcount = alloc_block(cachep, ac, page, batchcount);
-		fixup_slab_list(cachep, n, page, &list);
+		batchcount = alloc_block(cachep, ac, slab, batchcount);
+		fixup_slab_list(cachep, n, slab, &list);
 	}
 
 must_grow:
@@ -2962,16 +2961,16 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 				return obj;
 		}
 
-		page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
+		slab = cache_grow_begin(cachep, gfp_exact_node(flags), node);
 
 		/*
 		 * cache_grow_begin() can reenable interrupts,
 		 * then ac could change.
 		 */
 		ac = cpu_cache_get(cachep);
-		if (!ac->avail && page)
-			alloc_block(cachep, ac, page, batchcount);
-		cache_grow_end(cachep, page);
+		if (!ac->avail && slab)
+			alloc_block(cachep, ac, slab, batchcount);
+		cache_grow_end(cachep, slab);
 
 		if (!ac->avail)
 			return NULL;
@@ -3101,7 +3100,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 	struct zone *zone;
 	enum zone_type highest_zoneidx = gfp_zone(flags);
 	void *obj = NULL;
-	struct page *page;
+	struct slab *slab;
 	int nid;
 	unsigned int cpuset_mems_cookie;
 
@@ -3137,10 +3136,10 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 		 * We may trigger various forms of reclaim on the allowed
 		 * set and go into memory reserves if necessary.
 		 */
-		page = cache_grow_begin(cache, flags, numa_mem_id());
-		cache_grow_end(cache, page);
-		if (page) {
-			nid = page_to_nid(page);
+		slab = cache_grow_begin(cache, flags, numa_mem_id());
+		cache_grow_end(cache, slab);
+		if (slab) {
+			nid = slab_nid(slab);
 			obj = ____cache_alloc_node(cache,
 				gfp_exact_node(flags), nid);
 
@@ -3164,7 +3163,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
-	struct page *page;
+	struct slab *slab;
 	struct kmem_cache_node *n;
 	void *obj = NULL;
 	void *list = NULL;
@@ -3175,8 +3174,8 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 
 	check_irq_off();
 	spin_lock(&n->list_lock);
-	page = get_first_slab(n, false);
-	if (!page)
+	slab = get_first_slab(n, false);
+	if (!slab)
 		goto must_grow;
 
 	check_spinlock_acquired_node(cachep, nodeid);
@@ -3185,12 +3184,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 	STATS_INC_ACTIVE(cachep);
 	STATS_SET_HIGH(cachep);
 
-	BUG_ON(page->active == cachep->num);
+	BUG_ON(slab->active == cachep->num);
 
-	obj = slab_get_obj(cachep, page);
+	obj = slab_get_obj(cachep, slab);
 	n->free_objects--;
 
-	fixup_slab_list(cachep, n, page, &list);
+	fixup_slab_list(cachep, n, slab, &list);
 
 	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
@@ -3198,12 +3197,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 
 must_grow:
 	spin_unlock(&n->list_lock);
-	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
-	if (page) {
+	slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
+	if (slab) {
 		/* This slab isn't counted yet so don't update free_objects */
-		obj = slab_get_obj(cachep, page);
+		obj = slab_get_obj(cachep, slab);
 	}
-	cache_grow_end(cachep, page);
+	cache_grow_end(cachep, slab);
 
 	return obj ? obj : fallback_alloc(cachep, flags);
 }
@@ -3333,40 +3332,40 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
 {
 	int i;
 	struct kmem_cache_node *n = get_node(cachep, node);
-	struct page *page;
+	struct slab *slab;
 
 	n->free_objects += nr_objects;
 
 	for (i = 0; i < nr_objects; i++) {
 		void *objp;
-		struct page *page;
+		struct slab *slab;
 
 		objp = objpp[i];
 
-		page = virt_to_head_page(objp);
-		list_del(&page->slab_list);
+		slab = virt_to_slab(objp);
+		list_del(&slab->slab_list);
 		check_spinlock_acquired_node(cachep, node);
-		slab_put_obj(cachep, page, objp);
+		slab_put_obj(cachep, slab, objp);
 		STATS_DEC_ACTIVE(cachep);
 
 		/* fixup slab chains */
-		if (page->active == 0) {
-			list_add(&page->slab_list, &n->slabs_free);
+		if (slab->active == 0) {
+			list_add(&slab->slab_list, &n->slabs_free);
 			n->free_slabs++;
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->slab_list, &n->slabs_partial);
+			list_add_tail(&slab->slab_list, &n->slabs_partial);
 		}
 	}
 
 	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
 		n->free_objects -= cachep->num;
 
-		page = list_last_entry(&n->slabs_free, struct page, slab_list);
-		list_move(&page->slab_list, list);
+		slab = list_last_entry(&n->slabs_free, struct slab, slab_list);
+		list_move(&slab->slab_list, list);
 		n->free_slabs--;
 		n->total_slabs--;
 	}
@@ -3402,10 +3401,10 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #if STATS
 	{
 		int i = 0;
-		struct page *page;
+		struct slab *slab;
 
-		list_for_each_entry(page, &n->slabs_free, slab_list) {
-			BUG_ON(page->active);
+		list_for_each_entry(slab, &n->slabs_free, slab_list) {
+			BUG_ON(slab->active);
 
 			i++;
 		}
@@ -3481,10 +3480,10 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	}
 
 	if (sk_memalloc_socks()) {
-		struct page *page = virt_to_head_page(objp);
+		struct slab *slab = virt_to_slab(objp);
 
-		if (unlikely(PageSlabPfmemalloc(page))) {
-			cache_free_pfmemalloc(cachep, page, objp);
+		if (unlikely(slab_test_pfmemalloc(slab))) {
+			cache_free_pfmemalloc(cachep, slab, objp);
 			return;
 		}
 	}
@@ -3671,7 +3670,7 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 	kpp->kp_data_offset = obj_offset(cachep);
 	slab = virt_to_slab(objp);
 	objnr = obj_to_index(cachep, slab_page(slab), objp);
-	objp = index_to_obj(cachep, slab_page(slab), objnr);
+	objp = index_to_obj(cachep, slab, objnr);
 	kpp->kp_objp = objp;
 	if (DEBUG && cachep->flags & SLAB_STORE_USER)
 		kpp->kp_ret = *dbg_userword(cachep, objp);
@@ -4199,7 +4198,7 @@ void __check_heap_object(const void *ptr, unsigned long n,
 	if (is_kfence_address(ptr))
 		offset = ptr - kfence_object_start(ptr);
 	else
-		offset = ptr - index_to_obj(cachep, slab_page(slab), objnr) - obj_offset(cachep);
+		offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep);
 
 	/* Allow address range falling entirely within usercopy region. */
 	if (offset >= cachep->useroffset &&
diff --git a/mm/slab.h b/mm/slab.h
index 53fe3a746973..7631e274a840 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -489,39 +489,27 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
 	return slab->slab_cache;
 }
 
-static __always_inline void account_slab_page(struct page *page, int order,
+static __always_inline void account_slab(struct slab *slab, int order,
 					      struct kmem_cache *s,
 					      gfp_t gfp)
 {
 	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
-		memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+		memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    PAGE_SIZE << order);
 }
 
-static __always_inline void unaccount_slab_page(struct page *page, int order,
+static __always_inline void unaccount_slab(struct slab *slab, int order,
 						struct kmem_cache *s)
 {
 	if (memcg_kmem_enabled())
-		memcg_free_page_obj_cgroups(page);
+		memcg_free_page_obj_cgroups(slab_page(slab));
 
-	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
 }
 
-static __always_inline void account_slab(struct slab *slab, int order,
-					 struct kmem_cache *s, gfp_t gfp)
-{
-	account_slab_page(slab_page(slab), order, s, gfp);
-}
-
-static __always_inline void unaccount_slab(struct slab *slab, int order,
-					   struct kmem_cache *s)
-{
-	unaccount_slab_page(slab_page(slab), order, s);
-}
-
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 {
 	struct kmem_cache *cachep;
-- 
2.32.0




Thread overview: 77+ messages
2021-10-04 13:45 [PATCH 00/62] Separate struct slab from struct page Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 01/62] mm: Convert page_to_section() to pgflags_section() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 02/62] mm: Add pgflags_nid() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 03/62] mm: Split slab into its own type Matthew Wilcox (Oracle)
2021-10-05 16:10   ` David Hildenbrand
2021-10-05 18:48     ` Matthew Wilcox
2021-10-12  7:25       ` David Hildenbrand
2021-10-12 14:13         ` Matthew Wilcox
2021-10-12 14:17           ` David Hildenbrand
2021-10-13 18:08             ` Johannes Weiner
2021-10-13 18:31               ` Matthew Wilcox
2021-10-14  7:22                 ` David Hildenbrand
2021-10-14 12:44                   ` Johannes Weiner
2021-10-14 13:08                     ` Matthew Wilcox
2021-10-04 13:45 ` [PATCH 04/62] mm: Add account_slab() and unaccount_slab() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 05/62] mm: Convert virt_to_cache() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 06/62] mm: Convert __ksize() to " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 07/62] mm: Use struct slab in kmem_obj_info() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 08/62] mm: Convert check_heap_object() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 09/62] mm/slub: Convert process_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 10/62] mm/slub: Convert detached_freelist to use " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 11/62] mm/slub: Convert kfree() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 12/62] mm/slub: Convert __slab_free() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 13/62] mm/slub: Convert new_slab() to return " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 14/62] mm/slub: Convert early_kmem_cache_node_alloc() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 15/62] mm/slub: Convert kmem_cache_cpu to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 16/62] mm/slub: Convert show_slab_objects() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 17/62] mm/slub: Convert validate_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 18/62] mm/slub: Convert count_partial() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 19/62] mm/slub: Convert bootstrap() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 20/62] mm/slub: Convert __kmem_cache_do_shrink() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 21/62] mm/slub: Convert free_partial() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 22/62] mm/slub: Convert list_slab_objects() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 23/62] mm/slub: Convert slab_alloc_node() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 24/62] mm/slub: Convert get_freelist() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 25/62] mm/slub: Convert node_match() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 26/62] mm/slub: Convert slab flushing to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 27/62] mm/slub: Convert __unfreeze_partials to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 28/62] mm/slub: Convert deactivate_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 29/62] mm/slub: Convert acquire_slab() to take a struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 30/62] mm/slub: Convert partial slab management to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 31/62] mm/slub: Convert slab freeing " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 32/62] mm/slub: Convert shuffle_freelist " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 33/62] mm/slub: Remove struct page argument to next_freelist_entry() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 34/62] mm/slub: Remove struct page argument from setup_object() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 35/62] mm/slub: Convert freelist_corrupted() to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 36/62] mm/slub: Convert full slab management " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 37/62] mm/slub: Convert free_consistency_checks() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 38/62] mm/slub: Convert alloc_debug_processing() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 39/62] mm/slub: Convert check_object() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 40/62] mm/slub: Convert on_freelist() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 41/62] mm/slub: Convert check_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 42/62] mm/slub: Convert check_valid_pointer() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 43/62] mm/slub: Convert object_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 44/62] mm/slub: Convert print_trailer() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 45/62] mm/slub: Convert slab_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 46/62] mm/slub: Convert print_page_info() to print_slab_info() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 47/62] mm/slub: Convert trace() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 48/62] mm/slub: Convert cmpxchg_double_slab to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 49/62] mm/slub: Convert get_map() and __fill_map() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 50/62] mm/slub: Convert slab_lock() and slab_unlock() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 51/62] mm/slub: Convert setup_page_debug() to setup_slab_debug() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 52/62] mm/slub: Convert pfmemalloc_match() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 53/62] mm/slub: Remove pfmemalloc_match_unsafe() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 54/62] mm: Convert slab to use struct slab Matthew Wilcox (Oracle) [this message]
2021-10-04 13:46 ` [PATCH 55/62] mm: Convert slob to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 56/62] mm: Convert slub " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 57/62] memcg: Convert object cgroups from struct page to " Matthew Wilcox (Oracle)
2021-10-11 17:13   ` Johannes Weiner
2021-10-12  3:16     ` Matthew Wilcox
2021-10-04 13:46 ` [PATCH 58/62] mm/kasan: Convert " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 59/62] zsmalloc: Stop using slab fields in struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 60/62] bootmem: Use page->index instead of page->freelist Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 61/62] iommu: Use put_pages_list Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 62/62] mm: Remove slab from struct page Matthew Wilcox (Oracle)
2021-10-11 20:07 ` [PATCH 00/62] Separate struct " Johannes Weiner
2021-10-12  3:30   ` Matthew Wilcox
