From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 15/62] mm/slub: Convert kmem_cache_cpu to struct slab
Date: Mon,  4 Oct 2021 14:46:03 +0100
Message-ID: <20211004134650.4031813-16-willy@infradead.org>
In-Reply-To: <20211004134650.4031813-1-willy@infradead.org>

To avoid converting between struct page and struct slab at every call
boundary, all of these functions have to be converted at once.  This adds
a little type-safety.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/slub_def.h |   4 +-
 mm/slub.c                | 208 +++++++++++++++++++--------------------
 2 files changed, 106 insertions(+), 106 deletions(-)

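Note: the slab_page() calls added below are transitional glue for callees
that still take a struct page; later patches in the series convert those
callees and the conversions disappear again.  As an illustrative sketch
only (the real helper comes from the earlier patches that split out
struct slab and may differ in detail), such a helper can be a plain cast
for as long as struct slab overlays struct page:

	/* Sketch: assumes struct slab still aliases the underlying struct page. */
	static inline struct page *slab_page(struct slab *slab)
	{
		return (struct page *)slab;
	}

With that in place, calls such as pfmemalloc_match(slab_page(slab), gfpflags)
below keep compiling unchanged until pfmemalloc_match() itself takes a
struct slab in a later patch of the series.
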
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 85499f0586b0..3cc64e9f988c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,9 +48,9 @@ enum stat_item {
 struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to next available object */
 	unsigned long tid;	/* Globally unique transaction id */
-	struct page *page;	/* The slab from which we are allocating */
+	struct slab *slab;	/* The slab from which we are allocating */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-	struct page *partial;	/* Partially allocated frozen slabs */
+	struct slab *partial;	/* Partially allocated frozen slabs */
 #endif
 	local_lock_t lock;	/* Protects the fields above */
 #ifdef CONFIG_SLUB_STATS
diff --git a/mm/slub.c b/mm/slub.c
index 41c4ccd67d95..d849b644d0ed 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2084,9 +2084,9 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
 #else
-static inline void put_cpu_partial(struct kmem_cache *s, struct page *page,
+static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
 				   int drain) { }
 #endif
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
@@ -2095,9 +2095,9 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct page **ret_page, gfp_t gfpflags)
+			      struct slab **ret_slab, gfp_t gfpflags)
 {
-	struct page *page, *page2;
+	struct slab *slab, *slab2;
 	void *object = NULL;
 	unsigned int available = 0;
 	unsigned long flags;
@@ -2113,23 +2113,23 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		return NULL;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
+	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
 		void *t;
 
-		if (!pfmemalloc_match(page, gfpflags))
+		if (!pfmemalloc_match(slab_page(slab), gfpflags))
 			continue;
 
-		t = acquire_slab(s, n, page, object == NULL, &objects);
+		t = acquire_slab(s, n, slab_page(slab), object == NULL, &objects);
 		if (!t)
 			break;
 
 		available += objects;
 		if (!object) {
-			*ret_page = page;
+			*ret_slab = slab;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 		} else {
-			put_cpu_partial(s, page, 0);
+			put_cpu_partial(s, slab, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (!kmem_cache_has_cpu_partial(s)
@@ -2142,10 +2142,10 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 }
 
 /*
- * Get a page from somewhere. Search in increasing NUMA distances.
+ * Get a slab from somewhere. Search in increasing NUMA distances.
  */
 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
-			     struct page **ret_page)
+			     struct slab **ret_slab)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -2187,7 +2187,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 			if (n && cpuset_zone_allowed(zone, flags) &&
 					n->nr_partial > s->min_partial) {
-				object = get_partial_node(s, n, ret_page, flags);
+				object = get_partial_node(s, n, ret_slab, flags);
 				if (object) {
 					/*
 					 * Don't check read_mems_allowed_retry()
@@ -2206,10 +2206,10 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 }
 
 /*
- * Get a partial page, lock it and return it.
+ * Get a partial slab, lock it and return it.
  */
 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
-			 struct page **ret_page)
+			 struct slab **ret_slab)
 {
 	void *object;
 	int searchnode = node;
@@ -2217,11 +2217,11 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 	if (node == NUMA_NO_NODE)
 		searchnode = numa_mem_id();
 
-	object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
+	object = get_partial_node(s, get_node(s, searchnode), ret_slab, flags);
 	if (object || node != NUMA_NO_NODE)
 		return object;
 
-	return get_any_partial(s, flags, ret_page);
+	return get_any_partial(s, flags, ret_slab);
 }
 
 #ifdef CONFIG_PREEMPTION
@@ -2506,7 +2506,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 	unsigned long flags;
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	partial_page = this_cpu_read(s->cpu_slab->partial);
+	partial_page = slab_page(this_cpu_read(s->cpu_slab->partial));
 	this_cpu_write(s->cpu_slab->partial, NULL);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
@@ -2519,7 +2519,7 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
 {
 	struct page *partial_page;
 
-	partial_page = slub_percpu_partial(c);
+	partial_page = slab_page(slub_percpu_partial(c));
 	c->partial = NULL;
 
 	if (partial_page)
@@ -2527,52 +2527,52 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
 }
 
 /*
- * Put a page that was just frozen (in __slab_free|get_partial_node) into a
- * partial page slot if available.
+ * Put a slab that was just frozen (in __slab_free|get_partial_node) into a
+ * partial slab slot if available.
  *
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
 {
-	struct page *oldpage;
-	struct page *page_to_unfreeze = NULL;
+	struct slab *oldslab;
+	struct slab *slab_to_unfreeze = NULL;
 	unsigned long flags;
-	int pages = 0;
+	int slabs = 0;
 	int pobjects = 0;
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
 
-	oldpage = this_cpu_read(s->cpu_slab->partial);
+	oldslab = this_cpu_read(s->cpu_slab->partial);
 
-	if (oldpage) {
-		if (drain && oldpage->pobjects > slub_cpu_partial(s)) {
+	if (oldslab) {
+		if (drain && oldslab->pobjects > slub_cpu_partial(s)) {
 			/*
 			 * Partial array is full. Move the existing set to the
 			 * per node partial list. Postpone the actual unfreezing
 			 * outside of the critical section.
 			 */
-			page_to_unfreeze = oldpage;
-			oldpage = NULL;
+			slab_to_unfreeze = oldslab;
+			oldslab = NULL;
 		} else {
-			pobjects = oldpage->pobjects;
-			pages = oldpage->pages;
+			pobjects = oldslab->pobjects;
+			slabs = oldslab->slabs;
 		}
 	}
 
-	pages++;
-	pobjects += page->objects - page->inuse;
+	slabs++;
+	pobjects += slab->objects - slab->inuse;
 
-	page->pages = pages;
-	page->pobjects = pobjects;
-	page->next = oldpage;
+	slab->slabs = slabs;
+	slab->pobjects = pobjects;
+	slab->next = oldslab;
 
-	this_cpu_write(s->cpu_slab->partial, page);
+	this_cpu_write(s->cpu_slab->partial, slab);
 
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
-	if (page_to_unfreeze) {
-		__unfreeze_partials(s, page_to_unfreeze);
+	if (slab_to_unfreeze) {
+		__unfreeze_partials(s, slab_page(slab_to_unfreeze));
 		stat(s, CPU_PARTIAL_DRAIN);
 	}
 }
@@ -2593,10 +2593,10 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
 
-	page = c->page;
+	page = slab_page(c->slab);
 	freelist = c->freelist;
 
-	c->page = NULL;
+	c->slab = NULL;
 	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
 
@@ -2612,9 +2612,9 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 	void *freelist = c->freelist;
-	struct page *page = c->page;
+	struct page *page = slab_page(c->slab);
 
-	c->page = NULL;
+	c->slab = NULL;
 	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
 
@@ -2648,7 +2648,7 @@ static void flush_cpu_slab(struct work_struct *w)
 	s = sfw->s;
 	c = this_cpu_ptr(s->cpu_slab);
 
-	if (c->page)
+	if (c->slab)
 		flush_slab(s, c);
 
 	unfreeze_partials(s);
@@ -2658,7 +2658,7 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 {
 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-	return c->page || slub_percpu_partial(c);
+	return c->slab || slub_percpu_partial(c);
 }
 
 static DEFINE_MUTEX(flush_lock);
@@ -2872,15 +2872,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void *freelist;
-	struct page *page;
+	struct slab *slab;
 	unsigned long flags;
 
 	stat(s, ALLOC_SLOWPATH);
 
-reread_page:
+reread_slab:
 
-	page = READ_ONCE(c->page);
-	if (!page) {
+	slab = READ_ONCE(c->slab);
+	if (!slab) {
 		/*
 		 * if the node is not online or has no normal memory, just
 		 * ignore the node constraint
@@ -2892,7 +2892,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	}
 redo:
 
-	if (unlikely(!node_match(page, node))) {
+	if (unlikely(!node_match(slab_page(slab), node))) {
 		/*
 		 * same as above but node_match() being false already
 		 * implies node != NUMA_NO_NODE
@@ -2907,27 +2907,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	}
 
 	/*
-	 * By rights, we should be searching for a slab page that was
-	 * PFMEMALLOC but right now, we are losing the pfmemalloc
+	 * By rights, we should be searching for a slab that was
+	 * PFMEMALLOC but right now, we lose the pfmemalloc
 	 * information when the page leaves the per-cpu allocator
 	 */
-	if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match_unsafe(slab_page(slab), gfpflags)))
 		goto deactivate_slab;
 
-	/* must check again c->page in case we got preempted and it changed */
+	/* must check again c->slab in case we got preempted and it changed */
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (unlikely(page != c->page)) {
+	if (unlikely(slab != c->slab)) {
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		goto reread_page;
+		goto reread_slab;
 	}
 	freelist = c->freelist;
 	if (freelist)
 		goto load_freelist;
 
-	freelist = get_freelist(s, page);
+	freelist = get_freelist(s, slab_page(slab));
 
 	if (!freelist) {
-		c->page = NULL;
+		c->slab = NULL;
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2941,10 +2941,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 	/*
 	 * freelist is pointing to the list of objects to be used.
-	 * page is pointing to the page from which the objects are obtained.
-	 * That page must be frozen for per cpu allocations to work.
+	 * slab is pointing to the slab from which the objects are obtained.
+	 * That slab must be frozen for per cpu allocations to work.
 	 */
-	VM_BUG_ON(!c->page->frozen);
+	VM_BUG_ON(!c->slab->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -2953,23 +2953,23 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 deactivate_slab:
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (page != c->page) {
+	if (slab != c->slab) {
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-		goto reread_page;
+		goto reread_slab;
 	}
 	freelist = c->freelist;
-	c->page = NULL;
+	c->slab = NULL;
 	c->freelist = NULL;
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-	deactivate_slab(s, page, freelist);
+	deactivate_slab(s, slab_page(slab), freelist);
 
 new_slab:
 
 	if (slub_percpu_partial(c)) {
 		local_lock_irqsave(&s->cpu_slab->lock, flags);
-		if (unlikely(c->page)) {
+		if (unlikely(c->slab)) {
 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
-			goto reread_page;
+			goto reread_slab;
 		}
 		if (unlikely(!slub_percpu_partial(c))) {
 			local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -2977,8 +2977,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			goto new_objects;
 		}
 
-		page = c->page = slub_percpu_partial(c);
-		slub_set_percpu_partial(c, page);
+		slab = c->slab = slub_percpu_partial(c);
+		slub_set_percpu_partial(c, slab);
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, CPU_PARTIAL_ALLOC);
 		goto redo;
@@ -2986,32 +2986,32 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 new_objects:
 
-	freelist = get_partial(s, gfpflags, node, &page);
+	freelist = get_partial(s, gfpflags, node, &slab);
 	if (freelist)
-		goto check_new_page;
+		goto check_new_slab;
 
 	slub_put_cpu_ptr(s->cpu_slab);
-	page = slab_page(new_slab(s, gfpflags, node));
+	slab = new_slab(s, gfpflags, node);
 	c = slub_get_cpu_ptr(s->cpu_slab);
 
-	if (unlikely(!page)) {
+	if (unlikely(!slab)) {
 		slab_out_of_memory(s, gfpflags, node);
 		return NULL;
 	}
 
 	/*
-	 * No other reference to the page yet so we can
+	 * No other reference to the slab yet so we can
 	 * muck around with it freely without cmpxchg
 	 */
-	freelist = page->freelist;
-	page->freelist = NULL;
+	freelist = slab->freelist;
+	slab->freelist = NULL;
 
 	stat(s, ALLOC_SLAB);
 
-check_new_page:
+check_new_slab:
 
 	if (kmem_cache_debug(s)) {
-		if (!alloc_debug_processing(s, page, freelist, addr)) {
+		if (!alloc_debug_processing(s, slab_page(slab), freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
 			goto new_slab;
 		} else {
@@ -3023,39 +3023,39 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		}
 	}
 
-	if (unlikely(!pfmemalloc_match(page, gfpflags)))
+	if (unlikely(!pfmemalloc_match(slab_page(slab), gfpflags)))
 		/*
 		 * For !pfmemalloc_match() case we don't load freelist so that
 		 * we don't make further mismatched allocations easier.
 		 */
 		goto return_single;
 
-retry_load_page:
+retry_load_slab:
 
 	local_lock_irqsave(&s->cpu_slab->lock, flags);
-	if (unlikely(c->page)) {
+	if (unlikely(c->slab)) {
 		void *flush_freelist = c->freelist;
-		struct page *flush_page = c->page;
+		struct slab *flush_slab = c->slab;
 
-		c->page = NULL;
+		c->slab = NULL;
 		c->freelist = NULL;
 		c->tid = next_tid(c->tid);
 
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 
-		deactivate_slab(s, flush_page, flush_freelist);
+		deactivate_slab(s, slab_page(flush_slab), flush_freelist);
 
 		stat(s, CPUSLAB_FLUSH);
 
-		goto retry_load_page;
+		goto retry_load_slab;
 	}
-	c->page = page;
+	c->slab = slab;
 
 	goto load_freelist;
 
 return_single:
 
-	deactivate_slab(s, page, get_freepointer(s, freelist));
+	deactivate_slab(s, slab_page(slab), get_freepointer(s, freelist));
 	return freelist;
 }
 
@@ -3159,7 +3159,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	 */
 
 	object = c->freelist;
-	page = c->page;
+	page = slab_page(c->slab);
 	/*
 	 * We cannot use the lockless fastpath on PREEMPT_RT because if a
 	 * slowpath has taken the local_lock_irqsave(), it is not protected
@@ -3351,7 +3351,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
 			 * If we just froze the slab then put it onto the
 			 * per cpu partial list.
 			 */
-			put_cpu_partial(s, slab_page(slab), 1);
+			put_cpu_partial(s, slab, 1);
 			stat(s, CPU_PARTIAL_FREE);
 		}
 
@@ -3427,7 +3427,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	/* Same with comment on barrier() in slab_alloc_node() */
 	barrier();
 
-	if (likely(slab_page(slab) == c->page)) {
+	if (likely(slab == c->slab)) {
 #ifndef CONFIG_PREEMPT_RT
 		void **freelist = READ_ONCE(c->freelist);
 
@@ -3453,7 +3453,7 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 
 		local_lock(&s->cpu_slab->lock);
 		c = this_cpu_ptr(s->cpu_slab);
-		if (unlikely(slab_page(slab) != c->page)) {
+		if (unlikely(slab != c->slab)) {
 			local_unlock(&s->cpu_slab->lock);
 			goto redo;
 		}
@@ -5221,7 +5221,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			int node;
 			struct page *page;
 
-			page = READ_ONCE(c->page);
+			page = slab_page(READ_ONCE(c->slab));
 			if (!page)
 				continue;
 
@@ -5236,7 +5236,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			total += x;
 			nodes[node] += x;
 
-			page = slub_percpu_partial_read_once(c);
+			page = slab_page(slub_percpu_partial_read_once(c));
 			if (page) {
 				node = page_to_nid(page);
 				if (flags & SO_TOTAL)
@@ -5441,31 +5441,31 @@ SLAB_ATTR_RO(objects_partial);
 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 {
 	int objects = 0;
-	int pages = 0;
+	int slabs = 0;
 	int cpu;
 	int len = 0;
 
 	for_each_online_cpu(cpu) {
-		struct page *page;
+		struct slab *slab;
 
-		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
+		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
-		if (page) {
-			pages += page->pages;
-			objects += page->pobjects;
+		if (slab) {
+			slabs += slab->slabs;
+			objects += slab->pobjects;
 		}
 	}
 
-	len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
+	len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		struct page *page;
+		struct slab *slab;
 
-		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
-		if (page)
+		slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
+		if (slab)
 			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
-					     cpu, page->pobjects, page->pages);
+					     cpu, slab->pobjects, slab->slabs);
 	}
 #endif
 	len += sysfs_emit_at(buf, len, "\n");
-- 
2.32.0



Thread overview: 77+ messages
2021-10-04 13:45 [PATCH 00/62] Separate struct slab from struct page Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 01/62] mm: Convert page_to_section() to pgflags_section() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 02/62] mm: Add pgflags_nid() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 03/62] mm: Split slab into its own type Matthew Wilcox (Oracle)
2021-10-05 16:10   ` David Hildenbrand
2021-10-05 18:48     ` Matthew Wilcox
2021-10-12  7:25       ` David Hildenbrand
2021-10-12 14:13         ` Matthew Wilcox
2021-10-12 14:17           ` David Hildenbrand
2021-10-13 18:08             ` Johannes Weiner
2021-10-13 18:31               ` Matthew Wilcox
2021-10-14  7:22                 ` David Hildenbrand
2021-10-14 12:44                   ` Johannes Weiner
2021-10-14 13:08                     ` Matthew Wilcox
2021-10-04 13:45 ` [PATCH 04/62] mm: Add account_slab() and unaccount_slab() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 05/62] mm: Convert virt_to_cache() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 06/62] mm: Convert __ksize() to " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 07/62] mm: Use struct slab in kmem_obj_info() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 08/62] mm: Convert check_heap_object() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 09/62] mm/slub: Convert process_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 10/62] mm/slub: Convert detached_freelist to use " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 11/62] mm/slub: Convert kfree() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 12/62] mm/slub: Convert __slab_free() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 13/62] mm/slub: Convert new_slab() to return " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 14/62] mm/slub: Convert early_kmem_cache_node_alloc() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` Matthew Wilcox (Oracle) [this message]
2021-10-04 13:46 ` [PATCH 16/62] mm/slub: Convert show_slab_objects() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 17/62] mm/slub: Convert validate_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 18/62] mm/slub: Convert count_partial() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 19/62] mm/slub: Convert bootstrap() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 20/62] mm/slub: Convert __kmem_cache_do_shrink() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 21/62] mm/slub: Convert free_partial() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 22/62] mm/slub: Convert list_slab_objects() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 23/62] mm/slub: Convert slab_alloc_node() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 24/62] mm/slub: Convert get_freelist() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 25/62] mm/slub: Convert node_match() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 26/62] mm/slub: Convert slab flushing to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 27/62] mm/slub: Convert __unfreeze_partials to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 28/62] mm/slub: Convert deactivate_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 29/62] mm/slub: Convert acquire_slab() to take a struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 30/62] mm/slub: Convert partial slab management to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 31/62] mm/slub: Convert slab freeing " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 32/62] mm/slub: Convert shuffle_freelist " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 33/62] mm/slub: Remove struct page argument to next_freelist_entry() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 34/62] mm/slub: Remove struct page argument from setup_object() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 35/62] mm/slub: Convert freelist_corrupted() to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 36/62] mm/slub: Convert full slab management " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 37/62] mm/slub: Convert free_consistency_checks() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 38/62] mm/slub: Convert alloc_debug_processing() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 39/62] mm/slub: Convert check_object() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 40/62] mm/slub: Convert on_freelist() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 41/62] mm/slub: Convert check_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 42/62] mm/slub: Convert check_valid_pointer() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 43/62] mm/slub: Convert object_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 44/62] mm/slub: Convert print_trailer() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 45/62] mm/slub: Convert slab_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 46/62] mm/slub: Convert print_page_info() to print_slab_info() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 47/62] mm/slub: Convert trace() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 48/62] mm/slub: Convert cmpxchg_double_slab to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 49/62] mm/slub: Convert get_map() and __fill_map() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 50/62] mm/slub: Convert slab_lock() and slab_unlock() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 51/62] mm/slub: Convert setup_page_debug() to setup_slab_debug() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 52/62] mm/slub: Convert pfmemalloc_match() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 53/62] mm/slub: Remove pfmemalloc_match_unsafe() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 54/62] mm: Convert slab to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 55/62] mm: Convert slob " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 56/62] mm: Convert slub " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 57/62] memcg: Convert object cgroups from struct page to " Matthew Wilcox (Oracle)
2021-10-11 17:13   ` Johannes Weiner
2021-10-12  3:16     ` Matthew Wilcox
2021-10-04 13:46 ` [PATCH 58/62] mm/kasan: Convert " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 59/62] zsmalloc: Stop using slab fields in struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 60/62] bootmem: Use page->index instead of page->freelist Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 61/62] iommu: Use put_pages_list Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 62/62] mm: Remove slab from struct page Matthew Wilcox (Oracle)
2021-10-11 20:07 ` [PATCH 00/62] Separate struct " Johannes Weiner
2021-10-12  3:30   ` Matthew Wilcox
