linux-mm.kvack.org archive mirror
* [PATCH 0/6] page->index removals in mm
@ 2024-07-23 15:34 Matthew Wilcox (Oracle)
  2024-07-23 15:34 ` [PATCH 1/6] bootmem: Stop using page->index Matthew Wilcox (Oracle)
                   ` (5 more replies)
  0 siblings, 6 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

As part of shrinking struct page, we need to stop using page->index.
This patchset gets rid of most of the remaining references to page->index
in mm and increases the number of functions which take a const
folio/page pointer.  It shrinks the text segment of mm by a few
hundred bytes in my test config, probably mostly from removing calls to
compound_head() in page_to_pgoff().
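
To make that concrete, here is the old helper next to its replacement
from patch 3; a side-by-side sketch, abridged from the patches below
(not a new interface):

/* before: tail pages have to look up their head page */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;
	head = compound_head(page);
	return head->index + page - head;
}

/* after: callers pass the folio they already hold, so no compound_head() */
static inline pgoff_t page_pgoff(const struct folio *folio,
		const struct page *page)
{
	return folio->index + folio_page_idx(folio, page);
}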

Matthew Wilcox (Oracle) (6):
  bootmem: Stop using page->index
  mm: Constify page_address_in_vma()
  mm: Convert page_to_pgoff() to page_pgoff()
  mm: Mass constification of folio/page pointers
  mm: Remove references to page->index in huge_memory.c
  mm: Use page->private instead of page->index in percpu

 arch/x86/mm/init_64.c        |  9 ++++-----
 include/linux/bootmem_info.h | 25 +++++++++++++++++--------
 include/linux/ksm.h          |  7 ++++---
 include/linux/pagemap.h      | 18 ------------------
 include/linux/rmap.h         | 12 ++++++------
 mm/bootmem_info.c            | 11 ++++++-----
 mm/huge_memory.c             | 18 +++++++++---------
 mm/internal.h                | 13 ++++++++++---
 mm/ksm.c                     |  5 +++--
 mm/memory-failure.c          | 28 +++++++++++++++-------------
 mm/page_vma_mapped.c         |  5 +++--
 mm/percpu.c                  |  4 ++--
 mm/rmap.c                    | 18 ++++++++++--------
 mm/sparse.c                  |  8 ++++----
 mm/util.c                    |  2 +-
 15 files changed, 94 insertions(+), 89 deletions(-)

-- 
2.43.0




* [PATCH 1/6] bootmem: Stop using page->index
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
@ 2024-07-23 15:34 ` Matthew Wilcox (Oracle)
  2024-07-23 15:34 ` [PATCH 2/6] mm: Constify page_address_in_vma() Matthew Wilcox (Oracle)
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Encode the type into the bottom four bits of page->private and the
info into the remaining bits.  Also turn the bootmem type into a
named enum.
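
In other words, the round trip through page->private looks like this
(a sketch of the helpers added below, not additional API):

/* store: the low four bits carry the type, the rest carry the info */
set_page_private(page, info << 4 | type);

/* retrieve */
type = page->private & 0xf;	/* bootmem_type(page) */
info = page->private >> 4;	/* bootmem_info(page) */

Starting MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE at 1 also means a cleared
page->private (0) can never be mistaken for a valid bootmem type.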

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/x86/mm/init_64.c        |  9 ++++-----
 include/linux/bootmem_info.h | 25 +++++++++++++++++--------
 mm/bootmem_info.c            | 11 ++++++-----
 mm/sparse.c                  |  8 ++++----
 4 files changed, 31 insertions(+), 22 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d8dbeac8b206..d77f22850aa2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -983,13 +983,12 @@ int arch_add_memory(int nid, u64 start, u64 size,
 
 static void __meminit free_pagetable(struct page *page, int order)
 {
-	unsigned long magic;
-	unsigned int nr_pages = 1 << order;
-
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
-		magic = page->index;
-		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
+		enum bootmem_type type = bootmem_type(page);
+		unsigned long nr_pages = 1 << order;
+
+		if (type == SECTION_INFO || type == MIX_SECTION_INFO) {
 			while (nr_pages--)
 				put_page_bootmem(page++);
 		} else
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index cffa38a73618..e2fe5de93dcc 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -6,11 +6,10 @@
 #include <linux/kmemleak.h>
 
 /*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * Types for free bootmem stored in the low bits of page->private.
  */
-enum {
-	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+enum bootmem_type {
+	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1,
 	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
 	MIX_SECTION_INFO,
 	NODE_INFO,
@@ -21,9 +20,19 @@ enum {
 void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
 
 void get_page_bootmem(unsigned long info, struct page *page,
-		      unsigned long type);
+		enum bootmem_type type);
 void put_page_bootmem(struct page *page);
 
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+	return (unsigned long)page->private & 0xf;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+	return (unsigned long)page->private >> 4;
+}
+
 /*
  * Any memory allocated via the memblock allocator and not via the
  * buddy will be marked reserved already in the memmap. For those
@@ -31,7 +40,7 @@ void put_page_bootmem(struct page *page);
  */
 static inline void free_bootmem_page(struct page *page)
 {
-	unsigned long magic = page->index;
+	enum bootmem_type type = bootmem_type(page);
 
 	/*
 	 * The reserve_bootmem_region sets the reserved flag on bootmem
@@ -39,7 +48,7 @@ static inline void free_bootmem_page(struct page *page)
 	 */
 	VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
 
-	if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+	if (type == SECTION_INFO || type == MIX_SECTION_INFO)
 		put_page_bootmem(page);
 	else
 		VM_BUG_ON_PAGE(1, page);
@@ -54,7 +63,7 @@ static inline void put_page_bootmem(struct page *page)
 }
 
 static inline void get_page_bootmem(unsigned long info, struct page *page,
-				    unsigned long type)
+				    enum bootmem_type type)
 {
 }
 
diff --git a/mm/bootmem_info.c b/mm/bootmem_info.c
index fa7cb0c87c03..95f288169a38 100644
--- a/mm/bootmem_info.c
+++ b/mm/bootmem_info.c
@@ -14,23 +14,24 @@
 #include <linux/memory_hotplug.h>
 #include <linux/kmemleak.h>
 
-void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
+void get_page_bootmem(unsigned long info, struct page *page,
+		enum bootmem_type type)
 {
-	page->index = type;
+	BUG_ON(type > 0xf);
+	BUG_ON(info > (ULONG_MAX >> 4));
 	SetPagePrivate(page);
-	set_page_private(page, info);
+	set_page_private(page, info << 4 | type);
 	page_ref_inc(page);
 }
 
 void put_page_bootmem(struct page *page)
 {
-	unsigned long type = page->index;
+	enum bootmem_type type = bootmem_type(page);
 
 	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
 	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 
 	if (page_ref_dec_return(page) == 1) {
-		page->index = 0;
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
 		INIT_LIST_HEAD(&page->lru);
diff --git a/mm/sparse.c b/mm/sparse.c
index e4b830091d13..ad89ce5d9d28 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -721,19 +721,19 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	unsigned long magic, nr_pages;
+	unsigned long type, nr_pages;
 	struct page *page = virt_to_page(memmap);
 
 	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
 		>> PAGE_SHIFT;
 
 	for (i = 0; i < nr_pages; i++, page++) {
-		magic = page->index;
+		type = bootmem_type(page);
 
-		BUG_ON(magic == NODE_INFO);
+		BUG_ON(type == NODE_INFO);
 
 		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
-		removing_section_nr = page_private(page);
+		removing_section_nr = bootmem_info(page);
 
 		/*
 		 * When this function is called, the removing section is
-- 
2.43.0




* [PATCH 2/6] mm: Constify page_address_in_vma()
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
  2024-07-23 15:34 ` [PATCH 1/6] bootmem: Stop using page->index Matthew Wilcox (Oracle)
@ 2024-07-23 15:34 ` Matthew Wilcox (Oracle)
  2024-07-24  5:54   ` kernel test robot
  2024-07-24  7:24   ` kernel test robot
  2024-07-23 15:34 ` [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
                   ` (3 subsequent siblings)
  5 siblings, 2 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

If we also mark the struct folio argument to folio_anon_vma() as const,
we can make page_address_in_vma() take a const struct page pointer.
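
The dependency is simply that page_address_in_vma() hands its (now const)
folio straight to folio_anon_vma(), so the two prototypes have to agree;
abridged from the function as it reads after this patch:

unsigned long page_address_in_vma(const struct page *page,
		struct vm_area_struct *vma)
{
	const struct folio *folio = page_folio(page);

	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/* ... */
	}
	/* ... */
}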

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/rmap.h | 2 +-
 mm/internal.h        | 2 +-
 mm/rmap.c            | 5 +++--
 mm/util.c            | 2 +-
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 0978c64f49d8..d1fca5b76039 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -732,7 +732,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+unsigned long page_address_in_vma(const struct page *, struct vm_area_struct *);
 
 /*
  * Cleans the PTEs of shared mappings.
diff --git a/mm/internal.h b/mm/internal.h
index b4d86436565b..e511708b2be0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -810,7 +810,7 @@ static inline bool is_data_mapping(vm_flags_t flags)
 }
 
 /* mm/util.c */
-struct anon_vma *folio_anon_vma(struct folio *folio);
+struct anon_vma *folio_anon_vma(const struct folio *folio);
 
 #ifdef CONFIG_MMU
 void unmap_mapping_folio(struct folio *folio);
diff --git a/mm/rmap.c b/mm/rmap.c
index 8616308610b9..886bf67ba382 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -771,9 +771,10 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
  * At what user virtual address is page expected in vma?
  * Caller should check the page is actually part of the vma.
  */
-unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_address_in_vma(const struct page *page,
+		struct vm_area_struct *vma)
 {
-	struct folio *folio = page_folio(page);
+	const struct folio *folio = page_folio(page);
 	pgoff_t pgoff;
 
 	if (folio_test_anon(folio)) {
diff --git a/mm/util.c b/mm/util.c
index bc488f0121a7..8afe3b90d650 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -780,7 +780,7 @@ void *vcalloc_noprof(size_t n, size_t size)
 }
 EXPORT_SYMBOL(vcalloc_noprof);
 
-struct anon_vma *folio_anon_vma(struct folio *folio)
+struct anon_vma *folio_anon_vma(const struct folio *folio)
 {
 	unsigned long mapping = (unsigned long)folio->mapping;
 
-- 
2.43.0




* [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff()
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
  2024-07-23 15:34 ` [PATCH 1/6] bootmem: Stop using page->index Matthew Wilcox (Oracle)
  2024-07-23 15:34 ` [PATCH 2/6] mm: Constify page_address_in_vma() Matthew Wilcox (Oracle)
@ 2024-07-23 15:34 ` Matthew Wilcox (Oracle)
  2024-07-24  7:24   ` kernel test robot
  2024-07-23 15:34 ` [PATCH 4/6] mm: Mass constification of folio/page pointers Matthew Wilcox (Oracle)
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Change the function signature to pass in the folio, as all three
callers have it.  This removes a reference to page->index, which we're
trying to get rid of.  Also move page_pgoff() to mm/internal.h as
code outside mm has no business calling it.
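
Since every caller already holds the folio, the conversion at each call
site is mechanical; a sketch of the pattern used in the hunks below:

	/* e.g. in collect_procs_anon(), which is passed the folio */
	pgoff = page_pgoff(folio, page);	/* was: page_to_pgoff(page) */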

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 18 ------------------
 mm/internal.h           |  6 ++++++
 mm/memory-failure.c     |  4 ++--
 mm/rmap.c               |  2 +-
 4 files changed, 9 insertions(+), 21 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 483a191bb4df..1f295ef7d10d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -913,24 +913,6 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
 	return read_cache_folio(mapping, index, NULL, file);
 }
 
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
-	struct page *head;
-
-	if (likely(!PageTransTail(page)))
-		return page->index;
-
-	head = compound_head(page);
-	/*
-	 *  We don't initialize ->index for tail pages: calculate based on
-	 *  head page
-	 */
-	return head->index + page - head;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
diff --git a/mm/internal.h b/mm/internal.h
index e511708b2be0..8dfd9527ac1e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -919,6 +919,12 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
+static inline pgoff_t page_pgoff(const struct folio *folio,
+		const struct page *page)
+{
+	return folio->index + folio_page_idx(folio, page);
+}
+
 /**
  * vma_address - Find the virtual address a page range is mapped at
  * @vma: The vma which maps this object.
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 581d3e5c9117..572c742ecf48 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -617,7 +617,7 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	rcu_read_lock();
 	for_each_process(tsk) {
 		struct vm_area_struct *vma;
@@ -653,7 +653,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 
 	i_mmap_lock_read(mapping);
 	rcu_read_lock();
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
 		unsigned long addr;
diff --git a/mm/rmap.c b/mm/rmap.c
index 886bf67ba382..ba1920291ac6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1266,7 +1266,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
 		       page);
 }
 
-- 
2.43.0




* [PATCH 4/6] mm: Mass constification of folio/page pointers
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
                   ` (2 preceding siblings ...)
  2024-07-23 15:34 ` [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
@ 2024-07-23 15:34 ` Matthew Wilcox (Oracle)
  2024-07-23 15:35 ` [PATCH 5/6] mm: Remove references to page->index in huge_memory.c Matthew Wilcox (Oracle)
  2024-07-23 15:35 ` [PATCH 6/6] mm: Use page->private instead of page->index in percpu Matthew Wilcox (Oracle)
  5 siblings, 0 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:34 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

Now that page_pgoff() takes const pointers, we can constify the
folio/page pointer arguments of a lot of functions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/ksm.h  |  7 ++++---
 include/linux/rmap.h | 10 +++++-----
 mm/internal.h        |  5 +++--
 mm/ksm.c             |  5 +++--
 mm/memory-failure.c  | 24 +++++++++++++-----------
 mm/page_vma_mapped.c |  5 +++--
 mm/rmap.c            | 11 ++++++-----
 7 files changed, 37 insertions(+), 30 deletions(-)

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 11690dacd986..c4a8891f6e7d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -92,7 +92,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
 		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
 
@@ -125,8 +125,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-static inline void collect_procs_ksm(struct folio *folio, struct page *page,
-				     struct list_head *to_kill, int force_early)
+static inline void collect_procs_ksm(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 }
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d1fca5b76039..bef597736e60 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 	unlink_anon_vmas(next);
 }
 
-struct anon_vma *folio_get_anon_vma(struct folio *folio);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);
 
 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
@@ -194,8 +194,8 @@ enum rmap_level {
 	RMAP_LEVEL_PMD,
 };
 
-static inline void __folio_rmap_sanity_checks(struct folio *folio,
-		struct page *page, int nr_pages, enum rmap_level level)
+static inline void __folio_rmap_sanity_checks(const struct folio *folio,
+		const struct page *page, int nr_pages, enum rmap_level level)
 {
 	/* hugetlb folios are handled separately. */
 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -769,14 +769,14 @@ struct rmap_walk_control {
 	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
 	int (*done)(struct folio *folio);
-	struct anon_vma *(*anon_lock)(struct folio *folio,
+	struct anon_vma *(*anon_lock)(const struct folio *folio,
 				      struct rmap_walk_control *rwc);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
 					  struct rmap_walk_control *rwc);
 
 #else	/* !CONFIG_MMU */
diff --git a/mm/internal.h b/mm/internal.h
index 8dfd9527ac1e..ec01e63572ae 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1090,10 +1090,11 @@ void ClearPageHWPoisonTakenOff(struct page *page);
 bool take_page_off_buddy(struct page *page);
 bool put_page_back_buddy(struct page *page);
 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
 		     struct vm_area_struct *vma, struct list_head *to_kill,
 		     unsigned long ksm_addr);
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(const struct page *page,
+		struct vm_area_struct *vma);
 
 extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
         unsigned long, unsigned long,
diff --git a/mm/ksm.c b/mm/ksm.c
index df6bae3a5a2c..8d45cfe7671f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1080,7 +1080,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
 	return err;
 }
 
-static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
+static inline
+struct ksm_stable_node *folio_stable_node(const struct folio *folio)
 {
 	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
 }
@@ -3085,7 +3086,7 @@ void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
 /*
  * Collect processes when the error hit an ksm page.
  */
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
 		struct list_head *to_kill, int force_early)
 {
 	struct ksm_stable_node *stable_node;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 572c742ecf48..729e9c49cc57 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -445,7 +445,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  */
-static void __add_to_kill(struct task_struct *tsk, struct page *p,
+static void __add_to_kill(struct task_struct *tsk, const struct page *p,
 			  struct vm_area_struct *vma, struct list_head *to_kill,
 			  unsigned long addr)
 {
@@ -461,7 +461,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	if (is_zone_device_page(p))
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	else
-		tk->size_shift = page_shift(compound_head(p));
+		tk->size_shift = folio_shift(page_folio(p));
 
 	/*
 	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
@@ -486,7 +486,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	list_add_tail(&tk->nd, to_kill);
 }
 
-static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
+static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma, struct list_head *to_kill,
 		unsigned long addr)
 {
@@ -509,7 +509,7 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
 	return false;
 }
 
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
 		     struct vm_area_struct *vma, struct list_head *to_kill,
 		     unsigned long addr)
 {
@@ -606,8 +606,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 /*
  * Collect processes when the error hit an anonymous page.
  */
-static void collect_procs_anon(struct folio *folio, struct page *page,
-		struct list_head *to_kill, int force_early)
+static void collect_procs_anon(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 	struct task_struct *tsk;
 	struct anon_vma *av;
@@ -643,8 +644,9 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 /*
  * Collect processes when the error hit a file mapped page.
  */
-static void collect_procs_file(struct folio *folio, struct page *page,
-		struct list_head *to_kill, int force_early)
+static void collect_procs_file(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -680,7 +682,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 }
 
 #ifdef CONFIG_FS_DAX
-static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
+static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
 			      struct vm_area_struct *vma,
 			      struct list_head *to_kill, pgoff_t pgoff)
 {
@@ -691,7 +693,7 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
 /*
  * Collect processes when the error hit a fsdax page.
  */
-static void collect_procs_fsdax(struct page *page,
+static void collect_procs_fsdax(const struct page *page,
 		struct address_space *mapping, pgoff_t pgoff,
 		struct list_head *to_kill, bool pre_remove)
 {
@@ -725,7 +727,7 @@ static void collect_procs_fsdax(struct page *page,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
-static void collect_procs(struct folio *folio, struct page *page,
+static void collect_procs(const struct folio *folio, const struct page *page,
 		struct list_head *tokill, int force_early)
 {
 	if (!folio->mapping)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ae5cc42aa208..9b6632aab5f7 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -325,9 +325,10 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
  * outside the VMA or not present, returns -EFAULT.
  * Only valid for normal file or anonymous VMAs.
  */
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+		struct vm_area_struct *vma)
 {
-	struct folio *folio = page_folio(page);
+	const struct folio *folio = page_folio(page);
 	pgoff_t pgoff = folio->index + folio_page_idx(folio, page);
 	struct page_vma_mapped_walk pvmw = {
 		.pfn = page_to_pfn(page),
diff --git a/mm/rmap.c b/mm/rmap.c
index ba1920291ac6..9bcddd8ec228 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -496,7 +496,7 @@ void __init anon_vma_init(void)
  * concurrently without folio lock protection). See folio_lock_anon_vma_read()
  * which has already covered that, and comment above remap_pages().
  */
-struct anon_vma *folio_get_anon_vma(struct folio *folio)
+struct anon_vma *folio_get_anon_vma(const struct folio *folio)
 {
 	struct anon_vma *anon_vma = NULL;
 	unsigned long anon_mapping;
@@ -540,7 +540,7 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio)
  * reference like with folio_get_anon_vma() and then block on the mutex
  * on !rwc->try_lock case.
  */
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
 					  struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma = NULL;
@@ -1250,8 +1250,9 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct folio *folio, struct page *page,
-	struct vm_area_struct *vma, unsigned long address)
+static void __page_check_anon_rmap(const struct folio *folio,
+		const struct page *page, struct vm_area_struct *vma,
+		unsigned long address)
 {
 	/*
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
@@ -2535,7 +2536,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 		anon_vma_free(root);
 }
 
-static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
+static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
 					    struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-- 
2.43.0




* [PATCH 5/6] mm: Remove references to page->index in huge_memory.c
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
                   ` (3 preceding siblings ...)
  2024-07-23 15:34 ` [PATCH 4/6] mm: Mass constification of folio/page pointers Matthew Wilcox (Oracle)
@ 2024-07-23 15:35 ` Matthew Wilcox (Oracle)
  2024-07-23 15:35 ` [PATCH 6/6] mm: Use page->private instead of page->index in percpu Matthew Wilcox (Oracle)
  5 siblings, 0 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:35 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

We already have folios in all these places; it's just a matter of
using them instead of the pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/huge_memory.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f9696c94e211..4ffcae1c82e1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2860,8 +2860,8 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
 	/* ->mapping in first and second tail page is replaced by other uses */
 	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
 			page_tail);
-	page_tail->mapping = head->mapping;
-	page_tail->index = head->index + tail;
+	new_folio->mapping = folio->mapping;
+	new_folio->index = folio->index + tail;
 
 	/*
 	 * page->private should not be set in tail pages. Fix up and warn once
@@ -2937,11 +2937,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	ClearPageHasHWPoisoned(head);
 
 	for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
+		struct folio *tail;
 		__split_huge_page_tail(folio, i, lruvec, list, new_order);
+		tail = page_folio(head + i);
 		/* Some pages can be beyond EOF: drop them from page cache */
-		if (head[i].index >= end) {
-			struct folio *tail = page_folio(head + i);
-
+		if (tail->index >= end) {
 			if (shmem_mapping(folio->mapping))
 				nr_dropped++;
 			else if (folio_test_clear_dirty(tail))
@@ -2949,12 +2949,12 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 					inode_to_wb(folio->mapping->host));
 			__filemap_remove_folio(tail, NULL);
 			folio_put(tail);
-		} else if (!PageAnon(page)) {
-			__xa_store(&folio->mapping->i_pages, head[i].index,
-					head + i, 0);
+		} else if (!folio_test_anon(folio)) {
+			__xa_store(&folio->mapping->i_pages, tail->index,
+					tail, 0);
 		} else if (swap_cache) {
 			__xa_store(&swap_cache->i_pages, offset + i,
-					head + i, 0);
+					tail, 0);
 		}
 	}
 
-- 
2.43.0




* [PATCH 6/6] mm: Use page->private instead of page->index in percpu
  2024-07-23 15:34 [PATCH 0/6] page->index removals in mm Matthew Wilcox (Oracle)
                   ` (4 preceding siblings ...)
  2024-07-23 15:35 ` [PATCH 5/6] mm: Remove references to page->index in huge_memory.c Matthew Wilcox (Oracle)
@ 2024-07-23 15:35 ` Matthew Wilcox (Oracle)
  5 siblings, 0 replies; 10+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-07-23 15:35 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm

The percpu allocator only uses one field in struct page, so just change
it from page->index to page->private.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/percpu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/percpu.c b/mm/percpu.c
index 20d91af8c033..763fe7641602 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -253,13 +253,13 @@ static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 /* set the pointer to a chunk in a page struct */
 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 {
-	page->index = (unsigned long)pcpu;
+	page->private = (unsigned long)pcpu;
 }
 
 /* obtain pointer to a chunk from a page struct */
 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 {
-	return (struct pcpu_chunk *)page->index;
+	return (struct pcpu_chunk *)page->private;
 }
 
 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
-- 
2.43.0




* Re: [PATCH 2/6] mm: Constify page_address_in_vma()
  2024-07-23 15:34 ` [PATCH 2/6] mm: Constify page_address_in_vma() Matthew Wilcox (Oracle)
@ 2024-07-24  5:54   ` kernel test robot
  2024-07-24  7:24   ` kernel test robot
  1 sibling, 0 replies; 10+ messages in thread
From: kernel test robot @ 2024-07-24  5:54 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton
  Cc: llvm, oe-kbuild-all, Linux Memory Management List,
	Matthew Wilcox (Oracle)

Hi Matthew,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master next-20240723]
[cannot apply to tip/x86/mm dennis-percpu/for-next v6.10]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/bootmem-Stop-using-page-index/20240723-233932
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240723153503.1669586-3-willy%40infradead.org
patch subject: [PATCH 2/6] mm: Constify page_address_in_vma()
config: x86_64-allnoconfig (https://download.01.org/0day-ci/archive/20240724/202407241351.QvA9uyqf-lkp@intel.com/config)
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240724/202407241351.QvA9uyqf-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407241351.QvA9uyqf-lkp@intel.com/

All errors (new ones prefixed by >>):

>> mm/rmap.c:796:40: error: passing 'const struct folio *' to parameter of type 'struct folio *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
     796 |         pgoff = folio->index + folio_page_idx(folio, page);
         |                                               ^~~~~
   include/linux/mm.h:216:62: note: expanded from macro 'folio_page_idx'
     216 | #define folio_page_idx(folio, p)        (page_to_pfn(p) - folio_pfn(folio))
         |                                                                     ^~~~~
   include/linux/mm.h:1877:53: note: passing argument to parameter 'folio' here
    1877 | static inline unsigned long folio_pfn(struct folio *folio)
         |                                                     ^
   1 error generated.


vim +796 mm/rmap.c

72b252aed506b8 Mel Gorman              2015-09-04  769  
^1da177e4c3f41 Linus Torvalds          2005-04-16  770  /*
bf89c8c8673223 Huang Shijie            2009-10-01  771   * At what user virtual address is page expected in vma?
ab941e0fff3947 Naoya Horiguchi         2010-05-11  772   * Caller should check the page is actually part of the vma.
^1da177e4c3f41 Linus Torvalds          2005-04-16  773   */
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  774) unsigned long page_address_in_vma(const struct page *page,
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  775) 		struct vm_area_struct *vma)
^1da177e4c3f41 Linus Torvalds          2005-04-16  776  {
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  777) 	const struct folio *folio = page_folio(page);
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  778) 	pgoff_t pgoff;
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  779) 
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  780) 	if (folio_test_anon(folio)) {
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  781) 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
4829b906cc063c Hugh Dickins            2010-10-02  782  		/*
4829b906cc063c Hugh Dickins            2010-10-02  783  		 * Note: swapoff's unuse_vma() is more efficient with this
4829b906cc063c Hugh Dickins            2010-10-02  784  		 * check, and needs it to match anon_vma when KSM is active.
4829b906cc063c Hugh Dickins            2010-10-02  785  		 */
4829b906cc063c Hugh Dickins            2010-10-02  786  		if (!vma->anon_vma || !page__anon_vma ||
4829b906cc063c Hugh Dickins            2010-10-02  787  		    vma->anon_vma->root != page__anon_vma->root)
21d0d443cdc165 Andrea Arcangeli        2010-08-09  788  			return -EFAULT;
31657170deaf1d Jue Wang                2021-06-15  789  	} else if (!vma->vm_file) {
^1da177e4c3f41 Linus Torvalds          2005-04-16  790  		return -EFAULT;
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  791) 	} else if (vma->vm_file->f_mapping != folio->mapping) {
^1da177e4c3f41 Linus Torvalds          2005-04-16  792  		return -EFAULT;
31657170deaf1d Jue Wang                2021-06-15  793  	}
494334e43c16d6 Hugh Dickins            2021-06-15  794  
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  795) 	/* The !page__anon_vma above handles KSM folios */
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28 @796) 	pgoff = folio->index + folio_page_idx(folio, page);
e0abfbb6714244 Matthew Wilcox (Oracle  2024-03-28  797) 	return vma_address(vma, pgoff, 1);
^1da177e4c3f41 Linus Torvalds          2005-04-16  798  }
^1da177e4c3f41 Linus Torvalds          2005-04-16  799  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



* Re: [PATCH 2/6] mm: Constify page_address_in_vma()
  2024-07-23 15:34 ` [PATCH 2/6] mm: Constify page_address_in_vma() Matthew Wilcox (Oracle)
  2024-07-24  5:54   ` kernel test robot
@ 2024-07-24  7:24   ` kernel test robot
  1 sibling, 0 replies; 10+ messages in thread
From: kernel test robot @ 2024-07-24  7:24 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton
  Cc: oe-kbuild-all, Linux Memory Management List, Matthew Wilcox (Oracle)

Hi Matthew,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on linus/master next-20240724]
[cannot apply to tip/x86/mm dennis-percpu/for-next v6.10]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/bootmem-Stop-using-page-index/20240723-233932
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240723153503.1669586-3-willy%40infradead.org
patch subject: [PATCH 2/6] mm: Constify page_address_in_vma()
config: sh-migor_defconfig (https://download.01.org/0day-ci/archive/20240724/202407241541.GgplaeTD-lkp@intel.com/config)
compiler: sh4-linux-gcc (GCC) 14.1.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240724/202407241541.GgplaeTD-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407241541.GgplaeTD-lkp@intel.com/

All warnings (new ones prefixed by >>):

   In file included from mm/rmap.c:56:
   mm/rmap.c: In function 'page_address_in_vma':
>> mm/rmap.c:796:47: warning: passing argument 1 of 'folio_pfn' discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers]
     796 |         pgoff = folio->index + folio_page_idx(folio, page);
         |                                               ^~~~~
   include/linux/mm.h:216:69: note: in definition of macro 'folio_page_idx'
     216 | #define folio_page_idx(folio, p)        (page_to_pfn(p) - folio_pfn(folio))
         |                                                                     ^~~~~
   include/linux/mm.h:1877:53: note: expected 'struct folio *' but argument is of type 'const struct folio *'
    1877 | static inline unsigned long folio_pfn(struct folio *folio)
         |                                       ~~~~~~~~~~~~~~^~~~~


vim +796 mm/rmap.c

72b252aed506b8 Mel Gorman              2015-09-04  769  
^1da177e4c3f41 Linus Torvalds          2005-04-16  770  /*
bf89c8c8673223 Huang Shijie            2009-10-01  771   * At what user virtual address is page expected in vma?
ab941e0fff3947 Naoya Horiguchi         2010-05-11  772   * Caller should check the page is actually part of the vma.
^1da177e4c3f41 Linus Torvalds          2005-04-16  773   */
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  774) unsigned long page_address_in_vma(const struct page *page,
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  775) 		struct vm_area_struct *vma)
^1da177e4c3f41 Linus Torvalds          2005-04-16  776  {
eb7052fd71d991 Matthew Wilcox (Oracle  2024-07-23  777) 	const struct folio *folio = page_folio(page);
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  778) 	pgoff_t pgoff;
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  779) 
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  780) 	if (folio_test_anon(folio)) {
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  781) 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
4829b906cc063c Hugh Dickins            2010-10-02  782  		/*
4829b906cc063c Hugh Dickins            2010-10-02  783  		 * Note: swapoff's unuse_vma() is more efficient with this
4829b906cc063c Hugh Dickins            2010-10-02  784  		 * check, and needs it to match anon_vma when KSM is active.
4829b906cc063c Hugh Dickins            2010-10-02  785  		 */
4829b906cc063c Hugh Dickins            2010-10-02  786  		if (!vma->anon_vma || !page__anon_vma ||
4829b906cc063c Hugh Dickins            2010-10-02  787  		    vma->anon_vma->root != page__anon_vma->root)
21d0d443cdc165 Andrea Arcangeli        2010-08-09  788  			return -EFAULT;
31657170deaf1d Jue Wang                2021-06-15  789  	} else if (!vma->vm_file) {
^1da177e4c3f41 Linus Torvalds          2005-04-16  790  		return -EFAULT;
e05b34539d008a Matthew Wilcox (Oracle  2022-01-29  791) 	} else if (vma->vm_file->f_mapping != folio->mapping) {
^1da177e4c3f41 Linus Torvalds          2005-04-16  792  		return -EFAULT;
31657170deaf1d Jue Wang                2021-06-15  793  	}
494334e43c16d6 Hugh Dickins            2021-06-15  794  
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28  795) 	/* The !page__anon_vma above handles KSM folios */
412ad5fbe9285f Matthew Wilcox (Oracle  2024-03-28 @796) 	pgoff = folio->index + folio_page_idx(folio, page);
e0abfbb6714244 Matthew Wilcox (Oracle  2024-03-28  797) 	return vma_address(vma, pgoff, 1);
^1da177e4c3f41 Linus Torvalds          2005-04-16  798  }
^1da177e4c3f41 Linus Torvalds          2005-04-16  799  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki



* Re: [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff()
  2024-07-23 15:34 ` [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff() Matthew Wilcox (Oracle)
@ 2024-07-24  7:24   ` kernel test robot
  0 siblings, 0 replies; 10+ messages in thread
From: kernel test robot @ 2024-07-24  7:24 UTC (permalink / raw)
  To: Matthew Wilcox (Oracle), Andrew Morton
  Cc: llvm, oe-kbuild-all, Linux Memory Management List,
	Matthew Wilcox (Oracle)

Hi Matthew,

kernel test robot noticed the following build errors:

[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master next-20240724]
[cannot apply to tip/x86/mm dennis-percpu/for-next v6.10]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/bootmem-Stop-using-page-index/20240723-233932
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20240723153503.1669586-4-willy%40infradead.org
patch subject: [PATCH 3/6] mm: Convert page_to_pgoff() to page_pgoff()
config: x86_64-allnoconfig (https://download.01.org/0day-ci/archive/20240724/202407241549.hEuWenpa-lkp@intel.com/config)
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240724/202407241549.hEuWenpa-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202407241549.hEuWenpa-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from mm/filemap.c:51:
>> mm/internal.h:899:39: error: passing 'const struct folio *' to parameter of type 'struct folio *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
     899 |         return folio->index + folio_page_idx(folio, page);
         |                                              ^~~~~
   include/linux/mm.h:216:62: note: expanded from macro 'folio_page_idx'
     216 | #define folio_page_idx(folio, p)        (page_to_pfn(p) - folio_pfn(folio))
         |                                                                     ^~~~~
   include/linux/mm.h:1877:53: note: passing argument to parameter 'folio' here
    1877 | static inline unsigned long folio_pfn(struct folio *folio)
         |                                                     ^
   1 error generated.
--
   In file included from mm/rmap.c:85:
>> mm/internal.h:899:39: error: passing 'const struct folio *' to parameter of type 'struct folio *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
     899 |         return folio->index + folio_page_idx(folio, page);
         |                                              ^~~~~
   include/linux/mm.h:216:62: note: expanded from macro 'folio_page_idx'
     216 | #define folio_page_idx(folio, p)        (page_to_pfn(p) - folio_pfn(folio))
         |                                                                     ^~~~~
   include/linux/mm.h:1877:53: note: passing argument to parameter 'folio' here
    1877 | static inline unsigned long folio_pfn(struct folio *folio)
         |                                                     ^
   mm/rmap.c:796:40: error: passing 'const struct folio *' to parameter of type 'struct folio *' discards qualifiers [-Werror,-Wincompatible-pointer-types-discards-qualifiers]
     796 |         pgoff = folio->index + folio_page_idx(folio, page);
         |                                               ^~~~~
   include/linux/mm.h:216:62: note: expanded from macro 'folio_page_idx'
     216 | #define folio_page_idx(folio, p)        (page_to_pfn(p) - folio_pfn(folio))
         |                                                                     ^~~~~
   include/linux/mm.h:1877:53: note: passing argument to parameter 'folio' here
    1877 | static inline unsigned long folio_pfn(struct folio *folio)
         |                                                     ^
   2 errors generated.


vim +899 mm/internal.h

   895	
   896	static inline pgoff_t page_pgoff(const struct folio *folio,
   897			const struct page *page)
   898	{
 > 899		return folio->index + folio_page_idx(folio, page);
   900	}
   901	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki


